| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bigswitch/nova
|
nova/notifications.py
|
1
|
15437
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functionality related to notifications common to multiple layers of
the system.
"""
import datetime
from oslo_context import context as common_context
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import timeutils
import six
import nova.conf
import nova.context
from nova import exception
from nova.i18n import _LE
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import utils
LOG = log.getLogger(__name__)
CONF = nova.conf.CONF
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param fn: object of the function
:returns: fn -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = (common_context.get_context_from_function_and_args(
fn, args, kwarg) or
common_context.get_current() or
nova.context.RequestContext())
notifier = rpc.get_notifier('api',
publisher_id=(CONF.default_publisher_id
or CONF.host))
method = getattr(notifier, CONF.default_notification_level.lower(),
notifier.info)
method(ctxt, name, body)
return fn(*args, **kwarg)
return wrapped_func
def send_api_fault(url, status, exception):
"""Send an api.fault notification."""
if not CONF.notify_api_faults:
return
payload = {'url': url, 'exception': six.text_type(exception),
'status': status}
rpc.get_notifier('api').error(common_context.get_current() or
nova.context.get_admin_context(),
'api.fault',
payload)
def send_update(context, old_instance, new_instance, service="compute",
host=None):
"""Send compute.instance.update notification to report any changes occurred
in that instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
update_with_state_change = False
old_vm_state = old_instance["vm_state"]
new_vm_state = new_instance["vm_state"]
old_task_state = old_instance["task_state"]
new_task_state = new_instance["task_state"]
# we should check if we need to send a state change or a regular
# notification
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
update_with_state_change = True
elif (CONF.notify_on_state_change == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
update_with_state_change = True
if update_with_state_change:
# send a notification with state changes
# value of verify_states need not be True as the check for states is
# already done here
send_update_with_states(context, new_instance, old_vm_state,
new_vm_state, old_task_state, new_task_state, service, host)
else:
try:
old_display_name = None
if new_instance["display_name"] != old_instance["display_name"]:
old_display_name = old_instance["display_name"]
_send_instance_update_notification(context, new_instance,
service=service, host=host,
old_display_name=old_display_name)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=new_instance)
except Exception:
LOG.exception(_LE("Failed to send state update notification"),
instance=new_instance)
def send_update_with_states(context, instance, old_vm_state, new_vm_state,
old_task_state, new_task_state, service="compute", host=None,
verify_states=False):
"""Send compute.instance.update notification to report changes if there
are any, in the instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
fire_update = True
# send update notification by default
if verify_states:
# check whether we need to send notification related to state changes
fire_update = False
# do not send notification if the conditions for vm and(or) task state
# are not satisfied
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
fire_update = True
elif (CONF.notify_on_state_change == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
fire_update = True
if fire_update:
# send either a state change or a regular notification
try:
_send_instance_update_notification(context, instance,
old_vm_state=old_vm_state, old_task_state=old_task_state,
new_vm_state=new_vm_state, new_task_state=new_task_state,
service=service, host=host)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=instance)
except Exception:
LOG.exception(_LE("Failed to send state update notification"),
instance=instance)
def _compute_states_payload(instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None):
# If the states were not specified we assume the current instance
# states are the correct information. This is important to do for
# both old and new states because otherwise we create some really
# confusing notifications like:
#
# None(None) => Building(none)
#
# When we really were just continuing to build
if new_vm_state is None:
new_vm_state = instance["vm_state"]
if new_task_state is None:
new_task_state = instance["task_state"]
if old_vm_state is None:
old_vm_state = instance["vm_state"]
if old_task_state is None:
old_task_state = instance["task_state"]
states_payload = {
"old_state": old_vm_state,
"state": new_vm_state,
"old_task_state": old_task_state,
"new_task_state": new_task_state,
}
return states_payload
def _send_instance_update_notification(context, instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None,
service="compute", host=None, old_display_name=None):
"""Send 'compute.instance.update' notification to inform observers
about instance state changes.
"""
payload = info_from_instance(context, instance, None, None)
# determine how we'll report states
payload.update(
_compute_states_payload(
instance, old_vm_state, old_task_state,
new_vm_state, new_task_state))
# add audit fields:
(audit_start, audit_end) = audit_period_bounds(current_period=True)
payload["audit_period_beginning"] = audit_start
payload["audit_period_ending"] = audit_end
# add bw usage info:
bw = bandwidth_usage(instance, audit_start)
payload["bandwidth"] = bw
# add old display name if it is changed
if old_display_name:
payload["old_display_name"] = old_display_name
rpc.get_notifier(service, host).info(context,
'compute.instance.update', payload)
def audit_period_bounds(current_period=False):
"""Get the start and end of the relevant audit usage period
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
"""
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end
return (audit_start, audit_end)
def bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data=True):
"""Get bandwidth usage information for the instance for the
specified audit period.
"""
admin_context = nova.context.get_admin_context(read_deleted='yes')
def _get_nwinfo_old_skool():
"""Support for getting network info without objects."""
if (instance_ref.get('info_cache') and
instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
if isinstance(cached_info, network_model.NetworkInfo):
return cached_info
return network_model.NetworkInfo.hydrate(cached_info)
try:
return network.API().get_instance_nw_info(admin_context,
instance_ref)
except Exception:
try:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to get nw_info'),
instance=instance_ref)
except Exception:
if ignore_missing_network_data:
return
raise
# FIXME(comstud): Temporary as we transition to objects.
if isinstance(instance_ref, obj_base.NovaObject):
nw_info = instance_ref.info_cache.network_info
if nw_info is None:
nw_info = network_model.NetworkInfo()
else:
nw_info = _get_nwinfo_old_skool()
macs = [vif['address'] for vif in nw_info]
uuids = [instance_ref["uuid"]]
bw_usages = objects.BandwidthUsageList.get_by_uuids(admin_context, uuids,
audit_start)
bw = {}
for b in bw_usages:
if b.mac in macs:
label = 'net-name-not-found-%s' % b.mac
for vif in nw_info:
if vif['address'] == b.mac:
label = vif['network']['label']
break
bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
return bw
def image_meta(system_metadata):
"""Format image metadata for use in notifications from the instance
system metadata.
"""
image_meta = {}
for md_key, md_value in six.iteritems(system_metadata):
if md_key.startswith('image_'):
image_meta[md_key[6:]] = md_value
return image_meta
def info_from_instance(context, instance, network_info,
system_metadata, **kw):
"""Get detailed instance information for an instance which is common to all
notifications.
:param:instance: nova.objects.Instance
:param:network_info: network_info provided if not None
:param:system_metadata: system_metadata DB entries for the instance,
if not None
.. note::
Currently unused here in trunk, but needed for potential custom
modifications.
"""
def null_safe_str(s):
return str(s) if s else ''
def null_safe_int(s):
return int(s) if s else ''
def null_safe_isotime(s):
if isinstance(s, datetime.datetime):
return utils.strtime(s)
else:
return str(s) if s else ''
image_ref_url = glance.generate_image_url(instance.image_ref)
instance_type = instance.get_flavor()
instance_type_name = instance_type.get('name', '')
instance_flavorid = instance_type.get('flavorid', '')
instance_info = dict(
# Owner properties
tenant_id=instance.project_id,
user_id=instance.user_id,
# Identity properties
instance_id=instance.uuid,
display_name=instance.display_name,
reservation_id=instance.reservation_id,
hostname=instance.hostname,
# Type properties
instance_type=instance_type_name,
instance_type_id=instance.instance_type_id,
instance_flavor_id=instance_flavorid,
architecture=instance.architecture,
# Capacity properties
memory_mb=instance.memory_mb,
disk_gb=instance.root_gb + instance.ephemeral_gb,
vcpus=instance.vcpus,
# Note(dhellmann): This makes the disk_gb value redundant, but
# we are keeping it for backwards-compatibility with existing
# users of notifications.
root_gb=instance.root_gb,
ephemeral_gb=instance.ephemeral_gb,
# Location properties
host=instance.host,
node=instance.node,
availability_zone=instance.availability_zone,
cell_name=null_safe_str(instance.cell_name),
# Date properties
created_at=str(instance.created_at),
# Terminated and Deleted are slightly different (although being
# terminated and not deleted is a transient state), so include
# both and let the recipient decide which they want to use.
terminated_at=null_safe_isotime(instance.get('terminated_at', None)),
deleted_at=null_safe_isotime(instance.get('deleted_at', None)),
launched_at=null_safe_isotime(instance.get('launched_at', None)),
# Image properties
image_ref_url=image_ref_url,
os_type=instance.os_type,
kernel_id=instance.kernel_id,
ramdisk_id=instance.ramdisk_id,
# Status properties
state=instance.vm_state,
state_description=null_safe_str(instance.task_state),
progress=null_safe_int(instance.progress),
# accessIPs
access_ip_v4=instance.access_ip_v4,
access_ip_v6=instance.access_ip_v6,
)
if network_info is not None:
fixed_ips = []
for vif in network_info:
for ip in vif.fixed_ips():
ip["label"] = vif["network"]["label"]
ip["vif_mac"] = vif["address"]
fixed_ips.append(ip)
instance_info['fixed_ips'] = fixed_ips
# add image metadata
image_meta_props = image_meta(instance.system_metadata)
instance_info["image_meta"] = image_meta_props
# add instance metadata
instance_info['metadata'] = instance.metadata
instance_info.update(kw)
return instance_info
|
apache-2.0
| -7,944,843,228,296,015,000
| 33.689888
| 79
| 0.615081
| false
| 4.10777
| false
| false
| false
|
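The `notify_decorator` shown above wraps an arbitrary callable, packs its positional and keyword arguments into a notification body, emits the notification, and only then calls the wrapped function. Below is a minimal, dependency-free sketch of that wrapping pattern; `FakeNotifier` and the sample `'api.resize'` call are invented stand-ins for `rpc.get_notifier()` and a real API function.

```python
import functools


class FakeNotifier:
    """Hypothetical stand-in for the notifier returned by rpc.get_notifier()."""

    def info(self, ctxt, event_type, payload):
        print(ctxt, event_type, payload)


def notify_decorator(name, fn, notifier=FakeNotifier()):
    """Wrap fn so every call emits a notification before running fn."""

    @functools.wraps(fn)
    def wrapped_func(*args, **kwargs):
        body = {'args': list(args), 'kwarg': dict(kwargs)}
        notifier.info({}, name, body)  # (context, event name, payload)
        return fn(*args, **kwargs)

    return wrapped_func


# Invented example call: the notification is emitted, then the function runs.
resize = notify_decorator('api.resize', lambda flavor, force=False: flavor)
print(resize('m1.small', force=True))
```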
Azure/azure-sdk-for-python
|
sdk/graphrbac/azure-graphrbac/tests/test_graphrbac.py
|
1
|
8439
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.graphrbac.models
from devtools_testutils import AzureMgmtTestCase
import pytest
# GraphRBAC tests
AD_DOMAIN = "myaddomain.onmicrosoft.com"
class GraphRbacTest(AzureMgmtTestCase):
def setUp(self):
super(GraphRbacTest, self).setUp()
# Set the env variable AZURE_AD_DOMAIN or put AD_DOMAIN in your "mgmt_settings_real" file
self.ad_domain = self.set_value_to_scrub('AD_DOMAIN', AD_DOMAIN)
self.graphrbac_client = self.create_basic_client(
azure.graphrbac.GraphRbacManagementClient,
tenant_id=self.ad_domain
)
def _build_object_url(self, object_id):
return "https://graph.windows.net/{}/directoryObjects/{}".format(
self.ad_domain,
object_id
)
def test_signed_in_user(self):
user = self.graphrbac_client.signed_in_user.get()
assert user.mail_nickname.startswith("admin") # Assuming we do the test with adminXXX account
# Create a group, and check I own it
group_create_parameters = azure.graphrbac.models.GroupCreateParameters(
display_name="pytestgroup_display",
mail_nickname="pytestgroup_nickname"
)
group = None
try:
group = self.graphrbac_client.groups.create(group_create_parameters)
self.graphrbac_client.groups.add_owner(
group.object_id,
self._build_object_url(user.object_id)
)
owned_objects = list(self.graphrbac_client.signed_in_user.list_owned_objects())
for obj in owned_objects:
if obj.display_name == "pytestgroup_display":
break
else:
pytest.fail("Didn't found the group I just created in my owned objects")
try:
self.graphrbac_client.groups.remove_owner(
group.object_id,
user.object_id
)
pytest.fail("Remove the only owner MUST fail")
except azure.graphrbac.models.GraphErrorException as err:
assert "The group must have at least one owner, hence this owner cannot be removed." in err.message
finally:
if group:
self.graphrbac_client.groups.delete(group.object_id)
def test_deleted_applications(self):
existing_deleted_applications = list(self.graphrbac_client.deleted_applications.list())
# Delete the app if already exists
for app in self.graphrbac_client.applications.list(filter="displayName eq 'pytest_deleted_app'"):
self.graphrbac_client.applications.delete(app.object_id)
# Create an app
app = self.graphrbac_client.applications.create({
'available_to_other_tenants': False,
'display_name': 'pytest_deleted_app',
'identifier_uris': ['http://pytest_deleted_app.org']
})
# Delete the app
self.graphrbac_client.applications.delete(app.object_id)
# I should see it now in deletedApplications
existing_deleted_applications = list(self.graphrbac_client.deleted_applications.list(
filter="displayName eq 'pytest_deleted_app'"
))
# At least one, but if you executed this test a lot, you might see several apps deleted with this name
assert len(existing_deleted_applications) >= 1
assert all(app.display_name == 'pytest_deleted_app' for app in existing_deleted_applications)
# Ho my god, most important app ever
restored_app = self.graphrbac_client.deleted_applications.restore(app.object_id)
assert restored_app.object_id == app.object_id
# You know what, no I don't care
self.graphrbac_client.applications.delete(app.object_id)
self.graphrbac_client.deleted_applications.hard_delete(app.object_id)
def test_graphrbac_users(self):
user = self.graphrbac_client.users.create(
azure.graphrbac.models.UserCreateParameters(
user_principal_name="testbuddy#TEST@{}".format(self.ad_domain),
account_enabled=False,
display_name='Test Buddy',
mail_nickname='testbuddy',
password_profile=azure.graphrbac.models.PasswordProfile(
password='MyStr0ngP4ssword',
force_change_password_next_login=True
)
)
)
self.assertEqual(user.display_name, 'Test Buddy')
user = self.graphrbac_client.users.get(user.object_id)
self.assertEqual(user.display_name, 'Test Buddy')
user = self.graphrbac_client.users.get(user.user_principal_name)
self.assertEqual(user.display_name, 'Test Buddy')
users = self.graphrbac_client.users.list(
filter="displayName eq 'Test Buddy'"
)
users = list(users)
self.assertEqual(len(users), 1)
self.assertEqual(users[0].display_name, 'Test Buddy')
self.graphrbac_client.users.delete(user.object_id)
def test_groups(self):
group_create_parameters = azure.graphrbac.models.GroupCreateParameters(
display_name="pytestgroup_display",
mail_nickname="pytestgroup_nickname"
)
group = self.graphrbac_client.groups.create(group_create_parameters)
self.assertEqual(group.display_name, "pytestgroup_display")
group = self.graphrbac_client.groups.get(group.object_id)
self.assertEqual(group.display_name, "pytestgroup_display")
groups = self.graphrbac_client.groups.list(
filter="displayName eq 'pytestgroup_display'"
)
groups = list(groups)
self.assertEqual(len(groups), 1)
self.assertEqual(groups[0].display_name, "pytestgroup_display")
self.graphrbac_client.groups.delete(group.object_id)
def test_apps_and_sp(self):
# Delete the app if already exists
for app in self.graphrbac_client.applications.list(filter="displayName eq 'pytest_app'"):
self.graphrbac_client.applications.delete(app.object_id)
app = self.graphrbac_client.applications.create({
'available_to_other_tenants': False,
'display_name': 'pytest_app',
'identifier_uris': ['http://pytest_app.org'],
'app_roles': [{
"allowed_member_types": ["User"],
"description": "Creators can create Surveys",
"display_name": "SurveyCreator",
"id": "1b4f816e-5eaf-48b9-8613-7923830595ad", # Random, but fixed for tests
"is_enabled": True,
"value": "SurveyCreator"
}]
})
# Take this opportunity to test get_objects_by_object_ids
objects = self.graphrbac_client.objects.get_objects_by_object_ids({
'object_ids': [app.object_id],
'types': ['Application']
})
objects = list(objects)
assert len(objects) == 1
assert objects[0].display_name == 'pytest_app'
apps = list(self.graphrbac_client.applications.list(
filter="displayName eq 'pytest_app'"
))
assert len(apps) == 1
assert apps[0].app_roles[0].display_name == "SurveyCreator"
sp = self.graphrbac_client.service_principals.create({
'app_id': app.app_id, # Do NOT use app.object_id
'account_enabled': False
})
# Testing getting SP id by app ID
result = self.graphrbac_client.applications.get_service_principals_id_by_app_id(app.app_id)
assert result.value == sp.object_id
self.graphrbac_client.service_principals.update(
sp.object_id,
{
'account_enabled': False
}
)
self.graphrbac_client.service_principals.delete(sp.object_id)
self.graphrbac_client.applications.delete(app.object_id)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
mit
| -7,201,706,950,653,175,000
| 37.701835
| 115
| 0.601162
| false
| 4.011888
| true
| false
| false
|
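`test_signed_in_user` above depends on Python's `for ... else` clause: the `else` branch (the `pytest.fail`) runs only when the loop completes without hitting `break`. A small self-contained illustration of the same idiom, with made-up data:

```python
owned = [{'display_name': 'other_group'}, {'display_name': 'pytestgroup_display'}]

for obj in owned:
    if obj['display_name'] == 'pytestgroup_display':
        print('found it')
        break
else:
    # Reached only if no break occurred, i.e. the group was never found.
    raise AssertionError("Didn't find the group in the owned objects")
```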
jjlee/git-meld-index
|
setup.py
|
1
|
1882
|
#!/usr/bin/env python
import ast
import codecs
from setuptools import setup
def read_text(path):
with codecs.open(path, "r", "utf-8") as fh:
return fh.read()
def read_version(path):
with open(path) as fh:
for line in fh:
stripped = line.strip()
if stripped == "" or stripped.startswith("#"):
continue
elif line.startswith("from __future__ import"):
continue
else:
if not line.startswith("__version__ = "):
raise Exception("Can't find __version__ line in " + path)
break
else:
raise Exception("Can't find __version__ line in " + path)
_, _, quoted = line.rstrip().partition("= ")
return ast.literal_eval(quoted)
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Operating System :: POSIX",
"Programming Language :: Python",
# "Programming Language :: Python :: 3", # TODO
"Topic :: Software Development :: Version Control",
]
scripts = [
"bin/git-meld-index-run-merge-tool",
]
setup(
name="git-meld-index",
url='https://github.com/jjlee/git-meld-index',
author='John Lee',
author_email='jjl@pobox.com',
classifiers=classifiers,
data_files=[("share/man/man1", ["doc/git-meld-index.1"])],
description="Like git add -p but with meld (or any difftool)",
license="GPL",
long_description=read_text("README.md"),
package_dir={"": "src"},
platforms=["any"],
py_modules=["git_meld_index"],
scripts=scripts,
version=read_version("src/git_meld_index.py"),
zip_safe=False,
entry_points={
"console_scripts": [
"git-meld-index = git_meld_index:main",
],
}
)
|
gpl-2.0
| 6,906,737,977,692,128,000
| 25.507042
| 77
| 0.571201
| false
| 3.764
| false
| false
| false
|
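`read_version` above locates the module's `__version__` line and turns the quoted literal into a plain string with `ast.literal_eval`, so setup.py never has to import the package. The core step in isolation, using an invented sample line:

```python
import ast

line = '__version__ = "0.1.3"\n'  # invented example of a version line
_, _, quoted = line.rstrip().partition("= ")
version = ast.literal_eval(quoted)  # safely evaluates the string literal
print(version)  # -> 0.1.3
```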
google/hypebot
|
hypebot/plugins/league/summoner_lib.py
|
1
|
12403
|
# Copyright 2018 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summoner-related libraries.
Fetches summoner data from Riot API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from absl import logging
import arrow
from hypebot.core import inflect_lib
from hypebot.protos.riot.v4 import constants_pb2
from hypebot.protos.riot.v4 import league_pb2
DEFAULT_REGION = 'na'
GAME_MODES = {
'ARAM': 'ARAM',
'ASCENSION': 'Ascension',
'CLASSIC': {
constants_pb2.QueueType.BOT_5x5: 'Bots',
constants_pb2.QueueType.BOT_TT_3x3: 'TT Bots',
constants_pb2.QueueType.GROUP_FINDER_5x5: 'Team Builder',
constants_pb2.QueueType.NORMAL_5x5_BLIND: 'Normals',
constants_pb2.QueueType.NORMAL_5x5_DRAFT: 'Normals',
constants_pb2.QueueType.NORMAL_3x3: 'TT Normals',
constants_pb2.QueueType.ONEFORALL_5x5: 'One For All (SR)',
constants_pb2.QueueType.RANKED_FLEX_SR: 'Flecks',
constants_pb2.QueueType.RANKED_FLEX_TT: 'TT Flecks',
constants_pb2.QueueType.RANKED_SOLO_5x5: 'YoloQ',
constants_pb2.QueueType.TEAM_BUILDER_RANKED_SOLO:
'YoloQ', # this is weird
constants_pb2.QueueType.RANKED_TEAM_3x3: 'Ranked 3s',
constants_pb2.QueueType.RANKED_TEAM_5x5: 'Ranked 5s',
constants_pb2.QueueType.TEAM_BUILDER_DRAFT_UNRANKED_5x5: 'Normals',
constants_pb2.QueueType.URF_5x5: 'URF',
constants_pb2.QueueType.CLASH: 'CLASH',
},
'KINGPORO': 'Poro King',
'ODIN': 'Dominion',
'ONEFORALL': 'One For All',
'SIEGE': 'Nexus Siege',
'GAMEMODEX': {
constants_pb2.QueueType.NEXUS_BLITZ: 'Blitz',
},
}
def NormalizeSummoner(input_text):
return ''.join(input_text.split()).lower()
class SummonerLib(object):
"""Class for fetching various data from Riot API."""
def __init__(self, rito, game):
self._rito = rito
self._game = game
def _GetMatchParticipant(self, encrypted_account_id, match_ref, match):
participant_ids = [
p.participant_id
for p in match.participant_identities
if p.player.current_account_id == encrypted_account_id
]
participant = None
if participant_ids:
[participant] = [
p for p in match.participants
if p.participant_id == participant_ids[0]
]
return participant
participants = [
p for p in match.participants if p.champion_id == match_ref.champion
]
if participants:
# Best guess, which is wrong for blind pick and one-for-all game types.
# Rito is full of filthy casuals.
return participants[0]
def Who(self, summoner):
"""Gets and formats data for a summoner."""
summoner_data = {}
game_data = {}
# Populate basic data (username, summoner name, region)
summoner_data['username'] = summoner['username']
summoner_data['summoner'] = summoner['summoner']
region = summoner.get('region', DEFAULT_REGION)
summoner_data['region'] = region
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
encrypted_account_id = summoner.get('encrypted_account_id', '')
r = self._rito.GetSummoner(region, summoner['summoner'])
if r:
summoner_data['profile_icon_id'] = r.profile_icon_id
r = self._rito.ListRecentMatches(region, encrypted_account_id)
last_game_ref = None
last_game = None
participant = None
if r:
last_game_ref = r.matches[0]
last_game = self._rito.GetMatch(region, last_game_ref.game_id)
if last_game:
participant = self._GetMatchParticipant(encrypted_account_id,
last_game_ref, last_game)
if last_game_ref and last_game and participant:
# Champion played
champion_id = participant.champion_id
game_data['champion'] = self._game.champion_id_to_name[str(champion_id)]
# Game type
logging.info('Evaluating (%s, %s)', last_game.game_mode,
last_game.game_type)
game_type = GAME_MODES.get(last_game.game_mode)
if last_game.game_mode == 'CLASSIC':
game_type = game_type.get(last_game.queue_id)
game_data['type'] = game_type or 'Unknown'
# Game time
# It seems rito api returns games in US/Pacific time, but this could
# change at any point in the future.
logging.info('SummonerLib: gametime: %s', last_game_ref.timestamp)
game_data['time'] = arrow.get(last_game_ref.timestamp /
1000.0).to('US/Pacific')
# Other data (win/loss, fantasy points, penta)
game_data['win'] = participant.stats.win
game_data['fantasy_points'] = self._ComputeFantasyPoints(
participant.stats)
summoner_data['penta'] = participant.stats.penta_kills > 0
summoner_data['last_game'] = game_data
# Find dynamic queue rank
rank = None
r = self._rito.ListLeaguePositions(region, encrypted_summoner_id)
if r:
leagues = r.positions
for league in leagues:
if league.queue_type == constants_pb2.QueueType.RANKED_SOLO_5x5:
tier = constants_pb2.Tier.Enum.Name(league.tier)[0].upper()
division = self._RomanToLatin(
league_pb2.TierRank.Enum.Name(league.rank))
rank = tier + division
if not rank:
rank = 'Unranked'
summoner_data['rank'] = rank
return summoner_data
def Champs(self, summoner):
"""Gets and formats champion mastery data for summoner."""
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
region = summoner.get('region', DEFAULT_REGION)
r = self._rito.ListChampionMasteries(region, encrypted_summoner_id)
if r:
logging.info('Got champ mastery data for %s/%s [%s]', region,
encrypted_summoner_id, summoner['summoner'])
# Calculate total number of chests received
total_chests = sum(1 for x in r.champion_masteries if x.chest_granted)
top_champs = []
for champ in r.champion_masteries[:3]:
top_champs.append(self._game.champion_id_to_name[str(
champ.champion_id)])
top_champ_lvl = r.champion_masteries[0].champion_level
chest_verb = ''
chest_verb_dict = {
(0, 2): 'receiving',
(2, 4): 'collecting',
(4, 8): 'earning',
(8, 16): 'amassing',
(16, 32): 'hoarding'
}
for range_spec, verb in chest_verb_dict.items():
if total_chests in range(*range_spec):
chest_verb = verb
break
if chest_verb:
chest_str = '%s %s' % (chest_verb,
inflect_lib.Plural(total_chests, 'chest'))
else:
chest_str = 'with a boatload of chests (%d)' % total_chests
return (u'{0} is a L{1} {2[0]} main, but sometimes likes to play {2[1]} '
'and {2[2]}, {3} this season.').format(summoner['summoner'],
top_champ_lvl, top_champs,
chest_str)
def ChampMasterySingle(self, summoner, champ_name):
"""Gets and formats champion mastery for summoner and specific champ."""
# Get the champ ID.
champ_id = self._game.GetChampId(champ_name)
if champ_id is None:
return 'Champion "%s" not found.' % champ_name
champ_display_name = self._game.GetChampDisplayName(champ_name)
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
region = summoner.get('region', DEFAULT_REGION)
r = self._rito.GetChampionMastery(region, encrypted_summoner_id, champ_id)
if r:
logging.info('Got single champ mastery data for %s/%s [%s] on Champ %s',
region, encrypted_summoner_id, summoner['summoner'],
champ_display_name)
champ_level = r.champion_level
points = r.champion_points
return ('%s is a L%d %s player with %d mastery points.' %
(summoner['summoner'], champ_level, champ_display_name, points))
else:
logging.info(
'Got chimp mastery data for %s/%s [%s] on Champ %s (no data)', region,
encrypted_summoner_id, summoner['summoner'], champ_display_name)
return '%s does not play %s.' % (summoner['summoner'], champ_display_name)
def Chimps(self, summoner):
"""Gets and formats Chimp mastery data for summoner."""
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
region = summoner.get('region', DEFAULT_REGION)
# Wukong is Champ ID 62
r = self._rito.GetChampionMastery(region, encrypted_summoner_id, 62)
if r:
logging.info('Got chimp mastery data for %s/%s [%s]', region,
encrypted_summoner_id, summoner['summoner'])
champ_level = r.champion_level
points = r.champion_points
return ('%s is a L%d Wukong player with %d mastery points.' %
(summoner['summoner'], champ_level, points))
else:
logging.info('Got chimp mastery data for %s/%s [%s] (no data)', region,
encrypted_summoner_id, summoner['summoner'])
return '%s is not a fan of monkeys.' % summoner['summoner']
def _ComputeFantasyPoints(self, stats):
"""Calculates the number of fantasy points recieved in a game."""
point_mapping = {
'kills': 2,
'deaths': -0.5,
'assists': 1.5,
'triple_kills': 2,
'quadra_kills': 5,
'penta_kills': 10,
'neutral_minions_killed': 0.01,
'total_minions_killed': 0.01
}
points = 0
for stat in point_mapping:
points += point_mapping[stat] * getattr(stats, stat)
if max(stats.assists, stats.kills) > 10:
points += 2
return points
def _RomanToLatin(self, roman_numerals):
"""Translates a str roman numeral (I to V) into the latin equivalent."""
roman = roman_numerals.strip().upper()
return {'I': '1', 'II': '2', 'III': '3', 'IV': '4', 'V': '5'}[roman]
class SummonerTracker(object):
"""Tracks summoners."""
def __init__(self, rito, user_prefs):
self._rito = rito
self._user_prefs = user_prefs
def ParseSummoner(self, user, smurfs, region, name):
"""Parses a summoner(s) out of mangled garbage the user supplied as input.
Args:
user: The user which triggered this parsing. Converts 'me'.
smurfs: Whether to include smurfs.
region: If any/not default.
name: summoner or special string (e.g., 'me').
Returns:
A list of summoner_info dicts with the following fields:
- username: Unused for now
- summoner: The parsed summoner name
- encrypted_summoner_id: The encrypted rito summoner id, which is useful
for other API calls
- encrypted_account_id: The encrypted rito account id, which is useful
for other API calls
- encrypted_puuid: The encrypted rito PUUID, which is useful for other
API calls
- region: The given or inferred region for which this summoner is valid
"""
region = (region or self._user_prefs.Get(user, 'lol_region')).lower()
if name == 'me':
names = self._user_prefs.Get(user, 'lol_summoner')
if not names:
return []
else:
names = self._user_prefs.Get(name, 'lol_summoner') or name
names = [NormalizeSummoner(name) for name in names.split(',')]
if smurfs is None:
names = names[:1]
summoners = []
for name in names:
r = self._rito.GetSummoner(region, name)
if r:
summoners.append({
'username': None,
'summoner': r.name,
'encrypted_summoner_id': r.id,
'encrypted_account_id': r.account_id,
'encrypted_puuid': r.puuid,
'region': region
})
return summoners
|
apache-2.0
| -3,853,912,355,005,328,000
| 36.584848
| 80
| 0.622188
| false
| 3.243462
| false
| false
| false
|
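`_ComputeFantasyPoints` above multiplies each stat by a fixed weight and adds a 2-point bonus when kills or assists exceed 10. A self-contained sketch of the same arithmetic with a stand-in stats object; the numbers are invented for illustration.

```python
from types import SimpleNamespace

POINT_MAPPING = {
    'kills': 2, 'deaths': -0.5, 'assists': 1.5,
    'triple_kills': 2, 'quadra_kills': 5, 'penta_kills': 10,
    'neutral_minions_killed': 0.01, 'total_minions_killed': 0.01,
}


def compute_fantasy_points(stats):
    points = sum(weight * getattr(stats, stat)
                 for stat, weight in POINT_MAPPING.items())
    if max(stats.assists, stats.kills) > 10:
        points += 2  # bonus for a double-digit kill or assist game
    return points


stats = SimpleNamespace(kills=12, deaths=3, assists=7, triple_kills=1,
                        quadra_kills=0, penta_kills=0,
                        neutral_minions_killed=20, total_minions_killed=180)
print(compute_fantasy_points(stats))  # 24 - 1.5 + 10.5 + 2 + 0.2 + 1.8 + 2 = 39.0
```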
gsmke/django-leaf
|
leaf/tests/test_page.py
|
1
|
2974
|
import os
import pytest
from model_mommy import mommy
@pytest.mark.parametrize('value', (
'',
'/',
'/test/test2',
'/test/test2/',
))
def test_strip_trailing_slash(value):
from leaf.page import strip_trailing_slash
assert not strip_trailing_slash(value).endswith('/')
@pytest.mark.parametrize('url', (
'',
'example',
'example/test',
'example/test/',
'example/test2/test3',
))
def test_get_names(url):
from leaf.page import get_names
valid_paths = [
url,
os.path.join(url, 'index'),
os.path.join('pages', url),
os.path.join('pages', url, 'index'),
]
return get_names(url) == valid_paths
@pytest.mark.parametrize('url', (
'admin',
'admin/',
'admin/example',
'admin/example/test',
'admin/example/test/',
'admin/example/test2/test3',
))
def test_get_names_admin(url):
from leaf.page import get_names
assert get_names(url) == []
@pytest.mark.parametrize('url,expected', (
('', '/index'),
('/', '/index'),
('/test', '/test'),
('/test/test2', '/test/test2'),
))
def test_get_url(url, expected):
from leaf.page import get_url
class View:
kwargs = {
'url': url
}
assert get_url(View()) == expected
def test_get_url_kwarg():
from leaf.page import get_url
class View:
url = '/testing'
assert get_url(View()) == '/testing'
def test_get_url_none():
from django.http import Http404
from leaf.page import get_url
class View:
kwargs = {
'url': None
}
with pytest.raises(Http404):
get_url(View())
@pytest.mark.django_db
def test_get_from_database():
from leaf.page import get_from_database
node = mommy.make('leaf.PageNode', slug='test', template='example-page')
page_class = mommy.make("leaf_test.PageClass", node=node)
assert get_from_database('test/') == page_class
assert get_from_database('test') == page_class
@pytest.mark.django_db
def test_get_from_database_no_template():
from leaf.page import get_from_database
mommy.make('leaf.PageNode', slug='test')
assert get_from_database('test/') is None
assert get_from_database('test') is None
@pytest.mark.django_db
def test_get_from_database_no_page_class():
from leaf.page import get_from_database
mommy.make('leaf.PageNode', slug='test', template='example-page')
assert get_from_database('test/') is None
assert get_from_database('test') is None
@pytest.mark.django_db
def test_get_from_database_home_page():
from leaf.page import get_from_database
home_page = mommy.make('leaf.PageNode', slug='home', template='example-page')
page_class = mommy.make("leaf_test.PageClass", node=home_page)
assert get_from_database('') == page_class
assert get_from_database('/') == page_class
assert get_from_database('home') == page_class
assert get_from_database('home/') == page_class
|
bsd-3-clause
| 1,072,196,295,359,407,600
| 22.417323
| 81
| 0.624748
| false
| 3.379545
| true
| false
| false
|
felgari/k2
|
aptrend.py
|
1
|
2851
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Felipe Gallego. All rights reserved.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Script to calculate ap for trend.
"""
import sys
import os
import csv
from ctes import *
class ApTrend(object):
def __init__(self):
self._ap = []
def calculate_ap(self, trend, first_trend, second_trend, pos1, pos2):
cur_ap = TREND_IG
v1 = trend[0]
v2 = trend[1]
v3 = trend[2]
if v1 > TREND_HIGH_VALUE:
if first_trend == AVPOS_TREND_UP:
cur_ap = TREND_1
elif v2 > TREND_HIGH_VALUE:
if v2 - v1 > v3 and first_trend == AVPOS_TREND_UP:
cur_ap = TREND_2
elif v2 - v3 > v1 and second_trend == AVPOS_TREND_UP:
cur_ap = TREND_4
else:
cur_ap = TREND_3
elif v3 > TREND_HIGH_VALUE:
if first_trend == AVPOS_TREND_DOWN or second_trend == AVPOS_TREND_UP:
cur_ap = TREND_5
elif abs(v1 - TREND_AV) < TREND_AV_DIFF and \
abs(v2 - TREND_AV) < TREND_AV_DIFF and \
abs(v3 - TREND_AV) < TREND_AV_DIFF:
cur_ap = TREND_3
elif first_trend == AVPOS_TREND_UP and second_trend == AVPOS_TREND_DOWN:
cur_ap = TREND_1
elif first_trend == AVPOS_TREND_DOWN and second_trend == AVPOS_TREND_UP \
and v2 >= v1 and v3 > v1:
cur_ap = TREND_4
elif pos1 < pos2 and pos1 - pos2 <= TREND_POS_DIFF_H:
cur_ap = TREND_1
elif pos1 > pos2 and pos1 - pos2 >= TREND_POS_DIFF_V:
cur_ap = TREND_4
self._ap.append(cur_ap)
return cur_ap
def write_data(self, index):
out_file_name = os.path.join(DATA_PATH, AP_FILE_TREND_PREFIX + str(index) + AP_FILE_TREND_EXT)
print("Saving trend ap in: %s" % out_file_name)
with open(out_file_name, "wt") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=CSV_DELIMITER)
for ap_d in self._ap:
row = [ ap_d ]
csvwriter.writerow(row)
|
gpl-3.0
| 607,653,928,808,055,200
| 32.952381
| 102
| 0.558401
| false
| 3.346244
| false
| false
| false
|
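`write_data` above writes one ap value per CSV row through `csv.writer`, using a project-specific delimiter taken from `ctes`. A tiny standalone version of that write loop; the delimiter and filename here are invented, since `CSV_DELIMITER` is not shown.

```python
import csv

ap_values = [1, 3, 4, 1]  # stand-in for self._ap

with open("ap_trend_example.csv", "wt", newline="") as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=";")  # delimiter is assumed
    for ap_d in ap_values:
        csvwriter.writerow([ap_d])  # one ap value per row
```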
protwis/protwis
|
angles/migrations/0011_auto_20200402_1344.py
|
1
|
1225
|
# Generated by Django 2.0.8 on 2020-04-02 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('angles', '0010_residueangle_tau_angle'),
]
operations = [
migrations.AddField(
model_name='residueangle',
name='chi1',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi2',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi3',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi4',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi5',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='missing_atoms',
field=models.IntegerField(default=0, null=True),
),
]
|
apache-2.0
| -1,172,428,415,425,360,100
| 27.488372
| 60
| 0.54449
| false
| 4.253472
| false
| false
| false
|
protwis/protwis
|
build/management/commands/parse_excel_annotations.py
|
1
|
23312
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from common.alignment import Alignment, ClosestReceptorHomolog
from protein.models import Protein, ProteinSegment
from structure.models import Structure
import datetime
import logging
from optparse import make_option
import os
import shutil
import xlrd
import yaml
from collections import OrderedDict
import pprint
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def represent_ordereddict(dumper, data):
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
yaml.add_representer(OrderedDict, represent_ordereddict)
yaml.add_constructor(_mapping_tag, dict_constructor)
class Command(BaseCommand):
help = 'Basic functions for build scripts'
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
parser.add_argument('-f', '--filename',
action='store',
dest='filename',
help='Path to Uniprot text file')
parser.add_argument('-m',
action='store_true',
default=False,
help='Run main template search. Updates Xtal_Templ.csv with closest receptor homologs')
annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'Structural_Annotation.xlsx'])
xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_segends.yaml'])
mod_xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'mod_xtal_segends.yaml'])
xtal_seg_end_bw_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_segends_bw.yaml'])
ECD_annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'ECD_annotation.xlsx'])
ClassD_annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'Class_D_Annotation.xlsx'])
non_xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'non_xtal_segends.yaml'])
non_xtal_seg_end_bw_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'non_xtal_segends_bw.yaml'])
all_anomalities_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'all_anomalities.yaml'])
xtal_anomalities_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_anomalities.yaml'])
sequence_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'sequences.yaml'])
ECD_wt_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'ECD_wt.yaml'])
ECD_anomalies_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'ECD_anomalies.yaml'])
if not os.path.exists(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation'])):
os.makedirs(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation']))
def handle(self, *args, **options):
self.data = self.parse_excel(self.annotation_source_file)
self.dump_files()
self.ECD_data = self.parse_excel(self.ECD_annotation_source_file)
self.dump_ECD_files()
self.ClassD_data = self.parse_excel(self.ClassD_annotation_source_file)
self.dump_ClassD_data()
# self.analyse_annotation_consistency()
self.find_representatives()
if options['m']:
self.main_template_search()
def dump_ECD_files(self):
data_dict = OrderedDict()
for key, val in self.ECD_data['wt'].items():
if val['H1x50']=='':
continue
entry_name = val['UniProt']
del val['Key']
del val['UniProt']
data_dict[entry_name] = val
with open(self.ECD_wt_file, 'w') as outfile:
yaml.dump(data_dict, outfile, indent=4)
anomalies = OrderedDict()
for key, val in self.ECD_data['anomalies'].items():
entry_name = val['protein']
del val['protein']
anomalies[entry_name] = val
with open(self.ECD_anomalies_file, 'w') as outfile:
yaml.dump(anomalies, outfile, indent=4)
def dump_ClassD_data(self):
data_dict1, data_dict2 = OrderedDict(), OrderedDict()
for key, val in self.ClassD_data['SegEnds_NonXtal_Prot#'].items():
entry_name = val['UniProt'].lower()
del val['Key']
del val['UniProt']
del val['']
data_dict1[entry_name] = val
with open(self.non_xtal_seg_end_file, 'a') as outfile:
yaml.dump(data_dict1, outfile, indent=4)
for key, val in self.ClassD_data['SegEnds_NonXtal_BW#'].items():
entry_name = val['UniProt'].lower()
del val['UniProt']
data_dict2[entry_name] = val
with open(self.non_xtal_seg_end_bw_file, 'a') as outfile:
yaml.dump(data_dict2, outfile, indent=4)
data = self.ClassD_data["Bulges_Constrictions"]
NonXtal_Bulges_Constr_GPCRdb = {}
for structure,vals in data.items():
entry = structure.lower()
NonXtal_Bulges_Constr_GPCRdb[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
NonXtal_Bulges_Constr_GPCRdb[entry][key] = val
NonXtal_Bulges_Constr_GPCRdb = OrderedDict(sorted(NonXtal_Bulges_Constr_GPCRdb.items()))
with open(self.all_anomalities_file, 'a') as outfile:
yaml.dump(NonXtal_Bulges_Constr_GPCRdb, outfile, indent=4)
data = self.ClassD_data["Seqs"]
Seqs = {}
for structure,vals in data.items():
entry = structure.lower()
Seqs[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
Seqs[entry][key] = val
Seqs = OrderedDict(sorted(Seqs.items()))
with open(self.sequence_file, 'a') as outfile:
yaml.dump(Seqs, outfile, indent=4)
structures = self.ClassD_data["SegEnds_Xtal_Prot#"]
pdb_info = {}
pdb_info_all = {}
for structure,vals in structures.items():
if structure.split("_")[-1] == "wt":
continue
if structure.split("_")[-1] == "dist":
continue
#print(structure)
pdb_id = structure.split("_")[-1]
pdb_info[pdb_id] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info[pdb_id][key] = val
for structure,vals in structures.items():
entry = structure
pdb_info_all[entry] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info_all[entry][key] = val
pdb_info = OrderedDict(sorted(pdb_info.items()))
with open(self.mod_xtal_seg_end_file, 'a') as outfile:
yaml.dump(pdb_info, outfile, indent=4)
pdb_info_all = OrderedDict(sorted(pdb_info_all.items()))
with open(self.xtal_seg_end_file, 'a') as outfile:
yaml.dump(pdb_info_all, outfile, indent=4)
def parse_excel(self,path):
workbook = xlrd.open_workbook(path)
worksheets = workbook.sheet_names()
d = {}
for worksheet_name in worksheets:
if worksheet_name in d:
print('Error, worksheet with this name already loaded')
continue
d[worksheet_name] = OrderedDict()
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0 #skip first, otherwise -1
headers = []
for i in range(num_cells):
h = worksheet.cell_value(0, i)
if h=="":
#replace header with index if empty
h = "i_"+str(i)
if h in headers:
# print('already have ',h)
h += "_"+str(i)
# print(h)
headers.append(h)
for curr_row in range(1,num_rows+1):
row = worksheet.row(curr_row)
key = worksheet.cell_value(curr_row, 0)
if key=='':
#in case there is no key for whatever reason
# print("no key!")
continue
# if key in d[worksheet_name]:
# print(key, "already in",worksheet_name)
d[worksheet_name][key] = OrderedDict()
temprow = {}
for curr_cell in range(num_cells):
# cell_type = worksheet.cell_type(curr_row, curr_cell)
cell_value = worksheet.cell_value(curr_row, curr_cell)
# temprow.append(cell_value)
if headers[curr_cell] not in d[worksheet_name][key]:
#do not overwrite
d[worksheet_name][key][headers[curr_cell]] = cell_value
# if curr_row>2: break
return d
def analyse_annotation_consistency(self):
NonXtal = self.data["Bulges_Constr_NonXtal_GPCRdb#"]
Xtal = self.data["Bulges_Constr_Xtal_GPCRdb#"]
output = {}
counting_xtal = {}
counting_non_xtal = {}
for entry_protein,vals in NonXtal.items():
anomalies=[]
anomalies_xtal=[]
for key,val in vals.items():
if "x" in val and "_" not in val:
if val.index("x") in [1,2]:
anomalies.append(val)
if vals['Xtal Templ'] in Xtal:
#print(Xtal[vals['Xtal Templ']])
for key,val in Xtal[vals['Xtal Templ']].items():
if "x" in val and "_" not in val:
if val.index("x") in [1,2]:
anomalies_xtal.append(val)
if entry_protein==vals['Xtal Templ']:
list1 = list(set(anomalies) - set(anomalies_xtal))
list2 = list(set(anomalies_xtal) - set(anomalies))
if list1 or list2:
for i in list1:
if i not in counting_non_xtal:
counting_non_xtal[i] = 0
counting_non_xtal[i] += 1
for i in list2:
if i not in counting_xtal:
counting_xtal[i] = 0
counting_xtal[i] += 1
#print("ISSUE!")
#print(entry_protein)
#print("NonXtal_anomalies",anomalies,"Xtal_anomalies",anomalies_xtal)
if list1: print(entry_protein,vals['Xtal Templ'],"Present in non-xtal, but not xtal",list1)
if list2: print(entry_protein,vals['Xtal Templ'],"Present in xtal, but not non-xtal",list2)
print("Overall")
print("Present in non-xtal, but not xtal",counting_xtal)
print("Present in xtal, but not non-xtal",counting_non_xtal)
structures = self.data["SegEnds_Xtal_Prot#"]
structures_non_xtal = self.data["SegEnds_NonXtal_Prot#"]
info = {}
for structure,vals in structures.items():
if structure.split("_")[-1] == "wt":
# print(structure)
entry = vals['UniProt']
info[entry] = {}
for key,val in vals.items():
# print(val,key)
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
info[entry][key] = val
if structures_non_xtal[entry][key]!=val:
print("error with ",entry,key,"Xtal sheet:",val,"NonXtal sheet:",structures_non_xtal[entry][key])
print(structures_non_xtal[entry])
print(vals)
#print(structure,info)
# with open(self.xtal_seg_end_file, 'w') as outfile:
# yaml.dump(pdb_info, outfile)
def dump_files(self):
structures = self.data["SegEnds_Xtal_Prot#"]
pdb_info = {}
pdb_info_all = {}
for structure,vals in structures.items():
if structure.split("_")[-1] == "wt":
continue
if structure.split("_")[-1] == "dist":
continue
#print(structure)
pdb_id = structure.split("_")[-1]
pdb_info[pdb_id] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info[pdb_id][key] = val
for structure,vals in structures.items():
entry = structure
pdb_info_all[entry] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info_all[entry][key] = val
data = self.data["SegEnds_Xtal_BW#"]
Xtal_SegEnds_BW = {}
for structure,vals in data.items():
entry = structure
Xtal_SegEnds_BW[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
if len(key)>3 and key[-1]!="b" and key[-1]!="e":
continue
Xtal_SegEnds_BW[entry][key] = val
data = self.data["SegEnds_NonXtal_BW#"]
NonXtal_SegEnds_BW = {}
for structure,vals in data.items():
entry = structure
NonXtal_SegEnds_BW[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
if len(key)>3 and key[-1]!="b" and key[-1]!="e" and key!="XtalTempl":
continue
NonXtal_SegEnds_BW[entry][key] = val
data = self.data["SegEnds_NonXtal_Prot#"]
NonXtal_SegEnds_Prot = {}
for structure,vals in data.items():
entry = structure
NonXtal_SegEnds_Prot[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
if len(key)>3 and key[-1]!="b" and key[-1]!="e" and key!="Xtal Templ":
continue
NonXtal_SegEnds_Prot[entry][key] = val
# data = self.data["Bulges_Constr_Xtal_GPCRdb#"]
# Xtal_Bulges_Constr_GPCRdb = {}
# for structure,vals in data.items():
# entry = structure
# Xtal_Bulges_Constr_GPCRdb[entry] = OrderedDict()
# for key,val in vals.items():
# if not key:
# continue
# Xtal_Bulges_Constr_GPCRdb[entry][key] = val
data = self.data["Bulges_Constr_NonXtal_GPCRdb#"]
NonXtal_Bulges_Constr_GPCRdb = {}
for structure,vals in data.items():
entry = structure
NonXtal_Bulges_Constr_GPCRdb[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
NonXtal_Bulges_Constr_GPCRdb[entry][key] = val
data = self.data["Seqs"]
Seqs = {}
for structure,vals in data.items():
entry = structure
Seqs[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
Seqs[entry][key] = val
pdb_info = OrderedDict(sorted(pdb_info.items()))
with open(self.mod_xtal_seg_end_file, 'w') as outfile:
yaml.dump(pdb_info, outfile, indent=4)
pdb_info_all = OrderedDict(sorted(pdb_info_all.items()))
with open(self.xtal_seg_end_file, 'w') as outfile:
yaml.dump(pdb_info_all, outfile, indent=4)
Xtal_SegEnds_BW = OrderedDict(sorted(Xtal_SegEnds_BW.items()))
with open(self.xtal_seg_end_bw_file, 'w') as outfile:
yaml.dump(Xtal_SegEnds_BW, outfile, indent=4)
NonXtal_SegEnds_BW = OrderedDict(sorted(NonXtal_SegEnds_BW.items()))
with open(self.non_xtal_seg_end_bw_file, 'w') as outfile:
yaml.dump(NonXtal_SegEnds_BW, outfile, indent=4)
NonXtal_SegEnds_Prot = OrderedDict(sorted(NonXtal_SegEnds_Prot.items()))
with open(self.non_xtal_seg_end_file, 'w') as outfile:
yaml.dump(NonXtal_SegEnds_Prot, outfile, indent=4)
# Xtal_Bulges_Constr_GPCRdb = OrderedDict(sorted(Xtal_Bulges_Constr_GPCRdb.items()))
# with open(self.xtal_anomalities_file, 'w') as outfile:
# yaml.dump(Xtal_Bulges_Constr_GPCRdb, outfile, indent=4)
NonXtal_Bulges_Constr_GPCRdb = OrderedDict(sorted(NonXtal_Bulges_Constr_GPCRdb.items()))
with open(self.all_anomalities_file, 'w') as outfile:
yaml.dump(NonXtal_Bulges_Constr_GPCRdb, outfile, indent=4)
Seqs = OrderedDict(sorted(Seqs.items()))
with open(self.sequence_file, 'w') as outfile:
yaml.dump(Seqs, outfile, indent=4)
def main_template_search(self):
output_csv = ''
changes = {}
counter = 0
for protein, values in self.data['Xtal_Templ'].items():
values = self.data['Xtal_Templ'][protein]
crh = ClosestReceptorHomolog(protein)
closest_hom = crh.find_closest_receptor_homolog()
if values['Template']!=closest_hom.entry_name:
changes[protein] = [values['Template'], closest_hom.entry_name]
output_csv+='{},{}\n'.format(protein, closest_hom.entry_name)
counter+=1
with open(os.sep.join([settings.DATA_DIR,'structure_data','annotation','xtal_templates.csv']),'w') as f:
f.write(output_csv)
if len(changes)>0:
print('Changed {} entries out of {} (reference: [changed_from, changed_to]):'.format(len(changes), counter))
print(changes)
print('INFO: xtal_templates.csv file updated. Please update Structural_Annotation.xlsx Xtal_Templ sheet with this .csv')
return changes
def find_representatives(self):
grouped = {}
counter = 0
xtals, nums, states, resolutions = [], [], [], []
out = OrderedDict()
exceptions = ['4L6R']
with open(os.sep.join([settings.DATA_DIR,'structure_data','annotation','xtal_representatives.yaml']), 'w') as outfile:
for key, values in self.data['SegEnds_Xtal_Prot#'].items():
if counter==0:
prev_rec = values['UniProt']
counter+=1
if values['PDB']=='_wt' or 'dist' in key:
continue
if values['Repr']!='-':
if values['Repr']=='Repr_Act':
actstat = 'Active'
elif values['Repr']=='Repr_Inter':
actstat = 'Intermediate'
elif values['Repr']=='Repr_Inact':
actstat = 'Inactive'
out[values['PDB']] = actstat
yaml.dump(out, outfile, default_flow_style=False)
# if prev_rec!=values['UniProt'] or counter==len(self.data['SegEnds_Xtal_Prot#']):
# if counter==len(self.data['SegEnds_Xtal_Prot#']):
# xtals.append(key)
# nums.append(values['#Res'])
# states.append(values['State'])
# resolutions.append(values['Resolution'])
# if len(xtals)>0:
# max_num_ia, max_x_ia, max_num_a, max_x_a, ia_count, a_count = 0, 0, 0, 0, 0, 0
# for x, n, s, r in zip(xtals, nums, states, resolutions):
# if s=='Inact':
# if ia_count==0:
# max_res_ia = r
# if n>max_num_ia and x[-4:] not in exceptions:
# max_num_ia = n
# max_x_ia = x
# max_res_ia = r
# elif n==max_num_ia and x[-4:] not in exceptions:
# if r<max_res_ia:
# max_num_ia = n
# max_x_ia = x
# max_res_ia = r
# ia_count+=1
# elif s=='Act':
# if a_count==0:
# max_res_a = r
# if n>max_num_a and x[-4:] not in exceptions:
# max_num_a = n
# max_x_a = x
# elif n==max_num_a and x[-4:] not in exceptions:
# if r<max_res_a:
# max_num_a = n
# max_x_a = x
# max_res_a = r
# a_count+=1
# for x, n in zip(xtals, nums):
# if x==max_x_ia:
# out[x] = 'Repr_Inact'
# elif x==max_x_a:
# out[x] = 'Repr_Act'
# else:
# out[x] = '-'
# yaml.dump(out, outfile, indent=4)
# xtals, nums, states, resolutions = [], [], [], []
# out = OrderedDict()
# xtals.append(key)
# nums.append(values['#Res'])
# states.append(values['State'])
# resolutions.append(values['Resolution'])
# else:
# xtals.append(key)
# nums.append(values['#Res'])
# states.append(values['State'])
# resolutions.append(values['Resolution'])
# prev_rec = values['UniProt']
|
apache-2.0
| 7,577,168,397,185,332,000
| 42.090573
| 132
| 0.506992
| false
| 3.752133
| false
| false
| false
|
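`parse_excel` above keys every worksheet row by its first column and normalises the header cells, substituting `i_<col>` for empty headers and suffixing duplicates with the column index. That normalisation step in isolation, pure Python with invented sample headers:

```python
def normalize_headers(cells):
    """Replace empty header cells with i_<col> and suffix duplicates with _<col>."""
    headers = []
    for i, h in enumerate(cells):
        if h == "":
            h = "i_" + str(i)
        if h in headers:
            h += "_" + str(i)
        headers.append(h)
    return headers


print(normalize_headers(["Key", "", "State", "State"]))
# -> ['Key', 'i_1', 'State', 'State_3']
```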
bwhite/picarus
|
server/holding/faces.py
|
1
|
2228
|
elif action == 'i/faces':
# TODO: Temporary, remove when done
names = set(['George_W_Bush', 'Colin_Powell', 'Tony_Blair', 'Donald_Rumsfeld', 'Gerhard_Schroeder',
'Ariel_Sharon', 'Hugo_Chavez', 'Junichiro_Koizumi', 'Serena_Williams', 'John_Ashcroft'])
self._slice_validate(start_row, stop_row, 'r')
import cv2
r = None
labels = {}
pos = 0
neg = 0
data = []
lab = []
num_train = 2000
for n, (cur_row, cur_cols) in enumerate(hadoopy_hbase.scanner(thrift, self.table,
start_row=start_row, per_call=10,
stop_row=stop_row, columns=['data:image', 'meta:class'])):
cur_class = cur_cols['meta:class']
if cur_class not in names:
continue
if cur_class not in labels:
labels[cur_class] = len(labels)
label = labels[cur_class]
image = cv2.imdecode(np.fromstring(cur_cols['data:image'], np.uint8), 0)
# Crop
image = np.ascontiguousarray(image[62:-62, 62:-62])
#if n == 0:
# cv2.imwrite('out.png', image)
if n < num_train:
lab.append(label)
data.append(image)
else:
if r is None:
r = cv2.createLBPHFaceRecognizer()
r.train(data, np.array(lab))
print('TRAINED-----------------------')
pred = r.predict(image)[0]
print((pred, label))
if pred == label:
pos += 1
else:
neg += 1
print((cur_class, image.shape, n, pos, neg, pos / float(pos + neg + .00000001)))
|
apache-2.0
| -8,709,221,514,537,708,000
| 52.047619
| 136
| 0.374327
| false
| 4.565574
| false
| false
| false
|
trondkr/OceanLight
|
IOwrite.py
|
1
|
2407
|
from datetime import datetime, timedelta
from netCDF4 import Dataset
from netCDF4 import num2date
import numpy as np
import time
import os
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@imr.no'
__created__ = datetime(2014, 1, 23)
__modified__ = datetime(2014, 1, 23)
__version__ = "0.1"
__status__ = "Development"
def help ():
"""
    This function generates a netCDF4 file and saves the running average values for
    specific years into a file for each IPCC AR5 model.
    Used together with extractIce.py
"""
def writeCMIP5File(modelName,scenario,myvarname,lon,lat,time,mydata,mydataanomaly,outfilename):
myformat='NETCDF3_CLASSIC'
if os.path.exists(outfilename):
os.remove(outfilename)
print "Results written to netcdf file: %s"%(outfilename)
    # Fall back to the raw variable name so "myvar" is always defined below.
    myvar = "SIC" if myvarname == "sic" else myvarname
f1 = Dataset(outfilename, mode='w', format=myformat)
f1.title = "IPCC AR5 %s"%(myvar)
f1.description = "IPCC AR5 running averages of %s for model %s for scenario %s"%(myvar,modelName,scenario)
f1.history = "Created " + str(datetime.now())
f1.source = "Trond Kristiansen (trond.kristiansen@imr.no)"
f1.type = "File in NetCDF3 format created using iceExtract.py"
f1.Conventions = "CF-1.0"
"""Define dimensions"""
f1.createDimension('x', len(lon))
f1.createDimension('y', len(lat))
f1.createDimension('time', None)
vnc = f1.createVariable('longitude', 'd', ('x',),zlib=False)
vnc.long_name = 'Longitude'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:] = lon
vnc = f1.createVariable('latitude', 'd', ('y',),zlib=False)
vnc.long_name = 'Latitude'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:] = lat
v_time = f1.createVariable('time', 'd', ('time',),zlib=False)
v_time.long_name = 'Years'
v_time.units = 'Years'
v_time.field = 'time, scalar, series'
v_time[:]=time
v_temp=f1.createVariable('SIC', 'd', ('time', 'y', 'x',),zlib=False)
v_temp.long_name = "Sea-ice area fraction (%)"
v_temp.units = "%"
v_temp.time = "time"
v_temp.field="SIC, scalar, series"
v_temp.missing_value = 1e20
if myvarname=='sic':
f1.variables['SIC'][:,:,:] = mydata
f1.close()
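
# Usage sketch (argument values are hypothetical): lon and lat are 1-D coordinate
# arrays, time is a 1-D array of years, and mydata has shape (time, lat, lon).
#
#     writeCMIP5File("GFDL-ESM2M", "rcp85", "sic", lon, lat, years,
#                    sic_running_mean, None, "GFDL-ESM2M_rcp85_sic.nc")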
|
mit
| -4,307,165,508,289,405,400
| 31.093333
| 111
| 0.607811
| false
| 3.09383
| false
| false
| false
|
tkolhar/robottelo
|
robottelo/ui/role.py
|
1
|
2544
|
# -*- encoding: utf-8 -*-
"""Implements Roles UI."""
from robottelo.constants import FILTER
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
from selenium.webdriver.support.select import Select
class Role(Base):
"""Implements the CRUD functions for Roles."""
def navigate_to_entity(self):
"""Navigate to Role entity page"""
Navigator(self.browser).go_to_roles()
def _search_locator(self):
"""Specify locator for Role entity search procedure"""
return locators['roles.role']
def create(self, name):
"""Creates new Role with default permissions."""
self.click(locators['roles.new'])
if self.wait_until_element(locators['roles.name']):
self.find_element(locators['roles.name']).send_keys(name)
self.click(common_locators['submit'])
else:
raise UIError(
'Could not create new role "{0}"'.format(name)
)
def delete(self, name, really=True):
"""Delete existing role."""
self.delete_entity(
name,
really,
locators['roles.delete'],
locators['roles.dropdown'],
)
def update(self, name, new_name=None, add_permission=False,
resource_type=None, permission_list=None, organization=None):
"""Update role name/permissions/org."""
element = self.search(name)
if element is None:
raise UIError('Could not find role "{0}"'.format(name))
if new_name:
element.click()
if self.wait_until_element(locators['roles.name']):
self.field_update('roles.name', new_name)
if add_permission:
strategy, value = locators['roles.dropdown']
self.click((strategy, value % name))
self.click(locators['roles.add_permission'])
if resource_type:
Select(
self.find_element(
locators['roles.select_resource_type'])
).select_by_visible_text(resource_type)
if permission_list:
self.configure_entity(
permission_list, FILTER['role_permission'])
if organization:
self.click(tab_locators['roles.tab_org'])
self.configure_entity(organization, FILTER['role_org'])
self.click(common_locators['submit'])
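
# Usage sketch (hypothetical values; assumes the Base class is constructed with an
# active browser session, as elsewhere in robottelo.ui):
#
#     role_ui = Role(browser)
#     role_ui.create('qa-role')
#     role_ui.update('qa-role', add_permission=True,
#                    resource_type='Architecture',
#                    permission_list=['view_architectures'])
#     role_ui.delete('qa-role')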
|
gpl-3.0
| -7,250,490,926,521,367,000
| 36.411765
| 76
| 0.584513
| false
| 4.282828
| false
| false
| false
|
lehins/django-wepay
|
djwepay/models.py
|
1
|
14141
|
"""All models are direct mappings to the WePay objects. By default only the
fields that correspond to the values returned from WePay lookup calls
(ex. `/account <https://www.wepay.com/developer/reference/account#lookup>`_) are
included in the models. All fields follow the rules outlined in `Storing Data
<https://www.wepay.com/developer/reference/storing_data>`_, unless otherwise
specified in the object's documentation. For that reason, values whose names end
with '_uri' (ex. ``account_uri``) are not included as model fields; instead they
are added as dynamic cached object properties, which are inherited from the Api
objects defined in :mod:`djwepay.api`.
"""
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from djwepay.api import *
from djwepay.fields import MoneyField
from djwepay.managers import *
from json_field import JSONField
__all__ = [
'App', 'User', 'Account', 'Checkout', 'Preapproval', 'Withdrawal',
'CreditCard', 'SubscriptionPlan', 'Subscription', 'SubscriptionCharge',
'get_wepay_model_name', 'get_wepay_model'
]
@python_2_unicode_compatible
class BaseModel(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
ordering = ['-date_created']
def save(self, *args, **kwargs):
''' On save, update timestamps '''
self.date_modified = timezone.now()
if not self.date_created:
self.date_created = self.date_modified
return super(BaseModel, self).save(*args, **kwargs)
def __str__(self):
return "%s: %s - %s" % (self._meta.verbose_name, self.pk, self.state)
class App(AppApi, BaseModel):
"""
This model stores all of the relevant WePay application information. Only one
instance of it at a time is supported per django application, which is
controlled by :ref:`WEPAY_APP_ID` setting.
"""
# fields returned with a lookup call
client_id = models.BigIntegerField(primary_key=True)
status = models.CharField(max_length=255)
state = models.CharField(max_length=255)
api_version = models.CharField(max_length=255)
theme_object = JSONField(null=True, blank=True)
gaq_domains = JSONField(null=True, blank=True)
    # Administrative objects attached to the account; they are null=True just
# for initialization of the App, but are required for proper functionality.
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='apps', null=True,
help_text="Account attached to App where you can collect money.")
user = models.ForeignKey(
get_wepay_model_name('user'), related_name='apps', null=True,
help_text="Owner of this App")
client_secret = models.CharField(max_length=255)
objects = AppManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('app')
db_table = 'djwepay_app'
verbose_name = 'WePay App'
class User(UserApi, BaseModel):
user_id = models.BigIntegerField(primary_key=True)
app = models.ForeignKey(
get_wepay_model_name('app'), related_name='users', null=True)
user_name = models.CharField(max_length=255)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
state = models.CharField(max_length=255)
# access_token=None means it has been revoked.
access_token = models.CharField(null=True, max_length=255)
token_type = "BEARER"
expires_in = models.BigIntegerField(null=True, blank=True)
objects = UserManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('user')
db_table = 'djwepay_user'
verbose_name = 'WePay User'
@property
def full_email(self):
return "%s <%s>" % (self.user_name, self.email)
class Account(AccountApi, BaseModel):
account_id = models.BigIntegerField(primary_key=True)
user = models.ForeignKey(
get_wepay_model_name('user'), related_name='accounts', null=True)
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
description = models.CharField(max_length=255)
reference_id = models.CharField(max_length=255, blank=True)
gaq_domains = JSONField(null=True, blank=True)
theme_object = JSONField(null=True, blank=True)
type = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
balances = JSONField(null=True, blank=True)
statuses = JSONField(null=True, blank=True)
action_reasons = JSONField(null=True, blank=True)
country = models.CharField(max_length=2)
currencies = JSONField(null=True, blank=True)
def _get_owner_user_id(self):
return self.user_id
def _set_owner_user_id(self, value):
if self.user is None or self.user_id != value:
try:
user = User.objects.get(user_id=value)
self.user = user
except User.DoesNotExist: pass
owner_user_id = property(_get_owner_user_id, _set_owner_user_id)
objects = AccountManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('account')
db_table = 'djwepay_account'
verbose_name = 'WePay Account'
class Checkout(CheckoutApi, BaseModel):
checkout_id = models.BigIntegerField(primary_key=True)
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='checkouts')
preapproval = models.ForeignKey(
get_wepay_model_name('preapproval'), related_name='checkouts', null=True)
state = models.CharField(max_length=255)
soft_descriptor = models.CharField(max_length=255)
short_description = models.CharField(max_length=255)
long_description = models.CharField(max_length=2047, blank=True)
currency = "USD"
amount = MoneyField(null=True)
fee = MoneyField(null=True)
gross = MoneyField(null=True)
app_fee = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
reference_id = models.CharField(max_length=255, blank=True)
payer_email = models.EmailField(max_length=255, blank=True)
payer_name = models.CharField(max_length=255, blank=True)
cancel_reason = models.CharField(max_length=255, blank=True)
refund_reason = models.CharField(max_length=255, blank=True)
auto_capture = models.BooleanField(default=True)
require_shipping = models.BooleanField(default=False)
shipping_address = JSONField(null=True)
tax = MoneyField(null=True)
amount_refunded = MoneyField(null=True)
amount_charged_back = MoneyField(null=True)
create_time = models.BigIntegerField(null=True)
mode = models.CharField(max_length=255)
objects = AccountObjectsManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('checkout')
db_table = 'djwepay_checkout'
verbose_name = 'WePay Checkout'
class Preapproval(PreapprovalApi, BaseModel):
preapproval_id = models.BigIntegerField(primary_key=True)
app = models.ForeignKey(
get_wepay_model_name('app'), null=True, related_name='preapprovals')
account = models.ForeignKey(
get_wepay_model_name('account'), null=True, related_name='preapprovals')
short_description = models.CharField(max_length=255)
long_description = models.CharField(max_length=2047, blank=True)
currency = "USD"
amount = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
state = models.CharField(max_length=255)
app_fee = MoneyField(null=True)
period = models.CharField(max_length=255)
frequency = models.IntegerField(null=True)
start_time = models.BigIntegerField(null=True)
end_time = models.BigIntegerField(null=True)
reference_id = models.CharField(max_length=255)
shipping_address = JSONField(null=True)
shipping_fee = MoneyField(null=True)
tax = MoneyField(null=True)
auto_recur = models.BooleanField(default=False)
payer_name = models.CharField(max_length=255)
payer_email = models.EmailField(max_length=255, blank=True)
create_time = models.BigIntegerField(null=True)
next_due_time = models.BigIntegerField(null=True)
last_checkout = models.ForeignKey(
get_wepay_model_name('checkout'), null=True, related_name='+')
last_checkout_time = models.BigIntegerField(null=True)
mode = models.CharField(max_length=255)
objects = PreapprovalManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('preapproval')
db_table = 'djwepay_preapproval'
verbose_name = 'WePay Preapproval'
class Withdrawal(WithdrawalApi, BaseModel):
withdrawal_id = models.BigIntegerField(primary_key=True)
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='withdrawals')
state = models.CharField(max_length=255)
amount = MoneyField(null=True)
note = models.CharField(max_length=255)
recipient_confirmed = models.NullBooleanField()
type = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
capture_time = models.BigIntegerField(null=True)
objects = AccountObjectsManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('withdrawal')
db_table = 'djwepay_withdrawal'
        verbose_name = 'WePay Withdrawal'
class CreditCard(CreditCardApi, BaseModel):
credit_card_id = models.BigIntegerField(primary_key=True)
app = models.ForeignKey(
get_wepay_model_name('app'), related_name='credit_cards')
credit_card_name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
user_name = models.CharField(max_length=255)
email = models.CharField(max_length=255, blank=True)
reference_id = models.CharField(max_length=255, blank=True)
create_time = models.BigIntegerField(null=True)
input_source = models.CharField(max_length=255, blank=True)
virtual_terminal_mode = models.CharField(max_length=255, blank=True)
expiration_month = models.IntegerField(null=True)
expiration_year = models.IntegerField(null=True)
last_four = models.CharField(max_length=255, blank=True)
class Meta(BaseModel.Meta):
abstract = is_abstract('credit_card')
db_table = 'djwepay_credit_card'
verbose_name = 'WePay Credit Card'
class SubscriptionPlan(SubscriptionPlanApi, BaseModel):
subscription_plan_id = models.BigIntegerField(primary_key=True)
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='subscription_plans')
name = models.CharField(max_length=255)
short_description = models.CharField(max_length=2047)
currency = models.CharField(max_length=3)
amount = MoneyField(null=True)
period = models.CharField(max_length=255)
app_fee = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
state = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
number_of_subscriptions = models.BigIntegerField(null=True)
trial_length = models.BigIntegerField(null=True)
setup_fee = MoneyField(null=True)
reference_id = models.CharField(max_length=255)
objects = AccountObjectsManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('subscription_plan')
db_table = 'djwepay_subscription_plan'
verbose_name = 'WePay Subscription Plan'
class Subscription(SubscriptionApi, BaseModel):
subscription_id = models.BigIntegerField(primary_key=True)
subscription_plan = models.ForeignKey(
get_wepay_model_name('subscription_plan'), related_name='subscriptions')
payer_name = models.CharField(max_length=255)
payer_email = models.CharField(max_length=255)
currency = models.CharField(max_length=255)
amount = MoneyField(null=True)
period = models.CharField(max_length=255)
app_fee = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
state = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
payment_method_id = models.BigIntegerField(null=True)
payment_method_type = models.CharField(max_length=255)
quantity = models.BigIntegerField(null=True)
mode = models.CharField(max_length=255)
trial_days_remaining = models.BigIntegerField(null=True)
transition_expire_time = models.BigIntegerField(null=True)
transition_prorate = models.NullBooleanField()
transition_quantity = models.BigIntegerField(null=True)
transition_subscription_plan = models.ForeignKey(
get_wepay_model_name('subscription_plan'),
related_name='transition_subscriptions')
reference_id = models.CharField(max_length=255)
objects = SubscriptionManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('subscription')
db_table = 'djwepay_subscription'
verbose_name = 'WePay Subscription'
class SubscriptionCharge(SubscriptionChargeApi, BaseModel):
subscription_charge_id = models.BigIntegerField(primary_key=True)
subscription_plan = models.ForeignKey(
get_wepay_model_name('subscription_plan'), related_name='subscription_charges')
subscription = models.ForeignKey(
get_wepay_model_name('subscription'), related_name='subscription_charges')
type = models.CharField(max_length=255)
amount = MoneyField(null=True)
currency = models.CharField(max_length=3)
fee = MoneyField(null=True)
app_fee = MoneyField(null=True)
gross = MoneyField(null=True)
quantity = models.BigIntegerField(null=True)
amount_refunded = MoneyField(null=True)
amount_charged_back = MoneyField(null=True)
state = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
end_time = models.BigIntegerField(null=True)
prorate_time = models.BigIntegerField(null=True)
class Meta(BaseModel.Meta):
abstract = is_abstract('subscription_charge')
db_table = 'djwepay_subscription_charge'
verbose_name = 'WePay Subscription Charge'
|
mit
| -1,835,955,764,543,517,700
| 39.173295
| 87
| 0.702779
| false
| 3.668223
| false
| false
| false
|
tobykurien/MakerDroid
|
assetsrc/public.mp3/skeinforge/skeinforge_tools/craft_plugins/comb.py
|
1
|
18588
|
"""
This page is in the table of contents.
Comb is a script to comb the extrusion hair of a gcode file.
The comb manual page is at:
http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Comb
Comb bends the extruder travel paths around holes in the slices, to avoid stringers. It moves the extruder to the inside of perimeters before turning the extruder on so any start up ooze will be inside the shape.
==Operation==
The default 'Activate Comb' checkbox is off. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Minimum Departure Distance over Perimeter Width===
Default is zero.
Defines the ratio of the minimum distance that the extruder will travel and loop before leaving a perimeter. A high value means the extruder will loop many times before leaving, so that the ooze will finish within the perimeter; a low value means the extruder will not loop, and the stringers will be thicker. Since it sometimes loops when there is no need, the default is zero.
===Running Jump Space over Perimeter Width===
Default is zero.
Defines the ratio of the running jump space that is added before going from one island to another to the perimeter width. The default is zero because an unnecessary running jump space is sometimes added; if you want to use it, a reasonable value is five. For an extruder with acceleration code, an extra space before leaving the island means that it will be going at high speed as it exits the island, which means the stringer across the islands will be thinner. If the extruder does not have acceleration code, the speed will not be greater, so there would be no benefit and 'Running Jump Space over Perimeter Width' should be left at zero.
==Examples==
The following examples comb the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and comb.py.
> python comb.py
This brings up the comb dialog.
> python comb.py Screw Holder Bottom.stl
The comb tool is parsing the file:
Screw Holder Bottom.stl
..
The comb tool has created the file:
.. Screw Holder Bottom_comb.gcode
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import comb
>>> comb.main()
This brings up the comb dialog.
>>> comb.writeOutput( 'Screw Holder Bottom.stl' )
The comb tool is parsing the file:
Screw Holder Bottom.stl
..
The comb tool has created the file:
.. Screw Holder Bottom_comb.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the Python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import settings
from skeinforge.skeinforge_utilities import skeinforge_craft
from skeinforge.skeinforge_utilities import skeinforge_polyfile
from skeinforge.skeinforge_utilities import skeinforge_profile
import sys
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getCraftedText( fileName, text, combRepository = None ):
"Comb a gcode linear move text."
return getCraftedTextFromText( gcodec.getTextIfEmpty( fileName, text ), combRepository )
def getCraftedTextFromText( gcodeText, combRepository = None ):
"Comb a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'comb' ):
return gcodeText
if combRepository == None:
combRepository = settings.getReadRepository( CombRepository() )
if not combRepository.activateComb.value:
return gcodeText
return CombSkein().getCraftedGcode( combRepository, gcodeText )
def getNewRepository():
"Get the repository constructor."
return CombRepository()
def writeOutput( fileName = '' ):
"Comb a gcode linear move file."
fileName = fabmetheus_interpret.getFirstTranslatorFileNameUnmodified( fileName )
if fileName != '':
skeinforge_craft.writeChainTextWithNounMessage( fileName, 'comb' )
class CombRepository:
"A class to handle the comb settings."
def __init__( self ):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository( 'skeinforge.skeinforge_tools.craft_plugins.comb.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Comb', self, '' )
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute( 'http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Comb' )
self.activateComb = settings.BooleanSetting().getFromValue( 'Activate Comb', self, False )
self.minimumDepartureDistanceOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.0, 'Minimum Departure Distance over Perimeter Width (ratio):', self, 50.0, 0.0 )
self.runningJumpSpaceOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.0, 'Running Jump Space over Perimeter Width (ratio):', self, 10.0, 0.0 )
self.executeTitle = 'Comb'
def execute( self ):
"Comb button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled )
for fileName in fileNames:
writeOutput( fileName )
class CombSkein:
"A class to comb a skein of extrusions."
def __init__( self ):
self.betweenTable = {}
self.betweenTable = {}
self.boundaryLoop = None
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.layer = None
self.layerTable = {}
self.layerZ = None
self.lineIndex = 0
self.lines = None
self.nextLayerZ = None
self.oldLocation = None
self.oldZ = None
self.operatingFeedRatePerMinute = None
self.travelFeedRatePerMinute = None
def addGcodePathZ( self, feedRateMinute, path, z ):
"Add a gcode path, without modifying the extruder, to the output."
for point in path:
self.distanceFeedRate.addGcodeMovementZWithFeedRate( feedRateMinute, point, z )
def addIfTravel( self, splitLine ):
"Add travel move around loops if the extruder is off."
location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
if not self.extruderActive and self.oldLocation != None:
if len( self.getBoundaries() ) > 0:
highestZ = max( location.z, self.oldLocation.z )
self.addGcodePathZ( self.travelFeedRatePerMinute, self.getPathsBetween( self.oldLocation.dropAxis( 2 ), location.dropAxis( 2 ) ), highestZ )
self.oldLocation = location
def addRunningJumpPath( self, end, loop, pathAround ):
"Get the running jump path from the perimeter to the intersection or running jump space."
if self.combRepository.runningJumpSpaceOverPerimeterWidth.value < 1.0:
return
if len( pathAround ) < 2:
return
loop = intercircle.getLargestInsetLoopFromLoopNoMatterWhat( loop, self.combInset )
penultimatePoint = pathAround[ - 2 ]
lastPoint = pathAround[ - 1 ]
nearestEndDistanceIndex = euclidean.getNearestDistanceIndex( end, loop )
nearestEndIndex = ( nearestEndDistanceIndex.index + 1 ) % len( loop )
nearestEnd = euclidean.getNearestPointOnSegment( loop[ nearestEndDistanceIndex.index ], loop[ nearestEndIndex ], end )
nearestEndMinusLast = nearestEnd - lastPoint
nearestEndMinusLastLength = abs( nearestEndMinusLast )
if nearestEndMinusLastLength <= 0.0:
return
nearestEndMinusLastSegment = nearestEndMinusLast / nearestEndMinusLastLength
betweens = self.getBetweens()
if self.getIsRunningJumpPathAdded( betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, self.runningJumpSpace ):
return
doubleCombInset = 2.0 * self.combInset
shortJumpSpace = 0.5 * self.runningJumpSpace
if shortJumpSpace < doubleCombInset:
return
if self.getIsRunningJumpPathAdded( betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, shortJumpSpace ):
return
shortJumpSpace = 0.25 * self.runningJumpSpace
if shortJumpSpace < doubleCombInset:
return
self.getIsRunningJumpPathAdded( betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, shortJumpSpace )
def addToLoop( self, location ):
"Add a location to loop."
if self.layer == None:
if not self.oldZ in self.layerTable:
self.layerTable[ self.oldZ ] = []
self.layer = self.layerTable[ self.oldZ ]
if self.boundaryLoop == None:
self.boundaryLoop = [] #starting with an empty array because a closed loop does not have to restate its beginning
self.layer.append( self.boundaryLoop )
if self.boundaryLoop != None:
self.boundaryLoop.append( location.dropAxis( 2 ) )
def getBetweens( self ):
"Set betweens for the layer."
if self.layerZ in self.betweenTable:
return self.betweenTable[ self.layerZ ]
if self.layerZ not in self.layerTable:
return []
self.betweenTable[ self.layerZ ] = []
for boundaryLoop in self.layerTable[ self.layerZ ]:
self.betweenTable[ self.layerZ ] += intercircle.getInsetLoopsFromLoop( self.betweenInset, boundaryLoop )
return self.betweenTable[ self.layerZ ]
def getBoundaries( self ):
"Get boundaries for the layer."
if self.layerZ in self.layerTable:
return self.layerTable[ self.layerZ ]
return []
def getCraftedGcode( self, combRepository, gcodeText ):
"Parse gcode text and store the comb gcode."
self.combRepository = combRepository
self.lines = gcodec.getTextLines( gcodeText )
self.parseInitialization( combRepository )
for lineIndex in xrange( self.lineIndex, len( self.lines ) ):
line = self.lines[ lineIndex ]
self.parseBoundariesLayers( combRepository, line )
for lineIndex in xrange( self.lineIndex, len( self.lines ) ):
line = self.lines[ lineIndex ]
self.parseLine( line )
return self.distanceFeedRate.output.getvalue()
def getIsAsFarAndNotIntersecting( self, begin, end ):
"Determine if the point on the line is at least as far from the loop as the center point."
if begin == end:
print( 'this should never happen but it does not really matter, begin == end in getIsAsFarAndNotIntersecting in comb.' )
print( begin )
return True
return not euclidean.isLineIntersectingLoops( self.getBetweens(), begin, end )
def getIsRunningJumpPathAdded( self, betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, runningJumpSpace ):
"Add a running jump path if possible, and return if it was added."
jumpStartPoint = lastPoint - nearestEndMinusLastSegment * runningJumpSpace
if euclidean.isLineIntersectingLoops( betweens, penultimatePoint, jumpStartPoint ):
return False
pathAround[ - 1 ] = jumpStartPoint
return True
def getPathBetween( self, betweenFirst, betweenSecond, isLeavingPerimeter, loopFirst ):
"Add a path between the perimeter and the fill."
loopFirst = intercircle.getLargestInsetLoopFromLoopNoMatterWhat( loopFirst, self.combInset )
nearestFirstDistanceIndex = euclidean.getNearestDistanceIndex( betweenFirst, loopFirst )
nearestSecondDistanceIndex = euclidean.getNearestDistanceIndex( betweenSecond, loopFirst )
firstBeginIndex = ( nearestFirstDistanceIndex.index + 1 ) % len( loopFirst )
secondBeginIndex = ( nearestSecondDistanceIndex.index + 1 ) % len( loopFirst )
nearestFirst = euclidean.getNearestPointOnSegment( loopFirst[ nearestFirstDistanceIndex.index ], loopFirst[ firstBeginIndex ], betweenFirst )
nearestSecond = euclidean.getNearestPointOnSegment( loopFirst[ nearestSecondDistanceIndex.index ], loopFirst[ secondBeginIndex ], betweenSecond )
clockwisePath = [ nearestFirst ]
widdershinsPath = [ nearestFirst ]
loopBeforeLeaving = euclidean.getAroundLoop( firstBeginIndex, firstBeginIndex, loopFirst )
if nearestFirstDistanceIndex.index == nearestSecondDistanceIndex.index:
if euclidean.getPathLength( widdershinsPath ) < self.minimumDepartureDistance:
widdershinsPath = [ nearestFirst ] + loopBeforeLeaving
reversedLoop = loopBeforeLeaving[ : ]
reversedLoop.reverse()
clockwisePath = [ nearestFirst ] + reversedLoop
else:
widdershinsLoop = euclidean.getAroundLoop( firstBeginIndex, secondBeginIndex, loopFirst )
widdershinsPath += widdershinsLoop
clockwiseLoop = euclidean.getAroundLoop( secondBeginIndex, firstBeginIndex, loopFirst )
clockwiseLoop.reverse()
clockwisePath += clockwiseLoop
clockwisePath.append( nearestSecond )
widdershinsPath.append( nearestSecond )
if euclidean.getPathLength( widdershinsPath ) > euclidean.getPathLength( clockwisePath ):
loopBeforeLeaving.reverse()
widdershinsPath = clockwisePath
if isLeavingPerimeter:
totalDistance = euclidean.getPathLength( widdershinsPath )
loopLength = euclidean.getPolygonLength( loopBeforeLeaving )
while totalDistance < self.minimumDepartureDistance:
widdershinsPath = [ nearestFirst ] + loopBeforeLeaving + widdershinsPath[ 1 : ]
totalDistance += loopLength
return widdershinsPath
def getPathsBetween( self, begin, end ):
"Insert paths between the perimeter and the fill."
aroundBetweenPath = []
points = [ begin ]
lineX = []
switchX = []
segment = euclidean.getNormalized( end - begin )
segmentYMirror = complex( segment.real, - segment.imag )
beginRotated = segmentYMirror * begin
endRotated = segmentYMirror * end
y = beginRotated.imag
boundaries = self.getBoundaries()
for boundaryIndex in xrange( len( boundaries ) ):
boundary = boundaries[ boundaryIndex ]
boundaryRotated = euclidean.getPointsRoundZAxis( segmentYMirror, boundary )
euclidean.addXIntersectionIndexesFromLoopY( boundaryRotated, boundaryIndex, switchX, y )
switchX.sort()
maximumX = max( beginRotated.real, endRotated.real )
minimumX = min( beginRotated.real, endRotated.real )
for xIntersection in switchX:
if xIntersection.x > minimumX and xIntersection.x < maximumX:
point = segment * complex( xIntersection.x, y )
points.append( point )
lineX.append( xIntersection )
points.append( end )
lineXIndex = 0
pathBetweenAdded = False
while lineXIndex < len( lineX ) - 1:
lineXFirst = lineX[ lineXIndex ]
lineXSecond = lineX[ lineXIndex + 1 ]
loopFirst = boundaries[ lineXFirst.index ]
isLeavingPerimeter = False
if lineXSecond.index != lineXFirst.index:
isLeavingPerimeter = True
pathBetween = self.getPathBetween( points[ lineXIndex + 1 ], points[ lineXIndex + 2 ], isLeavingPerimeter, loopFirst )
if isLeavingPerimeter:
if not pathBetweenAdded:
self.addRunningJumpPath( points[ lineXIndex + 3 ], boundaries[ lineXSecond.index ], pathBetween )
pathBetweenAdded = True
else:
pathBetween = self.getSimplifiedAroundPath( points[ lineXIndex ], points[ lineXIndex + 3 ], loopFirst, pathBetween )
pathBetweenAdded = True
aroundBetweenPath += pathBetween
lineXIndex += 2
return aroundBetweenPath
def getSimplifiedAroundPath( self, begin, end, loop, pathAround ):
"Get the simplified path between the perimeter and the fill."
pathAround = self.getSimplifiedBeginPath( begin, loop, pathAround )
return self.getSimplifiedEndPath( end, loop, pathAround )
def getSimplifiedBeginPath( self, begin, loop, pathAround ):
"Get the simplified begin path between the perimeter and the fill."
if len( pathAround ) < 2:
return pathAround
pathIndex = 0
while pathIndex < len( pathAround ) - 1:
if not self.getIsAsFarAndNotIntersecting( begin, pathAround[ pathIndex + 1 ] ):
return pathAround[ pathIndex : ]
pathIndex += 1
return pathAround[ - 1 : ]
def getSimplifiedEndPath( self, end, loop, pathAround ):
"Get the simplified end path between the perimeter and the fill."
if len( pathAround ) < 2:
return pathAround
pathIndex = len( pathAround ) - 1
while pathIndex > 0:
if not self.getIsAsFarAndNotIntersecting( end, pathAround[ pathIndex - 1 ] ):
return pathAround[ : pathIndex + 1 ]
pathIndex -= 1
return pathAround[ : 1 ]
def parseBoundariesLayers( self, combRepository, line ):
"Parse a gcode line."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon( line )
if len( splitLine ) < 1:
return
firstWord = splitLine[ 0 ]
if firstWord == 'M103':
self.boundaryLoop = None
elif firstWord == '(<boundaryPoint>':
location = gcodec.getLocationFromSplitLine( None, splitLine )
self.addToLoop( location )
elif firstWord == '(<layer>':
self.boundaryLoop = None
self.layer = None
self.oldZ = float( splitLine[ 1 ] )
def parseInitialization( self, combRepository ):
"Parse gcode initialization and store the parameters."
for self.lineIndex in xrange( len( self.lines ) ):
line = self.lines[ self.lineIndex ]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon( line )
firstWord = gcodec.getFirstWord( splitLine )
self.distanceFeedRate.parseSplitLine( firstWord, splitLine )
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addLine( '(<procedureDone> comb </procedureDone>)' )
return
elif firstWord == '(<perimeterWidth>':
perimeterWidth = float( splitLine[ 1 ] )
self.combInset = 1.2 * perimeterWidth
self.betweenInset = 0.4 * perimeterWidth
self.uTurnWidth = 0.5 * self.betweenInset
self.minimumDepartureDistance = combRepository.minimumDepartureDistanceOverPerimeterWidth.value * perimeterWidth
self.runningJumpSpace = combRepository.runningJumpSpaceOverPerimeterWidth.value * perimeterWidth
elif firstWord == '(<travelFeedRatePerSecond>':
self.travelFeedRatePerMinute = 60.0 * float( splitLine[ 1 ] )
self.distanceFeedRate.addLine( line )
def parseLine( self, line ):
"Parse a gcode line and add it to the comb skein."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon( line )
if len( splitLine ) < 1:
return
firstWord = splitLine[ 0 ]
if firstWord == 'G1':
self.addIfTravel( splitLine )
self.layerZ = self.nextLayerZ
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
elif firstWord == '(<layer>':
self.nextLayerZ = float( splitLine[ 1 ] )
if self.layerZ == None:
self.layerZ = self.nextLayerZ
self.distanceFeedRate.addLine( line )
def main():
"Display the comb dialog."
if len( sys.argv ) > 1:
writeOutput( ' '.join( sys.argv[ 1 : ] ) )
else:
settings.startMainLoopFromConstructor( getNewRepository() )
if __name__ == "__main__":
main()
|
gpl-3.0
| 2,445,846,317,411,144,000
| 43.257143
| 642
| 0.755918
| false
| 3.327009
| false
| false
| false
|
wildfish/django-nodetest
|
nodetest/utils.py
|
1
|
1140
|
from os.path import join
from uuid import uuid4
from shutil import copyfile
_js_repl = """;(function () {
var repl = require('repl');
var os = require('os');
var empty = '(' + os.EOL + ')';
repl.start({
prompt: "NODE> ",
eval: function (cmd, context, filename, callback) {
if (cmd === ".scope") cmd = empty;
if (cmd === empty) return callback();
var result = eval(cmd);
callback(null, result)
}
})
})();
"""
def _make_temp_name(js_src):
return '{}/__{}.js'.format(
js_src.rsplit('/', 1)[0],
uuid4().hex
)
def make_temp_file(root_dir, js_src):
temp_name = _make_temp_name(js_src)
src = join(root_dir, js_src)
dst = join(root_dir, temp_name)
full_dest = copyfile(src, dst)
return {
'absolute_path': full_dest,
'relative_path': temp_name
}
def parse_repl(src):
with open(src, 'r') as src_file:
file_content = src_file.read()
file_content = file_content.replace('/*REPL*/', _js_repl)
with open(src, 'w') as src_file:
src_file.write(file_content)
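
# Usage sketch (paths are hypothetical): copy the source file next to itself under
# a temporary name, then swap the /*REPL*/ marker for the embedded Node REPL snippet.
#
#     temp = make_temp_file('/path/to/project', 'src/app.js')
#     parse_repl(temp['absolute_path'])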
|
bsd-3-clause
| -7,775,578,083,173,779,000
| 22.75
| 65
| 0.536842
| false
| 3.275862
| false
| false
| false
|
programa-stic/barf-project
|
barf/core/reil/emulator/memory.py
|
1
|
7471
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import random
REIL_MEMORY_ENDIANNESS_LE = 0x0 # Little Endian
REIL_MEMORY_ENDIANNESS_BE = 0x1 # Big Endian
class ReilMemory(object):
"""A REIL memory model (byte addressable).
"""
def __init__(self, address_size):
# TODO: Set endianness through a parameter.
# TODO: Check that all addresses have size address_size.
# TODO: Use endianness parameter.
# Memory's address size.
self.__address_size = address_size
# Memory's endianness.
self.__endianness = REIL_MEMORY_ENDIANNESS_LE
# Dictionary that implements the memory itself.
self._memory = {}
@property
def address_size(self):
return self.__address_size
# Read methods
# ======================================================================== #
def read(self, address, size):
"""Read arbitrary size content from memory.
"""
value = 0x0
for i in range(0, size):
value |= self._read_byte(address + i) << (i * 8)
return value
def _read_byte(self, address):
"""Read a byte from memory.
"""
# Initialize memory location with a random value.
if address not in self._memory:
self._memory[address] = random.randint(0x00, 0xff)
return self._memory[address]
# Write methods
# ======================================================================== #
def write(self, address, size, value):
"""Write arbitrary size content to memory.
"""
for i in range(0, size):
self.__write_byte(address + i, (value >> (i * 8)) & 0xff)
def __write_byte(self, address, value):
"""Write byte in memory.
"""
self._memory[address] = value & 0xff
# Misc methods
# ======================================================================== #
def reset(self):
# Dictionary that implements the memory itself.
self._memory = {}
# Magic methods
# ======================================================================== #
def __str__(self):
lines = []
for addr in sorted(self._memory.keys()):
lines += ["0x%08x : 0x%08x" % (addr, self._memory[addr])]
return "\n".join(lines)
class ReilMemoryEx(ReilMemory):
"""Reil memory extended class"""
def __init__(self, address_size):
super(ReilMemoryEx, self).__init__(address_size)
# Previous state of memory.
self.__memory_prev = {}
# Write operations counter.
self.__write_count = 0
# Read methods
# ======================================================================== #
def read_inverse(self, value, size):
"""Return a list of memory addresses that contain the specified
value.
"""
addr_candidates = [addr for addr, val in self._memory.items() if val == (value & 0xff)]
addr_matches = []
for addr in addr_candidates:
match = True
for i in range(0, size):
byte_curr = (value >> (i * 8)) & 0xff
try:
match = self._memory[addr + i] == byte_curr
except KeyError:
match = False
if not match:
break
if match:
addr_matches += [addr]
return addr_matches
def try_read(self, address, size):
"""Try to read memory content at specified address.
If any location was not written before, it returns a tuple
(False, None). Otherwise, it returns (True, memory content).
"""
value = 0x0
for i in range(0, size):
addr = address + i
if addr in self._memory:
value |= self._read_byte(addr) << (i * 8)
else:
return False, None
return True, value
def try_read_prev(self, address, size):
"""Try to read previous memory content at specified address.
If any location was not written before, it returns a tuple
(False, None). Otherwise, it returns (True, memory content).
"""
value = 0x0
for i in range(0, size):
addr = address + i
if addr in self.__memory_prev:
_, val_byte = self.__try_read_byte_prev(addr)
value |= val_byte << (i * 8)
else:
return False, None
return True, value
def __try_read_byte_prev(self, address):
"""Read previous value for memory location.
Return a tuple (True, Byte) in case of successful read,
(False, None) otherwise.
"""
        # Return failure if there is no previously recorded value for this address.
if address not in self.__memory_prev:
return False, None
return True, self.__memory_prev[address]
# Write methods
# ======================================================================== #
def write(self, address, size, value):
"""Write arbitrary size content to memory.
"""
for i in range(0, size):
self.__write_byte(address + i, (value >> (i * 8)) & 0xff)
self.__write_count += 1
def __write_byte(self, address, value):
"""Write byte in memory.
"""
# Save previous address content.
if address in self._memory:
self.__memory_prev[address] = self._memory[address]
self._memory[address] = value & 0xff
# Misc methods
# ======================================================================== #
def reset(self):
super(ReilMemoryEx, self).reset()
# Previous state of memory.
self.__memory_prev = {}
# Write operations counter.
self.__write_count = 0
def get_addresses(self):
"""Get accessed addresses.
"""
return list(self._memory.keys())
def get_write_count(self):
"""Get number of write operations performed on the memory.
"""
return self.__write_count
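

# Minimal usage sketch (illustrative only, not part of the original module): shows
# the byte-addressable read/write round trip and the (ok, value) convention of
# try_read for locations that were never written.
if __name__ == "__main__":
    mem = ReilMemoryEx(32)
    mem.write(0x1000, 4, 0xdeadbeef)
    assert mem.read(0x1000, 4) == 0xdeadbeef
    ok, value = mem.try_read(0x2000, 4)
    assert (ok, value) == (False, None)
    print(mem)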
|
bsd-2-clause
| 517,048,683,388,702
| 30.259414
| 95
| 0.549458
| false
| 4.575015
| false
| false
| false
|
PySimulator/PySimulator
|
PySimulator/Plugins/SimulationResult/__init__.py
|
1
|
1172
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
def get_immediate_subdirectories(directory):
return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name)) and name[0] != '.']
PlugInNames = get_immediate_subdirectories(os.path.abspath(os.path.dirname(__file__)))
plugin = []
for i in range(len(PlugInNames)):
try:
mod = __import__(PlugInNames[i] + "." + PlugInNames[i], locals(), globals(), [PlugInNames[i] + "." + PlugInNames[i]])
plugin.append(mod)
except ImportError as e:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + e.message + "'"
except SyntaxError as e:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + str(e) + "'"
except Exception as e:
info = str(e)
if info == '' or info is None:
print PlugInNames[i] + " plug-in could not be loaded."
else:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + info + "'"
fileExtension = []
description = []
for p in plugin:
fileExtension.append(p.fileExtension)
description.append(p.description)
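
# Layout sketch (names hypothetical): a result-reader plug-in "MyReader" would live
# in MyReader/MyReader.py inside this package and define the module-level attributes
# collected above, e.g.:
#
#     fileExtension = 'csv'
#     description = 'CSV simulation result file'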
|
lgpl-3.0
| -7,544,794,805,492,171,000
| 38
| 125
| 0.625641
| false
| 3.362069
| false
| false
| false
|
tsw-apropos/mapbiographer
|
mapBiographer/polygon_tool.py
|
1
|
7016
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
lmbMapToolPolygon
A QGIS plugin
 Effectively conduct direct-to-digital map biographies and traditional land
use studies
-------------------
begin : 2014-05-13
copyright : (C) 2014 by Apropos Information Systems Inc.
email : info@aproposinfosystems.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* any later version. *
* *
* Derived from vertexTracerTool.py & freehandeditingtool.py *
* *
***************************************************************************/
"""
from PyQt4 import QtCore, QtGui
from qgis.core import *
from qgis.gui import *
import qgis.utils
import time
class lmbMapToolPolygon(QgsMapTool):
rbFinished = QtCore.pyqtSignal('QgsGeometry*')
def __init__(self, canvas):
# get canvas
QgsMapTool.__init__(self,canvas)
self.canvas = canvas
# control variables
self.started = False
self.firstTimeOnSegment = True
# related to temp output but function unclear
self.prevPoint = None
# custom cursor
self.cursor = QtGui.QCursor(QtGui.QPixmap(["16 16 3 1",
" c None",
". c #FF0000",
"+ c #000000",
" ",
" +.+ ",
" ++.++ ",
" +.....+ ",
" +. .+ ",
" +. . .+ ",
" +. . .+ ",
" ++. . .++",
" ... ...+... ...",
" ++. . .++",
" +. . .+ ",
" +. . .+ ",
" ++. .+ ",
" ++.....+ ",
" ++.++ ",
" +.+ "]))
#
# track when delete is released to permit deletion of last point
def keyReleaseEvent(self, event):
# remove the last added point when the delete key is pressed
if event.key() == QtCore.Qt.Key_Backspace:
self.rb.removeLastPoint()
#
# canvas move events
def canvasMoveEvent(self,event):
if self.started:
#Get the click
x = event.pos().x()
y = event.pos().y()
eventPoint = QtCore.QPoint(x,y)
layer = self.canvas.currentLayer()
if layer <> None:
point = QgsMapToPixel.toMapCoordinates(self.canvas.getCoordinateTransform(), x, y)
self.rb.movePoint(point)
#
# canvas release events
def canvasReleaseEvent(self,event):
# left click
if event.button() == 1:
# select the current layer
layer = self.canvas.currentLayer()
# if it is the start of a polygon set the rubberband up
if self.started == False:
self.rb = QgsRubberBand(self.canvas, layer.geometryType())
self.rb.setColor(QtGui.QColor('#ff0000'))
self.rb.setWidth(1)
self.rb.setOpacity(0.5)
self.started = True
# get coordinates if we are connecting to an editable layer
if layer <> None:
x = event.pos().x()
y = event.pos().y()
point = self.canvas.getCoordinateTransform().toMapCoordinates(x, y)
# put rubber band at cursor
self.rb.movePoint(point)
# set new point
self.appendPoint(point)
# right click
elif event.button() == 2:
self.sendGeometry()
#
# append point
def appendPoint(self, point):
# only add point if different from previous
if not (self.prevPoint == point) :
self.rb.addPoint(point)
self.prevPoint = QgsPoint(point)
#
# send geometry
def sendGeometry(self):
layer = self.canvas.currentLayer()
coords = []
#
# NOTE: code from vertex tracer skipped first point by using range of
# 1 to # of vertices. Changed to zero to include all points and have a
# complete feature.
# Also skip last point when right click was pressed to avoid extra points
# being placed
#
[coords.append(self.rb.getPoint(0,i)) for i in range(0,self.rb.numberOfVertices()-1)]
coords_tmp = coords[:]
coords = []
crsSrc = QgsCoordinateReferenceSystem(qgis.utils.iface.mapCanvas().mapSettings().destinationCrs())
crsDest = QgsCoordinateReferenceSystem(layer.crs())
xform = QgsCoordinateTransform(crsSrc,crsDest)
for point in coords_tmp:
transformedPoint = xform.transform(point)
coords.append(transformedPoint)
coords_tmp = coords[:]
coords = []
lastPt = None
for pt in coords_tmp:
if (lastPt <> pt) :
coords.append(pt)
lastPt = pt
g = QgsGeometry().fromPolygon([coords])
if g <> None and g.isGeosValid():
self.rbFinished.emit(g)
self.started = False
#
# activate tool
def activate(self):
self.canvas.setCursor(self.cursor)
#
# deactivate tool
def deactivate(self):
try:
self.rb.reset()
except AttributeError:
pass
#
# send false if queried if zoom tool
def isZoomTool(self):
return False
#
# send false if queried if transient (performs zoom or pan operation)
def isTransient(self):
return False
#
# send true if queried if edit tool
def isEditTool(self):
return True
|
gpl-2.0
| 3,999,823,729,145,701,000
| 34.434343
| 106
| 0.428877
| false
| 4.968839
| false
| false
| false
|
rahul-x-verma/Polaris
|
polaris/static/algorithm/map.py
|
1
|
1660
|
from pprint import pprint
class Map():
"""
Stores a map of the transit system in a graph data structure. Each vertex
represents one bus stopping at a given geographical location. Each edge
represents the time between one stop and the next (including waiting time if
applicable).
"""
def __init__(self):
"""
Creates an empty map.
"""
self.adjacency_list = {}
self.vertices = {}
def insert(self, stop):
"""
Adds a vertex and its neighbors to the graph.
"""
self.vertices[stop.uid] = stop
self.adjacency_list[stop] = {}
for neighbor in stop.neighbors:
self.adjacency_list[stop][neighbor[0]] = neighbor[1]
def find_path(self, start, end):
"""
        Uses a stack-based graph search (effectively depth-first) to find the
        buses necessary to go between two stops.
"""
S = [start]
pi = {}
while end not in pi.keys():
if not S:
return []
curr = S.pop()
for vertex in self.adjacency_list[curr]:
if vertex not in pi.keys():
S.append(vertex)
pi[vertex] = curr
if pi[end]:
result = [end]
while (start != end):
end = pi[end]
result = [end] + result
return result
else:
return []
def distance(self, v1, v2):
return self.adjacency_list[v1][v2]
def show(self):
"""
Returns a human-readable version of the map.
"""
pprint(self.adjacency_list)
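
# Usage sketch (illustrative; assumes stop objects expose a `uid` attribute and a
# `neighbors` list of (neighbor, travel_time) pairs, as insert() expects):
#
#     transit_map = Map()
#     for stop in stops:
#         transit_map.insert(stop)
#     route = transit_map.find_path(stops[0], stops[-1])   # [] when unreachable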
|
apache-2.0
| -4,744,783,799,445,522,000
| 26.213115
| 80
| 0.506627
| false
| 4.450402
| false
| false
| false
|
sarthak2108/AI-Projects
|
Minesweeper/convert2CNF.py
|
1
|
4718
|
import sys
def parse_file(filepath):
############################################
# Read the layout file to the board array.
# Note how the order in which the rows are
# read is reversed in the final array. This
    # accommodates the requirement that positions
    # are numbered from the bottom left.
############################################
board = []
fin = open(filepath)
line = fin.readline()
tokens = line.replace('\n', '').split(' ')
height = int(tokens[0])
width = int(tokens[1])
reverse_board = []
for line in fin:
tokens = line.replace('\n', '').split(',')
row = []
for each_token in tokens:
if each_token == 'X':
row.append(each_token)
else:
row.append(int(each_token))
reverse_board.append(row)
fin.close()
while len(reverse_board) != 0:
board.append(reverse_board.pop())
return board
def convert2CNF(board, output):
############################################
# Interpret the number of constraints.
#
# We count the total number of clauses and
    # variables that are necessary to format
    # the input file for MINISAT. Each variable
    # is named after the board position it
    # represents. A positive sign means it has a
    # bomb, while a negative sign indicates
    # otherwise.
#
# We use the following trick to reduce the
# exponential number of clauses generated in
# converting DNF to CNF to something that is
# polynomial.
    # We simply compute which combinations of
    # the (up to eight) adjacent positions are
    # guaranteed to contain at least one bomb.
    # We only consider the minimum number of
    # positions required to guarantee a bomb, as
    # all the other rules are entailed from them.
    # This drastically reduces the computational
    # cost from exponential to polynomial.
############################################
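    # Worked example (illustrative): a revealed cell showing 2 with three hidden
    # neighbours A, B, C gives permute = [0, 1, 1]; its distinct permutations
    # (0,1,1), (1,0,1), (1,1,0) become the clauses (B v C), (A v C), (A v B).
    # Every pair of neighbours must contain a bomb, which is exactly the
    # "at least 2 of {A, B, C}" constraint.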
from itertools import permutations
height = len(board)
width = len(board[0])
number_of_variables = height * width
number_of_clauses = 0
clauses = []
for i in range(height):
for j in range(width):
if board[i][j] != 'X':
position = (i * width) + j + 1
pos = get_adjacent_positions(i , j, height, width)
if board[i][j] == 0:
number_of_clauses += 1
clauses.append([-position])
for p in pos:
clauses.append([-p])
number_of_clauses += 1
else:
permute = []
for k in range(len(pos)):
if k < board[i][j] - 1:
permute.append(0)
else:
permute.append(1)
permuted = list(set(list(permutations(permute))))
number_of_clauses += (len(permuted) + 1)
clauses.append([-position])
for p in permuted:
clause = []
for bits in range(len(p)):
if p[bits] == 1:
clause.append(pos[bits])
clauses.append(clause)
fout = open(output, 'w')
string = 'p cnf ' + str(number_of_variables) + ' ' + str(number_of_clauses)
fout.write(string)
for clause in clauses:
string = '\n'
for literal in clause:
string += str(literal) + ' '
string += '0'
fout.write(string)
fout.close()
def get_adjacent_positions(i, j, height, width):
############################################
# Determines the adjacent positions of a
# particular position of the board array.
############################################
pos = []
if i - 1 >= 0:
if j - 1 >= 0:
pos.append(((i - 1) * width) + (j - 1) + 1)
pos.append(((i - 1) * width) + j + 1)
if j + 1 < width:
pos.append(((i - 1) * width) + (j + 1) + 1)
if j - 1 >= 0:
pos.append((i * width) + (j - 1) + 1)
if j + 1 < width:
pos.append((i * width) + (j + 1) + 1)
if i + 1 < height:
if j - 1 >= 0:
pos.append(((i + 1) * width) + (j - 1) + 1)
pos.append(((i + 1) * width) + j + 1)
if j + 1 < width:
pos.append(((i + 1) * width) + (j + 1) + 1)
return pos
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Layout or output file not specified.'
exit(-1)
board = parse_file(sys.argv[1])
convert2CNF(board, sys.argv[2])
|
mit
| 3,087,753,827,598,349,000
| 35.015267
| 79
| 0.481984
| false
| 4.102609
| false
| false
| false
|
ctn-waterloo/nengo_pushbot
|
examples/robot_control_keyboard.py
|
1
|
1082
|
# control the motors of the robot
# also contains code for connecting to SpiNNaker
import nengo
spinnaker = False
import nengo_pushbot
import numpy as np
model = nengo.Network(label='pushbot')
with model:
input = nengo.Node([0,0], label='keyboard')
#a = nengo.Ensemble(500, dimensions=2, label='a')
if spinnaker:
bot = nengo_pushbot.PushBotNetwork('1,0,EAST')
else:
bot = nengo_pushbot.PushBotNetwork('10.162.177.49')
bot.show_image()
nengo.Connection(input, bot.motor, synapse=0.01, transform=[[-1, -1], [-0.3, 0.3]])
if __name__ == '__main__':
import nengo_gui.javaviz
jv = nengo_gui.javaviz.View(model)
if spinnaker:
import nengo_spinnaker
config = nengo_spinnaker.Config()
config[input].f_of_t = True
config[input].f_period = 2*np.pi
sim = nengo_spinnaker.Simulator(model)
else:
sim = nengo.Simulator(model)
jv.update_model(sim)
jv.view()
sim.run(5000)
#import nengo_spinnaker
#sim = nengo_spinnaker.Simulator(model)
#sim.run(10)
|
mit
| -546,890,297,734,732,860
| 21.081633
| 87
| 0.631238
| false
| 2.854881
| false
| false
| false
|
Arkapravo/morse-0.6
|
src/morse/geolandloader/geoShapefileLoader.py
|
1
|
13736
|
import shapelib, dbflib
import Blender
from Blender import *
import math
from math import sqrt
import logging
#
# The the shapefile module
#
# SHAPELIB Object Types
#----------------------------------------------
#define SHPT_NULL 0
#----------------------------------------------
#2D Shape Types (pre ArcView 3.x):
#
#define SHPT_POINT 1 Points
#define SHPT_ARC 3 Arcs (Polylines, possible in parts)
#define SHPT_POLYGON 5 Polygons (possible in parts)
#define SHPT_MULTIPOINT 8 MultiPoint (related points)
#----------------------------------------------
# 3D Shape Types (may include "measure" values for vertices):
#
#define SHPT_POINTZ 11
#define SHPT_ARCZ 13
#define SHPT_POLYGONZ 15
#define SHPT_MULTIPOINTZ 18
#----------------------------------------------
# 2D + Measure Types:
#
#define SHPT_POINTM 21
#define SHPT_ARCM 23
#define SHPT_POLYGONM 25
#define SHPT_MULTIPOINTM 28
#----------------------------------------------
# Complex (TIN-like) with Z, and Measure:
#
#define SHPT_MULTIPATCH 31
#----------------------------------------------
# --------------------------- def read_shapefile(filename):
# open the shapefile
#shp = shapelib.ShapeFile(filename)
# the info method returns a tuple (num_shapes, type, min, max) where
# num_shapes is the number of shapes, type is the type code (one of
# the SHPT* constants defined in the shapelib module) and min and
# max are 4-element lists with the min. and max. values of the
# vertices.
#logging.info(shp.info())
# read_object reads a shape
#obj = shp.read_object(0)
# The vertices method returns the shape as a list of lists of tuples.
#logging.info(obj.vertices()[0][:10])
# The extents returns a tuple with two 4-element lists with the min.
# and max. values of the vertices.
#logging.info(obj.extents())
# The type attribute is the type code (one of the SHPT* constants
# defined in the shapelib module)
#logging.info(obj.type)
# The id attribute is the shape id
#logging.info(obj.id)
# the cobject method returns a PyCObject containing the shapelib
# SHPHandle. This is useful for passing shapefile objects to
# C-Python extensions.
#logging.info(shp.cobject())
# --------------------------- end of def read_shapefile(filename):
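# --- Runnable sketch of the walkthrough above (not in the original module);
# --- it simply exercises the shapelib calls documented in the comments.
def read_shapefile(filename):
    # open the shapefile; info() returns (num_shapes, type, min, max)
    shp = shapelib.ShapeFile(filename)
    logging.info(shp.info())
    # read the first shape and inspect it
    obj = shp.read_object(0)
    logging.info(obj.vertices()[0][:10])   # first vertices of the first part
    logging.info(obj.extents())            # min/max 4-element lists
    logging.info(obj.type)                 # one of the SHPT* type codes
    logging.info(obj.id)                   # shape id
    return shp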
#--------------------------------------------------#
def distance2D(A, B):
x = B.co[0] - A.co[0]
y = B.co[1] - A.co[1]
return sqrt((x*x + y*y))
#--------------------------------------------------#
def LoadBuildings(shp, hostObject):
# Default parameters for ground altitude and building height
BAlt = 321
BAltOffset = -5
BHeight = 12
RoofHeight = 15
# Preparing Texture
buildingTex = Texture.New('buildingTex')
buildingTex.setType('Image')
img = Image.Load('/tmp/building.jpg')
buildingTex.image = img
# Default Material used for Buildings
buildingMat = Material.New('buildingMat')
buildingMat.rgbCol = [0.78,0.75, 0.4]
buildingMat.emit = 0.3
buildingMat.setSpec(0.0)
buildingMat.setTexture(0, buildingTex)
# Building barracks
Nshapes = (shp.info())[0]
    logging.info('Loading %d buildings', Nshapes)
for i in range(Nshapes):
# Oshp is a shape object
Oshp = shp.read_object(i)
#for v in Oshp.vertices()[0]
#--- Building the building with the shapefile's vertices
groundCoverage = Oshp.vertices()[0]
N = len(groundCoverage)
        logging.info('Building %d with %d vertices', i + 1, N)
buildingMesh = Blender.Mesh.New('buildingMesh');
#--- Average the altitude of building first floor
if (N > 0):
thisBuildingZ = hostObject.findZOfClosestPoint(groundCoverage[0]) - BAltOffset
else:
thisBuildingZ = hostObject.meanZ - BAltOffset
for i in range(N-1):
# Get the Z of the closest vertices in DTM to adjust BZ
# for j = TODO
# Extend the vertices of the current building's mesh
buildingMesh.verts.extend(groundCoverage[i][0] - hostObject.UTMXOrigin,
groundCoverage[i][1] - hostObject.UTMYOrigin ,
thisBuildingZ)
#--- Filling the face of the building's ground
if (N == 5) or (N == 4):
ff = NMesh.Face([buildingMesh.verts]);
buildingMesh.faces.extend(ff);
#--- Creating the walls
for i in range(N-1):
buildingMesh.verts.extend(groundCoverage[i][0] - hostObject.UTMXOrigin,
groundCoverage[i][1] - hostObject.UTMYOrigin ,
thisBuildingZ + BHeight)
#--- Filling the faces of the building's walls
for i in range(N-1):
if i < N-2:
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[i+(N-1)], buildingMesh.verts[i+N], buildingMesh.verts[i+1]]);
else:
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[i+(N-1)], buildingMesh.verts[N-1], buildingMesh.verts[0]]);
buildingMesh.faces.extend(ff);
#--- Building the roof the roof the roof is on Fire... ah lala
if (N == 5):
# A two points roof
longWallIs01 = 0
roofLength = distance2D(buildingMesh.verts[0], buildingMesh.verts[1])
if roofLength > distance2D(buildingMesh.verts[1], buildingMesh.verts[2]):
longWallIs01 = 1
else:
longWallIs01 = 0
roofLength = distance2D(buildingMesh.verts[1], buildingMesh.verts[2])
# Adding faces to the roof
if longWallIs01 == 1:
xroof = ((buildingMesh.verts[0]).co[0] + (buildingMesh.verts[N-2]).co[0]) / 2.0
yroof = ((buildingMesh.verts[0]).co[1] + (buildingMesh.verts[N-2]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
xroof = ((buildingMesh.verts[1]).co[0] + (buildingMesh.verts[2]).co[0]) / 2.0
yroof = ((buildingMesh.verts[1]).co[1] + (buildingMesh.verts[2]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
ff = NMesh.Face([buildingMesh.verts[N-1], buildingMesh.verts[N], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[(2*(N-1))] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[N+2] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[N+1] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N+1], buildingMesh.verts[N+2], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[(2*(N-1))+1] ])
buildingMesh.faces.extend(ff)
else:
xroof = ((buildingMesh.verts[0]).co[0] + (buildingMesh.verts[1]).co[0]) / 2.0
yroof = ((buildingMesh.verts[0]).co[1] + (buildingMesh.verts[1]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
xroof = ((buildingMesh.verts[2]).co[0] + (buildingMesh.verts[3]).co[0]) / 2.0
yroof = ((buildingMesh.verts[2]).co[1] + (buildingMesh.verts[3]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
ff = NMesh.Face([buildingMesh.verts[N], buildingMesh.verts[N+1], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[(2*(N-1))] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[N] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N+1], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[N+2] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N+2], buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[(2*(N-1))+1] ])
buildingMesh.faces.extend(ff)
else:
# A one points roof
#xroof and yroof have already been precalculated
xroof = 0
yroof = 0
for i in range((N-1)):
xroof = xroof + (buildingMesh.verts[i]).co[0]
yroof = yroof + (buildingMesh.verts[i]).co[1]
#--- Average building 2D center
xroof = xroof / float(N-1)
yroof = yroof / float(N-1)
#--- Adding roof top points at verts index= (2*(N-1))
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
#--- Making faces around the roof
for i in range((N-1), (2*(N-1))):
if i == ((2*(N-1))-1):
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))] ])
else:
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[i+1], buildingMesh.verts[(2*(N-1))] ])
buildingMesh.faces.extend(ff);
#--- Adding material
buildingMesh.materials = [buildingMat]
#--- Creating new Object in current scene
scene = Blender.Scene.GetCurrent()
buildingObject = scene.objects.new(buildingMesh)
Blender.Window.Redraw()
#----------------------------------------------------------#
def LoadRoads(shp, hostObject):
Nshapes = (shp.info())[0]
for i in range(Nshapes):
# Oshp is a shape object
Oshp = shp.read_object(i)
# We can read the vertices from the shape object
#logging.info(Oshp.vertices()[0])
#----------------------------------------------------------#
def make_shapefile(filename):
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, [[(10, 10), (20, 10), (20, 20), (10, 10)]])
logging.info(obj.extents())
logging.info(obj.vertices())
outfile = shapelib.create(filename, shapelib.SHPT_POLYGON)
outfile.write_object(-1, obj)
del outfile
#----------------------------------------------------------#
#
# Test the DBF file module.
#
def make_dbf(file):
# create a new dbf file and add three fields.
dbf = dbflib.create(file)
dbf.add_field("NAME", dbflib.FTString, 20, 0)
dbf.add_field("INT", dbflib.FTInteger, 10, 0)
dbf.add_field("FLOAT", dbflib.FTDouble, 10, 4)
#----------------------------------------------------------#
def add_dbf_records(file):
# add some records to file
dbf = dbflib.open(file, "r+b")
# Records can be added as a dictionary...
dbf.write_record(0, {'NAME': "Weatherwax", "INT":1, "FLOAT":3.1415926535})
# ... or as a sequence
dbf.write_record(1, ("Ogg", 2, -1000.1234))
#----------------------------------------------------------#
def list_dbf(file):
# logging.info(the contents of a dbf file to stdout)
dbf = dbflib.DBFFile(file)
logging.info("%d records, %d fields" % (dbf.record_count(), dbf.field_count()))
format = ""
for i in range(dbf.field_count()):
type, name, len, decc = dbf.field_info(i)
if type == 0:
format = format + " %%(%s)%ds" % (name, len)
elif type == 1:
format = format + " %%(%s)%dd" % (name, len)
elif type == 2:
format = format + " %%(%s)%dg" % (name, len)
logging.info(format)
for i in range(dbf.record_count()):
logging.info(format % dbf.read_record(i))
#----------------------------------------------------------#
# Main method to load shapefiles according to
# the meaning of their contents which can be indicated
# by shpNature.
#
# filename: Absolute Path to the shapefile.
# hostObject: Mesh in which the information should be added
# shpNature: int indicating the nature of the shape description (Roads, buildings, etc.)
#
# We define the following values for shpNature
# shpNature : 0 : Unknown content
# shpNature : any not defined value is considered as 0.
#
# shpNature : 20 : Drivable surface
# shpNature : 21 : Common Road in hard concrete (highways, streets etc.)
# shpNature : 22 : Country tracks
#
# shpNature : 30 : Buildings with flat roofs
# shpNature : 31 : Buildings with flat roofs
#
# shpNature : 40 : Vegetation high grass
# shpNature : 41 : Vegetation woods with nice trees: feuillus
# shpNature : 42 : Vegetation woods with nice trees: coniferes
#
#----------------------------------------------------------#
def LoadShapefile(filename, shpNature, hostObject):
# The shapelib object
shp = []
# open the shapefile
if os.path.isfile(filename):
(dirname, filerelname) = os.path.split(filename)
(bodyname, fileext) = os.path.splitext(filerelname)
logging.info('(DD) Is FILE OK')
# In order to open a shapefile the shapelib needs to have the shx corresponding files
if os.path.isfile(os.path.join(dirname, bodyname+'.SHX')) or os.path.isfile(os.path.join(dirname, bodyname+'.shx')):
            logging.info('(DD) IS SHAPELIB OK')
shp = shapelib.ShapeFile(filename)
else:
return 0
if shp:
Tshp = (shp.info())[1]
        logging.info('(II) Reading shapefile with type %s contents (%s) as %s', Tshp, filerelname, shpNature)
#-------- Loading Roads
if (shpNature >= 20) and (shpNature <= 29):
LoadRoads(shp, hostObject)
#-------- Loading Buildings
if (shpNature >= 30) and (shpNature <= 39):
LoadBuildings(shp, hostObject)
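# Example call (sketch, not part of the original file). The hostObject passed
# in is assumed to expose UTMXOrigin, UTMYOrigin, meanZ and
# findZOfClosestPoint() as used by LoadBuildings() above:
#
#   LoadShapefile('/data/gis/buildings.shp', 30, dtm_object)  # 30..39 = buildings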
|
bsd-3-clause
| -1,722,526,847,447,420,700
| 41.395062
| 150
| 0.564939
| false
| 3.491612
| false
| false
| false
|
moonboy13/brew-journal
|
brew_journal/recipies/migrations/0002_auto_20160224_0318.py
|
1
|
1120
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipies', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='account',
field=models.ForeignKey(related_name='account', to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='recipehops',
name='recipe',
field=models.ForeignKey(related_name='recipe_hops', to='recipies.Recipe', null=True),
),
migrations.AddField(
model_name='recipemalts',
name='recipe',
field=models.ForeignKey(related_name='recipe_malts', to='recipies.Recipe', null=True),
),
migrations.AlterField(
model_name='recipe',
name='last_brew_date',
field=models.DateTimeField(null=True, blank=True),
),
]
|
apache-2.0
| -6,611,607,104,091,185,000
| 30.111111
| 100
| 0.590179
| false
| 4.242424
| false
| false
| false
|
josh314/sf-crime
|
universal-probs.py
|
1
|
1553
|
######################################################################
# A benchmark script which assigns same probabilities to each test set member.
# Probabilities are determined by the relative fraction of a category of
# crime to the total number of crimes in the training set.
######################################################################
import pandas as pd
import numpy as np
import gzip
import os.path
import sf_crime_config as conf
#File locations
train_file = conf.train_raw
test_file = conf.test_raw
submission_file = os.path.join(conf.submission_dir, \
'universal-probs-submission.csv.gz')
#load training file to data frame
train = pd.read_csv(train_file,header=0)
print("train imported")
#Aggregate total number of each type of crime in training set
crime_numbers = train.groupby('Category').size()
#Create a row of overall probabilities out of the crime numbers
#This vector is thus normalized to sum up to 1.
crime_ratios = crime_numbers / len(train)
#Convert to list
probs = crime_ratios.values.tolist()
#load test file to data frame
test = pd.read_csv(test_file,header=0)
#Create a matrix of probabilities for each row of test data
#Each row gets the same values -- the overall probs.
probs_array = np.array([probs]*len(test))
#Create empty data frame for submission file
columns = crime_ratios.index.tolist()
df = pd.DataFrame(probs_array, columns=columns)
df.insert(loc=0,column='Id',value=test['Id'])
with gzip.open(submission_file,'wt') as archive:
df.to_csv(archive,index=False)
|
cc0-1.0
| -1,150,541,506,081,730,200
| 34.295455
| 78
| 0.678042
| false
| 3.787805
| true
| false
| false
|
CharKwayTeow/uci-webreg-crawler
|
python/download_all_courses_in_a_department.py
|
1
|
1253
|
#!/usr/bin/python
# This is a script to download course lists of a department in all quarters.
# Usage: python download_all_courses_in_a_department.py department_name
# Example: python download_all_courses_in_a_department.py COMPSCI
import urllib2, sys, os, string
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
url = 'http://websoc.reg.uci.edu/perl/WebSoc'
webfile = urllib2.urlopen(url).read()
department = sys.argv[1]
if not os.path.exists(department):
os.makedirs(department)
print 'Begin to download all courses in', department, ':'
for line in webfile.splitlines():
if "<option value=\"20" in line:
param = find_between(line, "value=\"", "\" style")
term = find_between(line, ">", "<")
query_url = "http://websoc.reg.uci.edu/perl/WebSoc?Submit=Display+Web+Results&YearTerm="+ param + "&Dept=" + department
response = urllib2.urlopen(query_url).read()
#build file name
trans = string.maketrans(' ', '_')
filename = term.translate(trans)
print term
#store to html file
fo = open(department + '/' + filename + ".html", "w+")
fo.write(response)
fo.close()
|
gpl-2.0
| -5,043,633,817,063,758,000
| 28.139535
| 121
| 0.663208
| false
| 3.148241
| false
| false
| false
|
hellohaptik/chatbot_ner
|
ner_v1/detectors/pattern/pnr/pnr_detection.py
|
1
|
12091
|
from __future__ import absolute_import
import re
from ner_v1.detectors.base_detector import BaseDetector
from language_utilities.constant import ENGLISH_LANG
class PNRDetector(BaseDetector):
"""Detects PNR (serial) codes (Passenger Record Number, usually present with train or flight bookings) in given text
and tags them. Usually flight pnr codes are 5 to 8 characters long.
Detects all PNR/serial codes of variable length about 5 to 20 characters in given text and replaces them by
entity_name. Different detectors are used depending on the entity_name used to initialize the PNRDetector object.
    A task_dict, a dictionary mapping detector functions to entity_name, is used for this.
For example if 'train_pnr' is used to initialize PNRDetector(),
    _detect_railway_pnr() would be called to detect pnr codes. If entity_name is not present in
    task_dict, _detect_serial_pnr() is used to detect pnr codes.
Attributes:
text: string to extract entities from
entity_name: string by which the detected pnr codes would be replaced with on calling detect_entity()
tagged_text: string with pnr codes replaced with tag defined by entity name
processed_text: string with pnr codes detected removed
pnr_list: list of pnr codes detected
original_pnr_text: list to store substrings of the text detected as pnr codes
tag: entity_name prepended and appended with '__'
        task_dict : A dictionary mapping detector functions to entity_name. For example if 'train_pnr' is used to
                    initialize PNRDetector(), _detect_railway_pnr() would be called to detect pnr codes.
                    If entity_name is not present in task_dict, _detect_serial_pnr() is used to detect pnr
codes
For Example:
text = "Your flight booking was sucessful. Your pnr is 4sgx3e."
pnr_detector = PNRDetector("pnr_number")
pnr_numbers, original_pnr_numbers = pnr_detector.detect_entity(text)
pnr_detector.tagged_text
Output:
            ' Your flight booking was successful. Your pnr is __pnr__number__. '
pnr_numbers, original_pnr_numbers
Output:
(['4sgx3e'], ['4sgx3e'])
Note:
        text and tagged_text will have an extra space prepended and appended after calling detect_entity(text)
More Examples:
text = "Your flight booking was sucessful. Your pnr is 43333."
...
pnr_numbers, original_pnr_numbers
(['43333'], ['43333'])
text = "Your flight booking was sucessful. Your pnr is 433."
...
pnr_numbers, original_pnr_numbers
([], [])
text = "Your flight booking was sucessful. Your pnr is sgxsgx."
...
pnr_numbers, original_pnr_numbers
(['sgxsgx'], ['sgxsgx'])
"""
def __init__(self, entity_name, source_language_script=ENGLISH_LANG, translation_enabled=False):
"""Initializes a PNRDetector object
Args:
entity_name: A string by which the detected pnr codes would be replaced with on calling detect_entity()
source_language_script: ISO 639 code for language of entities to be detected by the instance of this class
translation_enabled: True if messages needs to be translated in case detector does not support a
particular language, else False
"""
# assigning values to superclass attributes
self._supported_languages = [ENGLISH_LANG]
super(PNRDetector, self).__init__(source_language_script, translation_enabled)
self.entity_name = entity_name
self.task_dict = {
'train_pnr': self._detect_railway_pnr,
'Default': self._detect_serial_pnr
}
self.text = ''
self.tagged_text = ''
self.processed_text = ''
self.pnr_list = []
self.original_pnr_text = []
self.tag = '__' + self.entity_name + '__'
@property
def supported_languages(self):
return self._supported_languages
def detect_entity(self, text, **kwargs):
"""Detects pnr codes in the text string
Args:
text: string to extract entities from
**kwargs: it can be used to send specific arguments in future
Returns:
A tuple of two lists with first list containing the detected pnr codes and second list containing their
corresponding substrings in the given text.
For example:
(['4sgx3e'], ['4sgx3e'])
Additionally this function assigns these lists to self.pnr_list and self.original_pnr_text attributes
respectively.
"""
self.text = ' ' + text + ' '
self.processed_text = self.text
self.tagged_text = self.text
pnr_data = self.task_dict.get(self.entity_name, self.task_dict['Default'])()
self.pnr_list = pnr_data[0]
self.original_pnr_text = pnr_data[1]
return pnr_data
def _detect_railway_pnr(self):
"""Detects railway pnr codes in the text string
        Detects Indian Railways 10 to 12 digit PNR codes in the text
Returns:
A tuple of two lists with first list containing the detected pnr codes and second list containing their
corresponding substrings in the given text.
For example, if text is "My train pnr is 2459547855, can you check the train status for me ?"
It returns
(['2459547855'], ['2459547855'])
            Additionally this function assigns these lists to self.pnr_list and self.original_pnr_text
attributes respectively.
"""
# print 'detection for default task'
railway_pnr_list = []
original_list = []
railway_pnr_list, original_list = self._detect_railway_pnr_format(railway_pnr_list, original_list)
self._update_processed_text(original_list)
railway_pnr_list, original_list = self._detect_railway_pnr_long_format(railway_pnr_list, original_list)
self._update_processed_text(original_list)
return railway_pnr_list, original_list
def _detect_railway_pnr_format(self, railway_pnr_list=None, original_list=None):
"""
        Detects Indian Railways 10 to 12 digit pnr codes from self.text conforming to formats defined by
regex pattern.
This function is called by _detect_railway_pnr()
Args:
            railway_pnr_list: Optional, list to store detected pnr codes
            original_list: Optional, list to store corresponding original substrings of text which were detected as
                           pnr codes
        Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['2459547855'], ['2459547855'])
"""
if railway_pnr_list is None:
railway_pnr_list = []
if original_list is None:
original_list = []
patterns = re.findall(r'\b([0-9]{10,12})\b', self.processed_text.lower())
for pattern in patterns:
railway_pnr_list.append(pattern)
original_list.append(pattern)
return railway_pnr_list, original_list
def _detect_railway_pnr_long_format(self, railway_pnr_list=None, original_list=None):
"""
Detects railway PNR 10 digit number with special characters
Args:
            railway_pnr_list: Optional, list to store detected pnr codes
            original_list: Optional, list to store corresponding original substrings of text which were detected as
                           pnr codes
        Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['2459547855'], ['2459547855'])
"""
if railway_pnr_list is None:
railway_pnr_list = []
if original_list is None:
original_list = []
patterns = re.findall(r'\b([0-9\-\s\(\)\.]{10,20})\b', self.processed_text.lower())
for pattern in patterns:
clean_pnr = self._clean_pnr(pattern)
if len(clean_pnr) == 10:
railway_pnr_list.append(clean_pnr)
original_list.append(pattern)
return railway_pnr_list, original_list
def _clean_pnr(self, pnr):
"""
This function clean special character from pnr text
Args:
pnr: PNR containing special characters
Returns:
pnr: PNR with special characters removed
"""
        return re.sub(r'[\-\s\.\(\)]+', '', pnr)
def _detect_serial_pnr(self):
"""
Detects generic serial/pnr codes from self.text conforming to formats defined by regex pattern.
Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['4sgx3e'], ['4sgx3e'])
"""
# print 'detection for default task'
pnr_list = []
original_list = []
pnr_list, original_list = self._detect_serial_key(pnr_list, original_list)
self._update_processed_text(original_list)
return pnr_list, original_list
def _detect_serial_key(self, pnr_list=None, original_list=None):
"""
Detects generic serial/pnr codes from self.text conforming to formats defined by regex pattern.
This function is called by _detect_railway_pnr()
Args:
            pnr_list: Optional, list to store detected pnr codes
            original_list: Optional, list to store corresponding original substrings of text which were detected as
                           pnr codes
        Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['4sgx3e'], ['4sgx3e'])
"""
if pnr_list is None:
pnr_list = []
if original_list is None:
original_list = []
pnr = None
pattern = re.compile(r'\s(([0-9]+[a-zA-Z]|[a-zA-Z]+[0-9])[A-Za-z0-9]*)\s').search(self.processed_text.lower())
pattern2 = re.compile(r'\se([0-9]{4,20})\s').search(self.processed_text.lower())
pattern3 = re.compile(r'\s([A-Z]{4,20})\s').search(self.processed_text.lower())
pattern4 = re.compile(r'\s([A-Za-z0-9]*[^AaEeIiOoUu\+\-,!@#\$\^&\*\(\);/\|<>\s]{4,10}[A-Za-z0-9]+)[\s\.]') \
.search(self.processed_text.lower())
if pattern and len(pattern.group(1)) > 3:
pnr = pattern.group(1)
elif pattern2:
pnr = pattern2.group(1)
elif pattern3:
pnr = pattern3.group(1)
elif pattern4:
pnr = pattern4.group(1)
if pnr:
pnr_list.append(pnr)
original_list.append(pnr)
return pnr_list, original_list
def _update_processed_text(self, original_pnr_strings):
"""
Replaces detected pnr codes with tag generated from entity_name used to initialize the object with
A final string with all pnr codes replaced will be stored in object's tagged_text attribute
A string with all pnr codes removed will be stored in object's processed_text attribute
Args:
original_pnr_strings: list of substrings of original text to be replaced with tag created from entity_name
"""
for detected_text in original_pnr_strings:
self.tagged_text = self.tagged_text.replace(detected_text, self.tag)
self.processed_text = self.processed_text.replace(detected_text, '')
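# Minimal usage sketch (mirrors the class docstring above; not part of the
# original module):
#
#   detector = PNRDetector('pnr_number')
#   pnr_list, original_texts = detector.detect_entity(
#       'Your flight booking was successful. Your pnr is 4sgx3e.')
#   # -> (['4sgx3e'], ['4sgx3e'])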
|
gpl-3.0
| 1,228,918,726,222,171,000
| 40.12585
| 120
| 0.619055
| false
| 3.979921
| false
| false
| false
|
fos/fos-legacy
|
fos/core/actor.py
|
1
|
8156
|
import numpy as np
from fos.actor.primitives import AABBPrimitive
from pyglet.gl import GLfloat
from pyglet.gl import *
class Actor(object):
""" Define a visualization object in Fos """
def __init__(self,
affine = None,
aabb = None,
force_center_data = False,
**kwargs):
""" Create an actor
Parameters
----------
affine : 4x4 array
the affine is expected to be normal, i.e. it has only
rotational and translational components, but no shears
the affine is applied to the input vertices data to position
the actor in the world space. If set to none, an affine is
generated to positions the actor optimally for the camera view
aabb : (corner1, corner2)
the axis-aligned bounding box. axis-aligned means aligned
with the world coordinate system axes
corner1 : 3x1 array
bottom-left-front point of the box when look into z direction
corner2 : 3x1 array
top-right-back point of the box
If set to None, an axis aligned boundingbox is computed
using the input vertices
force_center_data : boolean
if set to true, the mean of vertices location is subtracted from
all the vertices. this is useful to compute a better bounding
box and if the data has no meaningful affine
obb : (center, orientation, halfwidths)
center : 3x1 array
the center point of the aabb
orientation : 3x3 array
orthogonal unit vectors
halfwidths : 3x1 array
box halfwidths along each axis
"""
# data related information
self.vertices = None
self.living = False
self.show_aabb = True
# self.connectivity = None
# self.field = None # scalar, vector, tensor
# self.colormap = None
# self.texture = None
# movement related information. use the
# self.velocity = None
# self.acceleration = None
# event related information
# self.event_queue = None
# mouse or keyboard events on the actor
# self.event_handlers = None
# related: menu options for the actor
def setup(self):
""" Data preparation """
# display lists, vbo
# prepare different resolutions
pass
def update(self, dt):
""" Update the actor
dt from the global timer """
pass
def draw_aabb(self):
""" Draw the actor """
if self.show_aabb:
glPushMatrix()
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glLineWidth(1.0)
glColor3f(1.0, 1.0, 0.0)
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointer(3, GL_FLOAT, 0, self.aabb.vertices_ptr)
glDrawElements(self.aabb.mode,self.aabb.indices_nr,self.aabb.type,self.aabb.indices_ptr)
glDisableClientState(GL_VERTEX_ARRAY)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glPopMatrix()
def delete(self):
""" Removing the geometry """
pass
def info(self):
""" Show information about the actor """
# debug mode
print "this actor is at ", self
print "number of vertices", len(self.vertices)
print "is the actor living ?", self.living
if not self.aabb is None:
print "has boundary box", self.aabb
def to_raytracer_file(self):
""" Save the geometry to a file readable by a raytracer """
pass
def process_pickray(self, near, far):
""" Process the pick ray like intersecting with the actor """
pass
def process_keys(self,symbol,modifiers):
pass
def process_mouse_motion(self,x,y,dx,dy):
pass
def make_aabb(self, aabb = None, margin = 30):
""" Make the axis aligned bounding box.
Parameters
----------
aabb : 2-tuple of numpy arrays of shape(3,)
Defining the box by left-bottom-front and the top-right-back
coordinate points. If None, a bounding box based on the
vertices is computed.
margin : float
A margin to be added to the computed bounding box
"""
# if no aabb is given, compute one
if aabb == None:
# compute an axis aligned bounding box
# based on the vertices
coord1 = np.array([self.vertices[:,0].min(),
self.vertices[:,1].min(),
self.vertices[:,2].min()], dtype = np.float32)
coord2 = np.array([self.vertices[:,0].max(),
self.vertices[:,1].max(),
self.vertices[:,2].max()], dtype = np.float32)
self.aabb = AABBPrimitive(blf = coord1, trb = coord2, margin = margin)
else:
assert len(aabb) == 2
# otherwise set to given aabb
self.aabb = AABBPrimitive(blf = aabb[0], trb = aabb[1], margin = margin)
def get_aabb_coords(self):
"""
Returns AABB coordinates blf and trb in world space (using the affine)
"""
ab1 = self.aabb.coord[0]
ab2 = self.aabb.coord[1]
r1 = np.dot(self.affine, np.array( [ab1[0], ab1[1], ab1[2], 1.0] ) )
r2 = np.dot(self.affine, np.array( [ab2[0], ab2[1], ab2[2], 1.0] ) )
return (r1[:3], r2[:3])
def make_obb(self):
pass
# just reuse the aabb points
# leftbottom, righttop = self.aabb
#
# center = np.mean( np.vstack( (leftbottom, righttop) ), axis = 0)
# halfwidths = (leftbottom - righttop) / 2.0
# # in our default network, this obb is axis-aligned, thus the
# # obb is the identity
# orientation = np.eye( 3, 3 )
#
# self.obb = (center, halfwidths, orientation)
def bounding_sphere(self):
""" Compute the bounding sphere """
pass
# can use PCA?
def bouding_ellipsoid(self):
""" Compute the bounding elipsoid """
pass
# can use PCA?
## affine logic
###
def set_affine(self, affine):
# update the affine
print "update affine", self.affine
self.affine = affine
self._update_glaffine()
def scale(self, scale_factor):
""" Scales the actor by scale factor.
Multiplies the diagonal of the affine for
the first 3 elements """
self.affine[0,0] *= scale_factor
self.affine[1,1] *= scale_factor
self.affine[2,2] *= scale_factor
self._update_glaffine()
def translate(self, dx, dy, dz):
""" Translate the actor.
Remember the OpenGL has right-handed
coordinate system """
self.affine[0,3] += dx
self.affine[1,3] += dy
self.affine[2,3] += dz
self._update_glaffine()
def set_position(self, x, y, z):
""" Position the actor.
Remember the OpenGL has right-handed
coordinate system """
        # set the absolute position (translate() above handles relative moves)
        self.affine[0,3] = x
        self.affine[1,3] = y
        self.affine[2,3] = z
self._update_glaffine()
def _update_glaffine(self):
self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel()))
# life processes
###
def start(self, lifespan = 10, tickingtime = 2.0):
print "the actor is alive"
self.living = True
self.internal_timestamp = 0.0
# call self.update every tickingtime
def stop(self):
print "the actor stops living"
self.living = False
def cont(self):
print "continue to live happily"
self.living = True
|
bsd-3-clause
| 8,687,800,132,622,186,000
| 31.624
| 100
| 0.538009
| false
| 4.119192
| false
| false
| false
|
zesty/sgraph
|
test_sgraph.py
|
1
|
1807
|
#! env python3
import unittest
import sgraph
class TestSgraph(unittest.TestCase):
def setUp(self):
graph = []
with open('graph') as f: # one per line FIXME fixture?
for edge in f.readlines():
src, dest, *cost = list(edge.strip())
cost = int(''.join(cost)) # bc maybe size >= 10; FIXME multi-char cities needs diff input format
graph.append((src, dest, cost))
self.sgl = sgraph.SGraph(graph)
def test01(self):
self.assertEqual(9, self.sgl.route_distance(['A', 'B', 'C']))
def test02(self):
self.assertEqual(5, self.sgl.route_distance(['A', 'D']))
def test03(self):
self.assertEqual(13, self.sgl.route_distance(['A', 'D', 'C']))
def test04(self):
self.assertEqual(22, self.sgl.route_distance(['A', 'E', 'B', 'C', 'D']))
def test05(self):
self.assertRaises(sgraph.SGraph.NoSuchRoute, self.sgl.route_distance, ['A', 'E', 'D'])
try:
x = self.sgl.route_distance(['A', 'E', 'D'])
print(str(x)) # never
except sgraph.SGraph.NoSuchRoute as e:
self.assertEqual('NO SUCH ROUTE', str(e))
def test06(self):
self.assertEqual(2, self.sgl.count_routes_max_stops('C', 'C', 3))
def test07(self):
self.assertEqual(3, self.sgl.count_routes_exact_stops('A', 'C', 4))
def test08(self):
self.assertEqual(9, self.sgl.shortest_route('A', 'C'))
def test09(self):
self.assertEqual(9, self.sgl.shortest_route('B', 'B'))
def test10(self):
self.assertEqual(7, self.sgl.count_routes_max_distance('C', 'C', 30))
def test11(self):
self.assertEqual(float('inf'), self.sgl.shortest_route('A', 'A'))
if __name__ == '__main__':
unittest.main()
|
mit
| 7,265,645,486,266,781,000
| 30.155172
| 113
| 0.570006
| false
| 3.226786
| true
| false
| false
|
tanayseven/Voix
|
flask/lib/python2.7/site-packages/whoosh/matching/wrappers.py
|
1
|
14976
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import sys
from whoosh.compat import xrange
from whoosh.matching import mcore
class WrappingMatcher(mcore.Matcher):
"""Base class for matchers that wrap sub-matchers.
"""
def __init__(self, child, boost=1.0):
self.child = child
self.boost = boost
def __repr__(self):
return "%s(%r, boost=%s)" % (self.__class__.__name__, self.child,
self.boost)
def copy(self):
kwargs = {}
if hasattr(self, "boost"):
kwargs["boost"] = self.boost
return self.__class__(self.child.copy(), **kwargs)
def depth(self):
return 1 + self.child.depth()
def _replacement(self, newchild):
return self.__class__(newchild, boost=self.boost)
def replace(self, minquality=0):
# Replace the child matcher
r = self.child.replace(minquality)
if r is not self.child:
# If the child changed, return a new wrapper on the new child
return self._replacement(r)
else:
return self
def max_quality(self):
return self.child.max_quality()
def id(self):
return self.child.id()
def all_ids(self):
return self.child.all_ids()
def is_active(self):
return self.child.is_active()
def reset(self):
self.child.reset()
def children(self):
return [self.child]
def supports(self, astype):
return self.child.supports(astype)
def value(self):
return self.child.value()
def value_as(self, astype):
return self.child.value_as(astype)
def spans(self):
return self.child.spans()
def skip_to(self, id):
return self.child.skip_to(id)
def next(self):
self.child.next()
def supports_block_quality(self):
return self.child.supports_block_quality()
def skip_to_quality(self, minquality):
return self.child.skip_to_quality(minquality / self.boost)
def block_quality(self):
return self.child.block_quality() * self.boost
def weight(self):
return self.child.weight() * self.boost
def score(self):
return self.child.score() * self.boost
class MultiMatcher(mcore.Matcher):
"""Serializes the results of a list of sub-matchers.
"""
def __init__(self, matchers, idoffsets, current=0):
"""
:param matchers: a list of Matcher objects.
:param idoffsets: a list of offsets corresponding to items in the
``matchers`` list.
"""
self.matchers = matchers
self.offsets = idoffsets
self.current = current
self._next_matcher()
def __repr__(self):
return "%s(%r, %r, current=%s)" % (self.__class__.__name__,
self.matchers, self.offsets,
self.current)
def is_active(self):
return self.current < len(self.matchers)
def reset(self):
for mr in self.matchers:
mr.reset()
self.current = 0
def children(self):
return [self.matchers[self.current]]
def _next_matcher(self):
matchers = self.matchers
while (self.current < len(matchers)
and not matchers[self.current].is_active()):
self.current += 1
def copy(self):
return self.__class__([mr.copy() for mr in self.matchers],
self.offsets, current=self.current)
def depth(self):
if self.is_active():
return 1 + max(mr.depth() for mr in self.matchers[self.current:])
else:
return 0
def replace(self, minquality=0):
m = self
if minquality:
# Skip sub-matchers that don't have a high enough max quality to
# contribute
while (m.is_active()
and m.matchers[m.current].max_quality() < minquality):
m = self.__class__(self.matchers, self.offsets, m.current + 1)
m._next_matcher()
if not m.is_active():
return mcore.NullMatcher()
# TODO: Possible optimization: if the last matcher is current, replace
# this with the last matcher, but wrap it with a matcher that adds the
# offset. Have to check whether that's actually faster, though.
return m
def max_quality(self):
return self.matchers[self.current].max_quality()
def id(self):
current = self.current
return self.matchers[current].id() + self.offsets[current]
def all_ids(self):
offsets = self.offsets
for i, mr in enumerate(self.matchers):
for id in mr.all_ids():
yield id + offsets[i]
def spans(self):
return self.matchers[self.current].spans()
def supports(self, astype):
return self.matchers[self.current].supports(astype)
def value(self):
return self.matchers[self.current].value()
def value_as(self, astype):
return self.matchers[self.current].value_as(astype)
def next(self):
if not self.is_active():
raise mcore.ReadTooFar
self.matchers[self.current].next()
if not self.matchers[self.current].is_active():
self._next_matcher()
def skip_to(self, id):
if not self.is_active():
raise mcore.ReadTooFar
if id <= self.id():
return
matchers = self.matchers
offsets = self.offsets
r = False
while self.current < len(matchers) and id > self.id():
mr = matchers[self.current]
sr = mr.skip_to(id - offsets[self.current])
r = sr or r
if mr.is_active():
break
self._next_matcher()
return r
def supports_block_quality(self):
return all(mr.supports_block_quality() for mr
in self.matchers[self.current:])
def block_quality(self):
return self.matchers[self.current].block_quality()
def weight(self):
return self.matchers[self.current].weight()
def score(self):
return self.matchers[self.current].score()
def ExcludeMatcher(child, excluded, boost=1.0):
return FilterMatcher(child, excluded, exclude=True, boost=boost)
class FilterMatcher(WrappingMatcher):
"""Filters the postings from the wrapped based on whether the IDs are
present in or absent from a set.
"""
def __init__(self, child, ids, exclude=False, boost=1.0):
"""
:param child: the child matcher.
:param ids: a set of IDs to filter by.
:param exclude: by default, only IDs from the wrapped matcher that are
**in** the set are used. If this argument is True, only IDs from
the wrapped matcher that are **not in** the set are used.
"""
super(FilterMatcher, self).__init__(child)
self._ids = ids
self._exclude = exclude
self.boost = boost
self._find_next()
def __repr__(self):
return "%s(%r, %r, %r, boost=%s)" % (self.__class__.__name__,
self.child, self._ids,
self._exclude, self.boost)
def reset(self):
self.child.reset()
self._find_next()
def copy(self):
return self.__class__(self.child.copy(), self._ids, self._exclude,
boost=self.boost)
def _replacement(self, newchild):
return self.__class__(newchild, self._ids, exclude=self._exclude,
boost=self.boost)
def _find_next(self):
child = self.child
ids = self._ids
r = False
if self._exclude:
while child.is_active() and child.id() in ids:
r = child.next() or r
else:
while child.is_active() and child.id() not in ids:
r = child.next() or r
return r
def next(self):
self.child.next()
self._find_next()
def skip_to(self, id):
self.child.skip_to(id)
self._find_next()
def all_ids(self):
ids = self._ids
if self._exclude:
return (id for id in self.child.all_ids() if id not in ids)
else:
return (id for id in self.child.all_ids() if id in ids)
def all_items(self):
ids = self._ids
if self._exclude:
return (item for item in self.child.all_items()
if item[0] not in ids)
else:
return (item for item in self.child.all_items() if item[0] in ids)
class InverseMatcher(WrappingMatcher):
"""Synthetic matcher, generates postings that are NOT present in the
wrapped matcher.
"""
def __init__(self, child, limit, missing=None, weight=1.0, id=0):
super(InverseMatcher, self).__init__(child)
self.limit = limit
self._weight = weight
self.missing = missing or (lambda id: False)
self._id = id
self._find_next()
def copy(self):
return self.__class__(self.child.copy(), self.limit,
weight=self._weight, missing=self.missing,
id=self._id)
def _replacement(self, newchild):
return self.__class__(newchild, self.limit, missing=self.missing,
weight=self._weight, id=self._id)
def is_active(self):
return self._id < self.limit
def reset(self):
self.child.reset()
self._id = 0
self._find_next()
def supports_block_quality(self):
return False
def _find_next(self):
child = self.child
missing = self.missing
# If the current docnum isn't missing and the child matcher is
# exhausted (so we don't have to worry about skipping its matches), we
# don't have to do anything
if not child.is_active() and not missing(self._id):
return
# Catch the child matcher up to where this matcher is
if child.is_active() and child.id() < self._id:
child.skip_to(self._id)
# While self._id is missing or is in the child matcher, increase it
while child.is_active() and self._id < self.limit:
if missing(self._id):
self._id += 1
continue
if self._id == child.id():
self._id += 1
child.next()
continue
break
def id(self):
return self._id
def all_ids(self):
return mcore.Matcher.all_ids(self)
def next(self):
if self._id >= self.limit:
raise mcore.ReadTooFar
self._id += 1
self._find_next()
def skip_to(self, id):
if self._id >= self.limit:
raise mcore.ReadTooFar
if id < self._id:
return
self._id = id
self._find_next()
def weight(self):
return self._weight
def score(self):
return self._weight
class RequireMatcher(WrappingMatcher):
"""Matches postings that are in both sub-matchers, but only uses scores
from the first.
"""
def __init__(self, a, b):
from whoosh.matching.binary import IntersectionMatcher
self.a = a
self.b = b
self.child = IntersectionMatcher(a, b)
def copy(self):
return self.__class__(self.a.copy(), self.b.copy())
def supports_block_quality(self):
return self.a.supports_block_quality()
def replace(self, minquality=0):
if not self.child.is_active():
# If one of the sub-matchers is inactive, go inactive
return mcore.NullMatcher()
elif minquality and self.a.max_quality() < minquality:
# If the required matcher doesn't have a high enough max quality
# to possibly contribute, return an inactive matcher
return mcore.NullMatcher()
new_a = self.a.replace(minquality)
new_b = self.b.replace()
if not new_a.is_active():
return mcore.NullMatcher()
elif new_a is not self.a or new_b is not self.b:
# If one of the sub-matchers changed, return a new Require
return self.__class__(new_a, self.b)
else:
return self
def max_quality(self):
return self.a.max_quality()
def block_quality(self):
return self.a.block_quality()
def skip_to_quality(self, minquality):
skipped = self.a.skip_to_quality(minquality)
self.child._find_next()
return skipped
def weight(self):
return self.a.weight()
def score(self):
return self.a.score()
def supports(self, astype):
return self.a.supports(astype)
def value(self):
return self.a.value()
def value_as(self, astype):
return self.a.value_as(astype)
class ConstantScoreMatcher(WrappingMatcher):
def __init__(self, child, score=1.0):
super(ConstantScoreMatcher, self).__init__(child)
self._score = score
def copy(self):
return self.__class__(self.child.copy(), score=self._score)
def _replacement(self, newchild):
return self.__class__(newchild, score=self._score)
def block_quality(self):
return self._score
def score(self):
return self._score
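# Example (sketch, not part of the original module): filtering a simple matcher
# by a set of document ids, assuming whoosh.matching.ListMatcher is available
# as in Whoosh 2.x:
#
#   from whoosh.matching import ListMatcher
#   m = FilterMatcher(ListMatcher([1, 2, 3, 4, 5]), ids=set([2, 4]))
#   list(m.all_ids())  # -> [2, 4]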
|
gpl-3.0
| -2,723,485,462,847,553,500
| 28.892216
| 78
| 0.584001
| false
| 4.060738
| false
| false
| false
|
chimkentec/KodiMODo_rep
|
plugin.video.tree.tv.dev/core/auth.py
|
1
|
1953
|
# -*- coding: utf-8 -*-
import pickle, re
import xbmcup.app, xbmcup.system, xbmcup.net
from defines import *
class Auth:
def __init__(self):
self.success = '"ok"'
self.cookie_file = xbmcup.system.fs('sandbox://'+COOKIE_FILE)
self.login = xbmcup.app.setting['username']
self.password = xbmcup.app.setting['password']
#xbmcup.system.fs.delete('sandbox://'+COOKIE_FILE)
def autorize(self):
try:
if(self.login == '' or self.password == ''):
self.reset_auth()
return False
url = '%s/users/index/auth?mail=%s&pass=%s&social=0&_=1422391861285' % (SITE_URL, self.login, self.password)
response = xbmcup.net.http.get(url)
except xbmcup.net.http.exceptions.RequestException:
return False
else:
return self._check_response(response)
def _check_response(self, response):
is_logged = response.text == self.success
if(is_logged):
self.save_cookies(response.cookies)
xbmcup.app.setting['is_logged'] = 'true'
else:
xbmcup.system.fs.delete('sandbox://'+COOKIE_FILE)
return is_logged
def save_cookies(self, cookiejar):
with open(self.cookie_file, 'wb') as f:
pickle.dump(cookiejar, f)
def get_cookies(self):
if(xbmcup.system.fs.exists('sandbox://'+COOKIE_FILE)):
with open(self.cookie_file, 'rb') as f:
return pickle.load(f)
return {}
def reset_auth(self, reset_settings=False):
xbmcup.app.setting['is_logged'] = 'false'
if reset_settings == True:
xbmcup.app.setting['username'] = ''
xbmcup.app.setting['password'] = ''
xbmcup.system.fs.delete('sandbox://'+COOKIE_FILE)
def check_auth(self, page):
reg = re.compile('/users/index/logout', re.S).findall(page)
return len(reg) > 0
|
gpl-3.0
| 2,525,044,675,271,386,600
| 31.566667
| 120
| 0.576549
| false
| 3.643657
| false
| false
| false
|
PrestigeDox/Watashi-SelfBot
|
cogs/converter.py
|
1
|
2237
|
import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
class Converter:
def __init__(self, bot):
self.bot = bot
self.aiohttp_session = bot.aiohttp_session
self.url = 'https://google.com/search'
self.headers = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR '
'2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8; '
'InfoPath.3; .NET4.0C; .NET4.0E) chromeframe/8.0.552.224',
'Accept-Language': 'en-us',
'Cache-Control': 'no-cache'
}
@commands.command()
async def convert(self, ctx, *, query=None):
""" Calculate some expressions! """
# Handle no query being provided
if query is None:
return await ctx.error('Please provide a query!')
from_unit = query.split()[0]
to_unit = query.split()[1]
try:
val = float(query.split()[2])
except ValueError:
return await ctx.error('Invalid query.')
# Doing this in the f-string later would become f-string-ception and that doesn't work
qstr = quote_plus(f'{val} {from_unit} to {to_unit}')
# Tries its best to imitate a real browser visit, an old user-agent is used to make scraping easier
async with self.aiohttp_session.get(f'{self.url}?q={qstr}&source=hp', headers=self.headers) as r:
html = await r.text()
# Beautiful soup
soup = BeautifulSoup(html, 'lxml')
        # The span inside div._Qeb has the result for the expression; if it doesn't exist, Google
        # didn't like the expression or it's just invalid
if not soup.select('div#ires div._Qeb span'):
return await ctx.error('Could not convert expression.')
# Values with units
from_val = soup.select("div#ires div._Qeb span")[0].text.split()[0]
to_val = soup.select("div#ires div._Peb")[0].text.split()[0]
await ctx.message.edit(content=f"{from_val}{from_unit} = {to_val}{to_unit}")
def setup(bot):
bot.add_cog(Converter(bot))
|
mit
| 4,840,375,041,808,833,000
| 37.568966
| 115
| 0.596334
| false
| 3.495313
| false
| false
| false
|
FirmlyReality/docklet
|
tools/upgrade_file2db.py
|
2
|
3353
|
import sys
sys.path.append("../src/")
import os,json
from datetime import datetime
from model import db, VCluster, Container, PortMapping, Image, BillingHistory
timeFormat = "%Y-%m-%d %H:%M:%S"
dockletPath = "/opt/docklet/global"
usersdir = dockletPath + "/users/"
try:
VCluster.query.all()
except Exception as err:
print("Create database...")
db.create_all()
print("Update vcluster...")
for user in os.listdir(usersdir):
tmppath = usersdir+user+"/clusters/"
if not os.path.exists(tmppath):
continue
print("Update User: "+str(user))
clusterfiles = os.listdir(tmppath)
for cluname in clusterfiles:
cluFile = open(tmppath+cluname,"r")
cluinfo = json.loads(cluFile.read())
vcluster = VCluster(cluinfo['clusterid'],cluname,user,cluinfo['status'],cluinfo['size'],cluinfo['nextcid'],cluinfo['proxy_server_ip'],cluinfo['proxy_public_ip'])
vcluster.create_time = datetime.strptime(cluinfo['create_time'],timeFormat)
vcluster.start_time = cluinfo['start_time']
for coninfo in cluinfo['containers']:
lastsavet = datetime.strptime(coninfo['lastsave'],timeFormat)
con = Container(coninfo['containername'], coninfo['hostname'], coninfo['ip'], coninfo['host'], coninfo['image'], lastsavet, coninfo['setting'])
vcluster.containers.append(con)
for pminfo in cluinfo['port_mapping']:
pm = PortMapping(pminfo['node_name'], pminfo['node_ip'], int(pminfo['node_port']), int(pminfo['host_port']))
vcluster.port_mapping.append(pm)
if "billing_history" in cluinfo.keys():
for nodename in cluinfo['billing_history'].keys():
bhinfo = cluinfo['billing_history'][nodename]
bh = BillingHistory(nodename,bhinfo['cpu'],bhinfo['mem'],bhinfo['disk'],bhinfo['port'])
vcluster.billing_history.append(bh)
try:
db.session.add(vcluster)
db.session.commit()
except Exception as err:
print(err)
cluFile.close()
print("Update Images...")
for shareStr in ['private/','public/']:
print("Update "+shareStr+" Images...")
for user in os.listdir(dockletPath+"/images/"+shareStr):
print("Update User: "+user)
tmppath = dockletPath+"/images/"+shareStr+user+"/"
files = os.listdir(tmppath)
images = []
for file in files:
if file[0] == "." or file[-3] != ".":
continue
images.append(file[:-3])
for img in images:
infofile = open(tmppath+"."+img+".info","r")
imginfo = infofile.read().split('\n')
infofile.close()
desfile = open(tmppath+"."+img+".description","r")
desinfo = desfile.read()
dbimage = Image.query.filter_by(imagename=img,ownername=user).first()
if dbimage is None:
dbimage = Image(img,False,False,user,desinfo)
dbimage.create_time = datetime.strptime(imginfo[0],timeFormat)
if shareStr == 'public/':
dbimage.hasPublic = True
else:
dbimage.hasPrivate = True
try:
db.session.add(dbimage)
db.session.commit()
except Exception as err:
print(err)
print("Finished!")
|
bsd-3-clause
| -6,557,784,391,905,363,000
| 40.9125
| 169
| 0.593498
| false
| 3.742188
| false
| false
| false
|
taigaio/taiga-back
|
taiga/projects/notifications/signals.py
|
1
|
5168
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.utils import timezone
from taiga.events import events
from taiga.events import middleware as mw
from . import choices
from . import models
from . import serializers
def _filter_recipients(project, user, recipients):
notify_policies = models.NotifyPolicy.objects.filter(
user_id__in=recipients,
project=project,
web_notify_level=True).exclude(user_id=user.id).all()
return [notify_policy.user_id for notify_policy in notify_policies]
def _push_to_web_notifications(event_type, data, recipients,
serializer_class=None):
if not serializer_class:
serializer_class = serializers.ObjectNotificationSerializer
serializer = serializer_class(data)
for user_id in recipients:
with transaction.atomic():
models.WebNotification.objects.create(
event_type=event_type.value,
created=timezone.now(),
user_id=user_id,
data=serializer.data,
)
session_id = mw.get_current_session_id()
events.emit_event_for_user_notification(user_id,
session_id=session_id,
event_type=event_type.value,
data=serializer.data)
def on_assigned_to(sender, user, obj, **kwargs):
event_type = choices.WebNotificationType.assigned
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[obj.assigned_to.id])
_push_to_web_notifications(event_type, data, recipients)
def on_assigned_users(sender, user, obj, new_assigned_users, **kwargs):
event_type = choices.WebNotificationType.assigned
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[user_id for user_id in new_assigned_users])
_push_to_web_notifications(event_type, data, recipients)
def on_watchers_added(sender, user, obj, new_watchers, **kwargs):
event_type = choices.WebNotificationType.added_as_watcher
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user, new_watchers)
_push_to_web_notifications(event_type, data, recipients)
def on_members_added(sender, user, project, new_members, **kwargs):
serializer_class = serializers.NotificationDataSerializer
event_type = choices.WebNotificationType.added_as_member
data = {
"project": project,
"user": user,
}
recipients = _filter_recipients(project, user,
[member.user_id for member in new_members
if member.user_id])
_push_to_web_notifications(event_type, data, recipients, serializer_class)
def on_mentions(sender, user, obj, mentions, **kwargs):
content_type = ContentType.objects.get_for_model(obj)
valid_content_types = ['issue', 'task', 'userstory']
if content_type.model in valid_content_types:
event_type = choices.WebNotificationType.mentioned
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[user.id for user in mentions])
_push_to_web_notifications(event_type, data, recipients)
def on_comment_mentions(sender, user, obj, mentions, **kwargs):
event_type = choices.WebNotificationType.mentioned_in_comment
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[user.id for user in mentions])
_push_to_web_notifications(event_type, data, recipients)
def on_comment(sender, user, obj, watchers, **kwargs):
event_type = choices.WebNotificationType.comment
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user, watchers)
_push_to_web_notifications(event_type, data, recipients)
|
agpl-3.0
| -2,407,889,315,559,505,000
| 35.394366
| 80
| 0.625967
| false
| 4.056515
| false
| false
| false
|
kislerdm/alibava_analysis-tool
|
ilcinstall_eutel-git/ilcsoft/fastjet.py
|
2
|
2846
|
##################################################
#
# FastJet module
#
# Author: Andre Sailer, CERN
# based on GSL module by J. Engels, Desy
# Date: Jul, 2010
#
##################################################
# custom imports
from baseilc import BaseILC
from marlinpkg import MarlinPKG
from util import *
class FastJetClustering(MarlinPKG):
""" Responsible for the FastJetClustering installation process. """
def __init__(self, userInput):
MarlinPKG.__init__(self, "FastJetClustering", userInput )
# required modules
self.reqmodules = [ "Marlin", "MarlinUtil", "CLHEP", "GEAR", "GSL", "LCIO", "FastJet" ]
self.download.root = "marlinreco"
class FastJet(BaseILC):
""" Responsible for the FastJet installation process. """
def __init__(self, userInput):
BaseILC.__init__(self, userInput, "FastJet", "FastJet")
# no cmake build support
self.hasCMakeBuildSupport = False
self.download.supportHEAD = False
self.download.supportedTypes = ["wget"]
self.reqfiles = [[ "lib/libfastjet.so", "lib/libfastjet.a", "lib/libfastjet.dylib" ]]
def setMode(self, mode):
BaseILC.setMode(self, mode)
self.download.url = "http://www.lpthe.jussieu.fr/~salam/fastjet/repo/fastjet-" + self.version + ".tar.gz"
def downloadSources(self):
BaseILC.downloadSources(self)
# move sources to a subdirectory
os.renames( self.version, self.name )
os.renames( self.name, self.version + "/" + self.name )
# create build directory
trymakedir( self.installPath + "/build" )
def compile(self):
""" compile FastJet """
os.chdir( self.installPath + "/build" )
if( self.rebuild ):
os.system( "make distclean" )
if( os.system( "../" + self.name + "/configure --prefix=" + self.installPath + " --enable-shared 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to configure!!" )
if( os.system( "make ${MAKEOPTS} 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to compile!!" )
if( os.system( "make install 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to install!!" )
def cleanupInstall(self):
BaseILC.cleanupInstall(self)
os.chdir( self.installPath + "/build" )
os.system( "make clean" )
def postCheckDeps(self):
BaseILC.postCheckDeps(self)
self.env["FastJet_HOME"] = self.installPath
self.envpath["PATH"].append( "$FastJet_HOME/bin" )
self.envpath["LD_LIBRARY_PATH"].append( "$FastJet_HOME/lib" )
|
gpl-2.0
| -3,859,160,402,606,320,000
| 32.880952
| 156
| 0.543921
| false
| 3.667526
| false
| false
| false
|
cosminbasca/rdftools
|
rdftools/datagen/lubm_horizontal.py
|
1
|
3072
|
#
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from base import LubmGenerator, UniTriplesDistribution
from rdftools.gcityhash import city64
from rdftools.log import logger
from rdftools.tools import ParserVisitorTool
import io
__author__ = 'basca'
def _part((s, p, o), perm):
val = ''
for c in perm:
if c == 's':
val += '%s' % s
elif c == 'p':
val += '%s' % p
elif c == 'o':
val += '%s' % o
return val
PERMUTATIONS = ('s', 'p', 'o', 'sp', 'so', 'po', 'spo')
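# Illustrative sketch (hypothetical triple; the city64 value is shown symbolically):
# a triple is routed to a site by concatenating the components named in the
# permutation and hashing the result, e.g. for t = ('s1', 'p1', 'o1'):
#   _part(t, 'sp')                      # -> 's1p1'
#   city64(_part(t, 'sp')) % num_sites  # -> site index in [0, num_sites)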
class HashPartitioner(ParserVisitorTool):
def __init__(self, source_file, num_sites=0, permutation=None, **kwargs):
super(HashPartitioner, self).__init__(source_file, **kwargs)
if num_sites == 0:
            raise ValueError('num_sites cannot be 0')
self.num_sites = num_sites
if permutation not in PERMUTATIONS:
            raise ValueError('permutation must be one of {0}, instead got {1}'.format(PERMUTATIONS, permutation))
self._permutation = permutation
self.site_index = []
def on_visit(self, s, p, o, c):
site_idx = city64(_part((s, p, o), self._permutation)) % self.num_sites
self.site_index.append(site_idx)
def get_results(self, *args, **kwargs):
return self.site_index
"""
distribution process:
1) horizontal partitioning of all data (based on stars)
"""
class LubmHorizontal(LubmGenerator):
def __init__(self, output_path, sites, permutation='s', **kwargs):
super(LubmHorizontal, self).__init__(output_path, sites, **kwargs)
self._permutation = permutation
@property
def _distributor_type(self):
return UniHorizontal
def _distributor_kwargs(self, uni_id, uni_rdf):
return dict(permutation=self._permutation)
class UniHorizontal(UniTriplesDistribution):
def _distribute_triples(self, triples, permutation='s'):
logger.info('[distributing] university %s by %s', self.uni_name, permutation)
site_index = HashPartitioner(self.uni_rdf, num_sites=self.num_sites, permutation=permutation)()
site_triples = defaultdict(list)
sites = [0 for i in xrange(self.num_sites)]
for i, triple in enumerate(triples):
sites[site_index[i]] += 1
site_triples[site_index[i]].append(triple)
logger.info('university %s total triples = %s, distribution = %s', self.uni_rdf, len(triples), sites)
return site_triples
|
apache-2.0
| 1,727,386,674,842,596,900
| 32.769231
| 112
| 0.654297
| false
| 3.665871
| false
| false
| false
|
jergosh/slr_pipeline
|
bin/process_slr_sub.py
|
1
|
3289
|
from glob import glob
import os
from os import path
import itertools
import re
from Bio import AlignIO
import pandas
import sys
import copy
import argparse
from slr import *
species_RE = re.compile("([A-Z]+)")
yeast_RE = re.compile("Y[A-P][LR][0-9]{3}[WC]")
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return itertools.izip_longest(*args, fillvalue=fillvalue)
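# Worked example (illustrative): grouper chunks an iterable into fixed-size
# tuples, padding the last chunk with fillvalue; here it is used to walk an
# aligned sequence codon by codon:
#   list(grouper('ATG---GCT', 3))  # -> [('A','T','G'), ('-','-','-'), ('G','C','T')]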
argparser = argparse.ArgumentParser()
argparser.add_argument('--clade', metavar='clade', type=str, required=True)
argparser.add_argument('--slrroot', metavar='slr_root', type=str, required=True)
argparser.add_argument('--alnroot', metavar='aln_root', type=str, required=True)
argparser.add_argument('--outfile', metavar='out_file', type=str, required=True)
args = argparser.parse_args()
clade = args.clade
alndir = args.alnroot
slrroot = args.slrroot
slr_all = args.outfile
all_ids = []
all_data = [] # pandas.DataFrame(columns=colnames)
for aln_fn in glob(path.join(alndir, clade, "*", "*_prank.best.fas")):
basename = path.basename(aln_fn).rpartition('_')[0]
prefix = basename.partition('_')[0][:2]
# TODO Make sure colspecs work in all cases
# What if there are multiple human IDs in a single (split) tree?
# Are we allowed to potentially double count things like that?
aln = AlignIO.read(aln_fn, 'fasta')
# TODO refactor this into a function
# One way to get around this would be to decide separately which sequences are 'of interest'
for seqr in aln:
if args.clade == "yeast":
if yeast_RE.match(seqr.id) is None:
continue
else:
species = species_RE.match(seqr.id).groups()[0]
if species[:-1] != "ENS":
continue
all_ids.append(seqr.id)
for subset in [ "1", "2" ]:
slr_fn = path.join(slrroot, clade, prefix, basename+'_'+subset+'_matched.res')
if not path.exists(slr_fn):
print slr_fn, "doesn't exist!"
continue
slr = pandas.read_fwf(open(slr_fn), colspecs=colspecs, comment="\n")
idx = [ i for (i, codon) in enumerate(grouper(seqr.seq, 3)) if ''.join(codon) != '---' ]
slr_subset = copy.deepcopy(slr.ix[idx, :])
slr_subset.ix[:, 0] = idx
slr_subset.ix[:, 0] += 1
slr_out = file(path.join(slrroot, clade, prefix, seqr.id + '_' + basename + '_matched.res'), 'w')
slr_subset.to_csv(slr_out, quoting=False, index=False, sep='\t')
# slr_subset.insert(0, 'dataset', pandas.Series([basename]*slr_subset.shape[0]))
# slr_subset.insert(0, 'stable_id', pandas.Series([seqr.id]*slr_subset.shape[0]))
slr_subset['dataset'] = pandas.Series([basename+'_'+subset]*slr_subset.shape[0], index=slr_subset.index)
slr_subset['stable_id'] = pandas.Series([seqr.id]*slr_subset.shape[0], index=slr_subset.index)
slr_subset['human_idx'] = pandas.Series(range(1, slr_subset.shape[0]+1), index=slr_subset.index)
all_data.append(slr_subset)
all_data = pandas.concat(all_data)
all_data.rename(columns={"# Site": "Site"}, inplace=True)
all_data.to_csv(slr_all, quoting=False, index=False, sep='\t')
print min(all_data["Pval"]), max(all_data["Pval"])
|
gpl-2.0
| -5,675,201,279,259,004,000
| 36.375
| 116
| 0.626634
| false
| 3.111637
| false
| false
| false
|
glidernet/python-ogn-client
|
ogn/parser/utils.py
|
1
|
3369
|
from datetime import datetime, timedelta, timezone
import math
FEETS_TO_METER = 0.3048 # ratio feets to meter
FPM_TO_MS = FEETS_TO_METER / 60 # ratio fpm to m/s
KNOTS_TO_MS = 0.5144 # ratio knots to m/s
KPH_TO_MS = 0.27778 # ratio kph to m/s
HPM_TO_DEGS = 180 / 60 # ratio between half turn per minute and degrees/s
INCH_TO_MM = 25.4 # ratio inch to mm
def fahrenheit_to_celsius(fahrenheit):
return (fahrenheit - 32.0) * 5.0 / 9.0
def parseAngle(dddmmhht):
return float(dddmmhht[:3]) + float(dddmmhht[3:]) / 60
def createTimestamp(time_string, reference_timestamp):
if time_string[-1] == "z":
dd = int(time_string[0:2])
hh = int(time_string[2:4])
mm = int(time_string[4:6])
result = datetime(reference_timestamp.year,
reference_timestamp.month,
dd,
hh, mm, 0,
tzinfo=timezone.utc if reference_timestamp.tzinfo is not None else None)
# correct wrong month
if result > reference_timestamp + timedelta(days=14):
result = (result.replace(day=1) - timedelta(days=14)).replace(day=result.day)
elif result < reference_timestamp - timedelta(days=14):
result = (result.replace(day=28) + timedelta(days=14)).replace(day=result.day)
else:
hh = int(time_string[0:2])
mm = int(time_string[2:4])
ss = int(time_string[4:6])
result = datetime(reference_timestamp.year,
reference_timestamp.month,
reference_timestamp.day,
hh, mm, ss,
tzinfo=timezone.utc if reference_timestamp.tzinfo is not None else None)
if result > reference_timestamp + timedelta(hours=12):
# shift timestamp to previous day
result -= timedelta(days=1)
elif result < reference_timestamp - timedelta(hours=12):
# shift timestamp to next day
result += timedelta(days=1)
return result
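# Worked example (illustrative, naive datetimes): a beacon sent just before
# midnight but received just after it is shifted back one day, because the
# parsed time lies more than 12 hours after the reference timestamp:
#   ref = datetime(2015, 6, 16, 0, 5, 0)
#   createTimestamp('234955', ref)  # -> datetime(2015, 6, 15, 23, 49, 55)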
MATH_PI = 3.14159265359
class CheapRuler():
"""Extreme fast distance calculating for distances below 500km."""
def __init__(self, lat):
c = math.cos(lat * MATH_PI / 180)
c2 = 2 * c * c - 1
c3 = 2 * c * c2 - c
c4 = 2 * c * c3 - c2
c5 = 2 * c * c4 - c3
self.kx = 1000 * (111.41513 * c - 0.09455 * c3 + 0.00012 * c5) # longitude correction
self.ky = 1000 * (111.13209 - 0.56605 * c2 + 0.0012 * c4) # latitude correction
def distance(self, a, b):
"""Distance between point a and b. A point is a tuple(lon,lat)."""
dx = (a[0] - b[0]) * self.kx
dy = (a[1] - b[1]) * self.ky
return math.sqrt(dx * dx + dy * dy)
def bearing(self, a, b):
"""Returns the bearing from point a to point b."""
dx = (b[0] - a[0]) * self.kx
dy = (b[1] - a[1]) * self.ky
if dx == 0 and dy == 0:
return 0
result = math.atan2(-dy, dx) * 180 / MATH_PI + 90
return result if result >= 0 else result + 360
def normalized_quality(distance, signal_quality):
"""Signal quality normalized to 10km."""
return signal_quality + 20.0 * math.log10(distance / 10000.0) if distance > 0 else None
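# Usage sketch (illustrative coordinates): points are (lon, lat) tuples and
# distances are returned in metres, so normalized_quality rescales a signal
# report to the reference distance of 10 km:
#   ruler = CheapRuler(47.9)
#   d = ruler.distance((11.5, 47.9), (11.6, 47.95))
#   q10 = normalized_quality(d, 10.0)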
|
agpl-3.0
| 4,766,096,451,540,963,000
| 34.463158
| 98
| 0.551202
| false
| 3.430754
| false
| false
| false
|
modelblocks/modelblocks-release
|
resource-dundee/scripts/process_dundee.py
|
1
|
12539
|
import sys
import os
from numpy import nan
import pandas as pd
import argparse
#sys.stdin.reconfigure(encoding='latin-1',errors='replace') #'utf-8',errors='replace') #'ignore')
if __name__ == '__main__':
argparser = argparse.ArgumentParser('''
Extract eye-tracking time series from Dundee eye-tracking corpus source.
''')
argparser.add_argument('dundee_dir', help='Path to directory containing Dundee files.')
argparser.add_argument('lineitems_path', help='Path to file with space-tokenized stimulus sentences in order, one per line.')
argparser.add_argument('-v', '--verbose', action='store_true', help='Report verbose log')
argparser.add_argument('-w', '--warn', action='store_true', help='Report warnings to stderr')
args = argparser.parse_args()
textdata = []
if args.verbose:
sys.stderr.write('Processing stimulus data...\n')
sys.stderr.flush()
k = 0
with open(args.lineitems_path, 'r') as f:
for i, line in enumerate(f):
for j, w in enumerate(line.strip().split()):
textdata.append({
'word': w,
'sentid': i,
'sentpos': j + 1,
'startofsentence': int(j == 0)
})
k += 1
k = 0
start_ix = []
for p in sorted([x for x in os.listdir(args.dundee_dir) if x.endswith('wrdp.dat')]):
start_ix.append(k)
with open(args.dundee_dir + '/' + p, 'r', encoding='latin-1') as f:
for i, line in enumerate(f):
line = line.replace('(', '-LRB-').replace(')', '-RRB-')
fields = line.strip().split()
w = fields[0]
doc_id = int(fields[1]) - 1
screen_id = int(fields[2]) - 1
line_id = int(fields[3]) - 1
word_pos_in_line = int(fields[4]) - 1
word_pos_in_screen = int(fields[5]) - 1
word_pos_in_text = int(fields[12]) - 1
if word_pos_in_text == 0:
start_of_file = True
start_of_screen = True
start_of_line = True
elif word_pos_in_screen == 0:
start_of_file = False
start_of_screen = True
start_of_line = True
elif word_pos_in_line == 0:
start_of_file = False
start_of_screen = False
start_of_line = True
else:
start_of_file = False
start_of_screen = False
start_of_line = False
if args.warn and textdata[k]['word'] != w:
sys.stderr.write('WARNING: Saw mismatched words "%s" and "%s" at position %d.\n' % (textdata[k]['word'], w, k))
sys.stderr.flush()
textdata[k]['startoffile'] = int(start_of_file)
textdata[k]['startofscreen'] = int(start_of_screen)
textdata[k]['startofline'] = int(start_of_line)
k += 1
for kp1 in range(1, len(textdata) + 1):
if kp1 == len(textdata):
end_of_file = 1
end_of_screen = 1
end_of_line = 1
end_of_sentence = 1
else:
end_of_file = textdata[kp1]['startoffile']
end_of_screen = textdata[kp1]['startofscreen']
end_of_line = textdata[kp1]['startofline']
end_of_sentence = textdata[kp1]['startofsentence']
textdata[kp1-1]['endoffile'] = end_of_file
textdata[kp1-1]['endofscreen'] = end_of_screen
textdata[kp1-1]['endofline'] = end_of_line
textdata[kp1-1]['endofsentence'] = end_of_sentence
if args.verbose:
sys.stderr.write('Processing fixation data...\n')
sys.stderr.flush()
out = []
# Loop through fixations in order
for i, p in enumerate(sorted([x for x in os.listdir(args.dundee_dir) if x.endswith('ma1p.dat')])):
out_file = []
with open(args.dundee_dir + '/' + p, 'r', encoding='latin-1') as f:
subject = p[:2]
doc_id = int(p[2:4]) - 1
word_id_prev = -1
max_word_id = -1
time = 0
fdurSP = 0
fdurSPsummed = 0
fdurFP = 0
fdurGP = 0
fdurTT = 0
fp_cur = None
fp_blink_cur = None
gp_cur = None
gp_blink_cur = None
sp_cur = None
tt_cur = None
prev_was_blink = False
prev_was_offscreen = False
blinkFP = False
blinkGP = False
s = start_ix[doc_id]
npass = {}
wordid2firstfix = {}
nfix = 0
for j, line in enumerate(f):
line = line.replace('(', '-LRB-').replace(')', '-RRB-').replace('"', "'")
if j > 0:
fields = line.strip().split()
word_cur = fields[0]
word_id_cur = int(fields[6]) - 1
fdur_cur = float(fields[7])
isfix = False
isblink = False
isoffscreen = False
if word_cur.startswith('*'):
if word_cur == '*Blink':
isblink = True
elif word_cur == '*Off-screen':
isoffscreen = True
else:
raise ValueError('Unrecognized star (*) token: %s' % word_cur)
else:
if word_id_cur >= 0:
isfix = True
if isfix:
k = s + word_id_cur
if k in npass:
npass[k] += 1
else:
npass[k] = 1
if word_id_cur not in wordid2firstfix:
wordid2firstfix[word_id_cur] = nfix
if args.warn and textdata[k]['word'] != word_cur:
sys.stderr.write('WARNING: Saw mismatched words "%s" and "%s" at global position %d, file %s, line %d.\n' % (
textdata[k]['word'], word_cur, k, p, j))
sys.stderr.flush()
out_cur = {
'subject': subject,
'docid': doc_id,
'fdurSP': fdur_cur,
'blinkbeforefix': int(prev_was_blink),
'blinkafterfix': 0,
'offscreenbeforefix': int(prev_was_offscreen),
'offscreenafterfix': 0,
'wdelta': word_id_cur - word_id_prev,
'npass': npass[k],
'inregression': int(word_id_cur < max_word_id),
'time': time
}
out_file.append(out_cur)
tt_cur = out_file[wordid2firstfix[word_id_cur]]
if word_id_cur != word_id_prev:
sp_cur = out_cur
sp_blink_cur = out_cur
if word_id_cur > max_word_id:
fp_cur = out_cur
gp_cur = out_cur
fp_blink_cur = out_cur
gp_blink_cur = out_cur
elif word_id_cur < max_word_id:
fp_cur = None
fp_blink_cur = None
out_cur.update(textdata[k])
word_id_prev = word_id_cur
prev_was_blink = False
prev_was_offscreen = False
max_word_id = max(max_word_id, word_id_cur)
nfix += 1
else:
prev_was_blink = prev_was_blink or isblink
prev_was_offscreen = prev_was_offscreen or isoffscreen
if word_id_cur > 0 and isblink:
out_file[-1]['blinkafterfix'] = 1
if word_id_cur > 0 and isoffscreen:
out_file[-1]['offscreenafterfix'] = 1
sp_cur = None
sp_blink_cur = None
fp_cur = None
fp_blink_cur = None
gp_cur = None
gp_blink_cur = None
if sp_cur is not None:
if 'fdurSPsummed' in sp_cur:
sp_cur['fdurSPsummed'] += fdur_cur
else:
sp_cur['fdurSPsummed'] = fdur_cur
if sp_blink_cur is not None:
if 'blinkdurSPsummed' not in sp_blink_cur:
sp_blink_cur['blinkdurSPsummed'] = 0
sp_blink_cur['blinkduringSPsummed'] = 0
if isblink:
sp_blink_cur['blinkdurSPsummed'] += fdur_cur
sp_blink_cur['blinkduringSPsummed'] = 1
if fp_cur is not None:
if 'fdurFP' in fp_cur:
fp_cur['fdurFP'] += fdur_cur
else:
fp_cur['fdurFP'] = fdur_cur
if fp_blink_cur is not None:
if 'blinkdurFP' not in fp_blink_cur:
fp_blink_cur['blinkdurFP'] = 0
fp_blink_cur['blinkduringFP'] = 0
if isblink:
fp_blink_cur['blinkdurFP'] += fdur_cur
fp_blink_cur['blinkduringFP'] = 1
if gp_cur is not None:
if 'fdurGP' in gp_cur:
gp_cur['fdurGP'] += fdur_cur
else:
gp_cur['fdurGP'] = fdur_cur
if gp_blink_cur is not None:
if 'blinkdurGP' not in gp_blink_cur:
gp_blink_cur['blinkdurGP'] = 0
gp_blink_cur['blinkduringGP'] = 0
if isblink:
gp_blink_cur['blinkdurGP'] += fdur_cur
gp_blink_cur['blinkduringGP'] = 1
if tt_cur is not None:
if 'fdurTT' in tt_cur:
tt_cur['fdurTT'] += fdur_cur
else:
tt_cur['fdurTT'] = fdur_cur
time += fdur_cur / 1000
out += out_file
if args.verbose:
sys.stderr.write('Computing tabular output...\n')
sys.stderr.flush()
out = pd.DataFrame(out)
out.docid += 1
out['prevwasfix'] = (out['wdelta'] == 1).astype('int')
out['nextwasfix'] = (out['wdelta'] == -1).astype('int')
if args.verbose:
sys.stderr.write('Writing output...\n')
sys.stderr.flush()
toprint = [
'word',
'subject',
'docid',
'sentpos',
'sentid',
'time',
'wdelta',
'prevwasfix',
'nextwasfix',
'startoffile',
'endoffile',
'startofscreen',
'endofscreen',
'startofline',
'endofline',
'startofsentence',
'endofsentence',
'blinkbeforefix',
'blinkafterfix',
'offscreenbeforefix',
'offscreenafterfix',
'inregression',
'fdurSP',
'fdurSPsummed',
'blinkdurSPsummed',
'blinkduringSPsummed',
'fdurFP',
'blinkdurFP',
'blinkduringFP',
'fdurGP',
'blinkdurGP',
'blinkduringGP',
'fdurTT'
]
out[toprint].to_csv(sys.stdout, sep=' ', index=False, na_rep='NaN')
|
gpl-3.0
| -8,582,374,189,916,280,000
| 36.820433
| 137
| 0.412473
| false
| 4.23186
| false
| false
| false
|
jeremiedecock/snippets
|
python/hashlib_md5_sha/md5sum_file.py
|
1
|
2678
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import hashlib
import os
CHUNK_SIZE = 2**12
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description='Print or check MD5 checksums.')
parser.add_argument("filepaths", nargs='+', metavar="FILE", help="file to read")
args = parser.parse_args()
# COMPUTE HASHS ###########################################################
for file_path in args.filepaths:
if os.path.isfile(file_path):
with open(file_path, 'rb') as fd:
try:
hash_generator = hashlib.md5()
#hash_generator = hashlib.sha1()
#hash_generator = hashlib.sha256()
#hash_generator = hashlib.sha512()
data = fd.read(CHUNK_SIZE)
while len(data) > 0:
hash_generator.update(data)
data = fd.read(CHUNK_SIZE)
except:
print("{}: unknown error".format(file_path)) # TODO
finally:
fd.close()
hash_str = hash_generator.hexdigest()
print("{} {}".format(hash_str, file_path))
else:
if os.path.isdir(file_path):
print('"{}" is a directory'.format(file_path))
else:
print("unable to read {}".format(file_path))
if __name__ == '__main__':
main()
|
mit
| 7,653,002,989,695,408,000
| 37.782609
| 84
| 0.591181
| false
| 4.46
| false
| false
| false
|
RulersOfAsgard/ALAMO-worker
|
alamo_worker/plugins/__init__.py
|
1
|
6930
|
# -*- coding: utf-8 -*-
import asyncio
import logging
from datetime import datetime
from typing import Dict, List, Optional
import pkg_resources
from alamo_common import aiostats
from pytz import utc
from stevedore import NamedExtensionManager
from zmq.asyncio import ZMQEventLoop
from alamo_worker.conf import settings
from alamo_worker.plugins.evaluate import ResultEvaluator
logger = logging.getLogger(__name__)
EXCEPTION_MESSAGES = {
'TimeoutError': 'Timeout occurred during request.',
'EnvironmentFetchException': 'An error occurred in check {check_id}.',
'HttpProcessingError': 'Invalid response from {url}, status_code={code}, '
'message={message}.',
'ClientResponseError': 'Invalid response from {url}, status_code={code}, '
'message={message}, headers={headers}, '
'history={history}, request_info={request_info}.',
'NoSuchService': 'No such service in Consul for {url}.',
'ConnectionRefusedError': 'Connection to {host}:{port} refused.',
'ConnectionResetError': 'Connection with {host}:{port} was reset.',
'gaierror': 'Unknown hostname {host}',
}
class BasePlugin(object):
"""Base plugin implementation.
    ``_type`` is used to determine the type of the plugin.
"""
default_exception_pattern = 'Could not execute plugin for check {check_id}.' # noqa
_type = None
is_coroutine = False
def __init__(self, *args, **kwargs):
if self._type is None:
msg = ('Class ``{}`` does not provide '
'"_type" attribute.').format(self.__class__.__name__)
raise NotImplementedError(msg)
self.is_coroutine = asyncio.iscoroutinefunction(self.execute)
def supported_types(self) -> List:
return [] if self._type is None else [self._type]
def init(self, *args):
"""Additional plugin initialization."""
pass
def execute(
self, check: Dict, source: Dict, **context
):
raise NotImplementedError
async def _check_health(self):
"""Should raise an exception if health check failed"""
raise NotImplementedError
async def healthy(self):
status = True
try:
await self._check_health()
except Exception as e:
logger.error(
'Health check failed for %s plugin: %s', self._type, e
)
status = False
return self._type, status
def exception_repr(
self,
e: Exception,
check_id: int = None,
url: str = '',
method: str = '',
host: str = '',
port: int = None
):
pattern = EXCEPTION_MESSAGES.get(
e.__class__.__name__, self.default_exception_pattern)
code = getattr(e, 'code', '')
message = getattr(e, 'message', '')
headers = getattr(e, 'headers', '')
history = getattr(e, 'history', '')
request_info = getattr(e, 'request_info', '')
return pattern.format(
check_id=check_id, url=url, code=code, message=message,
headers=headers, history=history, request_info=request_info,
method=method, host=host, port=port,
)
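# Minimal subclass sketch (illustrative; 'dummy' is a hypothetical type name):
#   class DummyPlugin(BasePlugin):
#       _type = 'dummy'
#       async def execute(self, check, source, **context):
#           return some_result          # whatever the evaluator expects
#       async def _check_health(self):
#           pass                        # raise to report the plugin as unhealthy
# Because execute is a coroutine function here, __init__ sets is_coroutine and
# the manager awaits it instead of running it in an executor.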
class PluginManager(object):
"""Plugin manager for alamo worker.
    This class always returns the same object (Singleton pattern).
``_plugins`` keeps plugin objects
``_classes`` keeps plugin (class) reference to loaded plugin
"""
runner = None
plugin_namespace = 'pl.allegro.tech.monitoring.alamo_worker.plugins'
def __init__(self):
self._evaluator = ResultEvaluator()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_inst'):
            cls._inst = super(PluginManager, cls).__new__(cls)
return cls._inst
def load(self):
"""Load and instantiate plugins."""
for plug in pkg_resources.iter_entry_points(self.plugin_namespace):
logger.info("Loading %s", plug)
self.runner = NamedExtensionManager(
namespace=self.plugin_namespace,
names=settings.PLUGINS,
invoke_on_load=True
)
@aiostats.timer(metric_name='manager.dispatch')
async def dispatch(self,
loop: ZMQEventLoop,
payload: Dict) -> Optional[Dict]:
"""Dispatch which of available plugins should perform
data processing.
:param zmq.asyncio.ZMQEventLoop loop: asyncio loop
:param dict payload:
"""
check_id = payload.get('id')
project_id = payload.get('service_id')
payload['execution_time'] = datetime.now(tz=utc).isoformat()
payload['worker_fqdn'] = settings.WORKER_FQDN
plugins = {}
for plugin in self.runner:
for supported in plugin.obj.supported_types():
plugins[supported] = plugin.obj
context = {
'check_id': check_id,
'project_id': project_id
}
try:
self._evaluator.prepare_triggers(payload)
tasks = []
for source in payload['sources']:
source_type = source.get('type') or source.get('source_type')
context = {
'plugin': source_type,
**context
}
logger.info(
'Processing check: id="%s", source="%s"',
check_id,
source,
extra=context
)
try:
source_plugin = plugins[source_type]
except KeyError:
logger.error(
'Could not find plugin ``%s``.', source_type,
extra=context
)
continue
if source_plugin.is_coroutine:
tasks.append(
source_plugin.execute(payload, source, **context)
)
else:
tasks.append(loop.run_in_executor(
None, source_plugin.execute, payload, source
))
if not tasks:
return
results = await asyncio.gather(
*tasks, loop=loop
) # type: List[AbstractResult]
return self._evaluator.evaluate(payload, results)
except Exception as e:
aiostats.increment.incr(
'manager.errors.{}'.format(e.__class__.__name__)
)
logger.error(
'Critical error occur for check `%s`. `%s`', check_id, e,
extra=context
)
async def get_plugin_health(self):
tasks = [plugin.obj.healthy() for plugin in self.runner]
return dict(await asyncio.gather(*tasks))
|
apache-2.0
| 5,531,675,341,325,306,000
| 32.478261
| 88
| 0.546898
| false
| 4.547244
| false
| false
| false
|
miltonsarria/dsp-python
|
qt/ejemploQt2.py
|
1
|
3671
|
from __future__ import unicode_literals
import os
import random
import sys
from numpy import arange, sin, pi
import matplotlib
#matplotlib.use("Qt4Agg")
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas)
from matplotlib.backends.qt_compat import QtCore, QtGui
from matplotlib.figure import Figure
progname = os.path.basename(sys.argv[0])
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
self.compute_initial_figure()
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
self.axes.plot(t, s)
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')
def update_figure(self):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
l = [random.randint(0, 10) for i in range(4)]
self.axes.cla()
self.axes.plot([0, 1, 2, 3], l, 'r')
self.draw()
class ApplicationWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("application main window")
self.file_menu = QtGui.QMenu('&File', self)
self.file_menu.addAction('&Quit', self.fileQuit,
QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.help_menu = QtGui.QMenu('&Help', self)
self.menuBar().addSeparator()
self.menuBar().addMenu(self.help_menu)
self.help_menu.addAction('&About', self.about)
self.main_widget = QtGui.QWidget(self)
l = QtGui.QVBoxLayout(self.main_widget)
sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(sc)
l.addWidget(dc)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
self.statusBar().showMessage("All hail matplotlib!", 2000)
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
def about(self):
QtGui.QMessageBox.about(self, "About",
"""embedding_in_qt4.py example
Copyright 2005 Florent Rougon, 2006 Darren Dale
This program is a simple example of a Qt4 application embedding matplotlib
canvases.
It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation."""
)
qApp = QtGui.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
#qApp.exec_()
|
mit
| 8,202,107,306,772,943,000
| 28.604839
| 77
| 0.629256
| false
| 3.641865
| false
| false
| false
|
enigmampc/catalyst
|
catalyst/gens/utils.py
|
1
|
1990
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numbers
from hashlib import md5
from datetime import datetime
from catalyst.protocol import DATASOURCE_TYPE
from six import iteritems, b
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest()
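# Worked example (illustrative): positional args are joined with '_', keyword
# args as key=value pairs, and the two halves are joined with ':' before hashing:
#   hash_args(1, 'a', x=2)  # md5 of b'1_a:x=2' -> a 32-character hex digest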
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
assert event.dt.tzinfo == pytz.utc
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
assert isinstance(event.dt, datetime)
def assert_datasource_unframe_protocol(event):
"""Assert that an event is valid output of zp.DATASOURCE_UNFRAME."""
assert event.type in DATASOURCE_TYPE
|
apache-2.0
| 8,942,561,567,197,044,000
| 31.096774
| 79
| 0.711558
| false
| 3.964143
| false
| false
| false
|
bambooforest/segments
|
src/segments/tree.py
|
1
|
2093
|
from segments.errors import replace
class TreeNode(object):
"""
Private class that creates the tree data structure from the orthography profile for
parsing.
"""
def __init__(self, char, sentinel=False):
self.char = char
self.children = {}
self.sentinel = sentinel
class Tree(object):
def __init__(self, graphemes):
def _multigraph(node, line):
# Internal function to add a multigraph starting at node.
for char in line:
node = node.children.setdefault(char, TreeNode(char))
node.sentinel = True
self.root = TreeNode('', sentinel=True)
for grapheme in graphemes:
_multigraph(self.root, grapheme)
def parse(self, line, error=replace):
res, idx = self._parse(self.root, line, 0)
rem = line[idx:]
while rem:
# Chop off one character and try parsing the remainder:
res.append(error(rem[0]))
rem = rem[1:]
r, i = self._parse(self.root, rem, 0)
res.extend(r)
rem = rem[i:]
return res
def _parse(self, root, line, idx):
"""
:param root: Tree node.
:param line: String to parse.
:param idx: Global counter of characters parsed.
:return: (list of parsed graphemes, incremented character count)
"""
# Base (or degenerate..) case.
if len(line) == 0:
return [], idx
parse = []
curr = 0
node = root
cidx = idx
while curr < len(line):
node = node.children.get(line[curr])
curr += 1
if not node:
break
if node.sentinel:
subparse, cidx = self._parse(root, line[curr:], idx + curr)
# Always keep the latest valid parse, which will be
# the longest-matched (greedy match) graphemes.
parse = [line[:curr]]
parse.extend(subparse)
if parse:
idx = cidx
return parse, idx
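# Usage sketch (illustrative grapheme inventory): parsing is greedy, so the
# longest matching grapheme wins at each position:
#   t = Tree(['a', 'ab', 'c'])
#   t.parse('abc')  # -> ['ab', 'c']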
|
apache-2.0
| -2,984,269,760,610,760,700
| 29.779412
| 87
| 0.530817
| false
| 4.169323
| false
| false
| false
|
gam17/QAD
|
qad_mbuffer_fun.py
|
1
|
1950
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ok
QAD Quantum Aided Design plugin
funzioni per stirare oggetti grafici
-------------------
begin : 2013-11-11
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.core import *
from . import qad_utils
from .qad_msg import QadMsg
from .qad_variables import QadVariables
from .qad_multi_geom import *
#===============================================================================
# buffer
#===============================================================================
def buffer(qadGeom, distance):
"""
Returns a buffer region around this geometry having the given distance.
"""
g = qadGeom.asGeom()
nSegments = QadVariables.get(QadMsg.translate("Environment variables", "ARCMINSEGMENTQTY"), 12)
bufferedGeom = g.buffer(distance, nSegments)
if bufferedGeom.isEmpty(): return None
return fromQgsGeomToQadGeom(bufferedGeom)
|
gpl-3.0
| -2,696,228,024,097,002,000
| 36.27451
| 98
| 0.410769
| false
| 5.227882
| false
| false
| false
|
qedsoftware/commcare-hq
|
corehq/tests/noseplugins/timing.py
|
1
|
3440
|
"""A test timing plugin for nose
Usage: ./manage.py test --with-timing --timing-file=/path/to/timing.csv
"""
import csv
import sys
import time
from nose.plugins import Plugin
from corehq.tests.noseplugins.uniformresult import uniform_description
class TimingPlugin(Plugin):
"""A plugin to measure times of testing events
Measure elapsed time before setup, during setup, during test, and
during teardown events. Outputs the results as CSV.
"""
name = "timing"
def options(self, parser, env):
"""Register commandline options.
"""
super(TimingPlugin, self).options(parser, env)
parser.add_option('--timing-file', action='store',
dest='timing_file',
metavar="FILE",
default=env.get('NOSE_TIMING_FILE'),
help='Timing output file (CSV); default is STDOUT')
parser.add_option('--pretty-timing', action='store_true',
dest='pretty_output',
default=env.get('NOSE_PRETTY_TIMING'),
help='Print timing info in a format that is better '
'for reviewing in text mode (not CSV).')
def configure(self, options, conf):
"""Configure plugin.
"""
super(TimingPlugin, self).configure(options, conf)
self.conf = conf
self.timing_file = options.timing_file
self.pretty_output = options.pretty_output
def begin(self):
self.output = (open(self.timing_file, "w")
if self.timing_file else sys.__stdout__)
if not self.pretty_output:
self.csv = csv.writer(self.output)
self.csv.writerow(["event", "name", "elapsed time", "start time"])
self.event_start = time.time()
global PLUGIN_INSTANCE
PLUGIN_INSTANCE = self
def finalize(self, result):
if self.output is not None:
self.output.close()
def end_event(self, event, context):
now = time.time()
name = uniform_description(context)
if self.pretty_output:
self.output.write("{time:>-6,.2f} {event} {name}\n".format(
event=event,
name=name,
time=now - self.event_start,
))
else:
self.csv.writerow([
event,
name,
now - self.event_start,
self.event_start,
])
self.event_start = now
def startContext(self, context):
# called before context setup
self.end_event("before", context)
def startTest(self, case):
# called before test is started
self.end_event("setup", case.test)
def stopTest(self, case):
# called on test completion
self.end_event("run", case.test)
def stopContext(self, context):
# called after context teardown
self.end_event("teardown", context)
PLUGIN_INSTANCE = None
def end_event(name, context):
"""Signal the end of a custom timing event
Use to add arbitrary "events" anywhere in the code to isolate
sources of slowness during profiling. This function terminates the
given event name and immediately begins the next (as yet unnamed)
    event. Requires the `TimingPlugin` to be enabled.
"""
PLUGIN_INSTANCE.end_event(name, context)
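# Usage sketch (illustrative): with the plugin enabled, a slow helper can be
# split into named spans; the context is typically the current test or module:
#   from corehq.tests.noseplugins.timing import end_event
#   ...                                   # expensive fixture loading
#   end_event("load-fixtures", context)   # closes this span, starts the next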
|
bsd-3-clause
| -7,585,139,054,977,179,000
| 32.076923
| 78
| 0.580233
| false
| 4.305382
| true
| false
| false
|
cudevmaxwell/SwiftBulkUploader
|
swiftbulkuploader/prepareupload.py
|
1
|
3048
|
import sys
import olrcdb
import os
import datetime
from bulkupload import env_vars_set
# Globals
COUNT = 0
FAILED = 0
REQUIRED_VARIABLES = [
"MYSQL_HOST",
"MYSQL_USER",
"MYSQL_PASSWD",
"MYSQL_DB",
]
def prepare_upload(connect, directory, table_name):
    '''Given a database connection, directory and table_name:
    - create the table in the database
    - populate the table with (path, uploaded=false)
    where each path is a file in the given directory.'''
global COUNT, FAILED
# Loop through all items in the directory.
for filename in os.listdir(directory):
file_path = os.path.join(directory, filename)
# Add file name to the list.
if os.path.isfile(file_path):
try:
connect.insert_path(file_path, table_name)
COUNT += 1
except:
# Try again with the alternative query.
try:
connect.insert_path(file_path, table_name, True)
COUNT += 1
except:
FAILED += 1
error_log = open(table_name + '.prepare.error.log', 'a')
error_log.write("\rFailed: {0}\n".format(file_path))
error_log.close()
sys.stdout.flush()
sys.stdout.write("\r{0} parsed. ".format(COUNT))
#Output status to a file.
final_count = open(table_name + ".prepare.out", 'w+')
final_count.write("\r{0} parsed. ".format(COUNT))
final_count.close()
# Recursive call for sub directories.
else:
prepare_upload(connect, file_path, table_name)
if __name__ == "__main__":
# Check for proper parameters
if len(sys.argv) != 3:
sys.stderr.write(
'Usage: python prepareupload.py path-to-directory table-name\n'
)
sys.exit(1)
else:
table_name = sys.argv[2]
directory = sys.argv[1]
# Check required environment variables have been set
if not env_vars_set():
set_env_message = "The following environment variables need to be " \
"set:\n"
set_env_message += " \n".join(REQUIRED_VARIABLES)
set_env_message += "\nPlease set these environment variables to " \
"connect to the OLRC."
print(set_env_message)
exit(0)
#Open error log:
error_log = open(table_name + '.prepare.error.log', 'w+')
error_log.write("From execution {0}:\n".format(
str(datetime.datetime.now())
))
error_log.close()
connect = olrcdb.DatabaseConnection()
connect.create_table(table_name)
prepare_upload(connect, directory, table_name)
sys.stdout.flush()
sys.stdout.write("\r{0} parsed. ".format(COUNT))
if FAILED != 0:
sys.stdout.write("\n{0} FAILED. See error.log.".format(FAILED))
#Log the final count
final_count = open(table_name + ".prepare.out", 'w+')
final_count.write("\r{0} parsed. ".format(COUNT))
final_count.close()
|
bsd-3-clause
| 6,526,452,972,449,950,000
| 28.038095
| 77
| 0.574475
| false
| 3.814768
| false
| false
| false
|
dcelisgarza/applied_math
|
solar_system/animatep2.py
|
1
|
1987
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as anm
#plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
plt.close('all')
data = np.loadtxt('solar_system.dat')
data2 = data[:,0:15]
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlim3d([np.min(data2[:,0::3]), np.max(data2[:,0::3])])
ax.set_xlabel('X')
ax.set_ylim3d([np.min(data2[:,1::3]), np.max(data2[:,1::3])])
ax.set_ylabel('Y')
ax.set_zlim3d([np.min(data2[:,2::3]), np.max(data2[:,2::3])])
ax.set_zlabel('Z')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, np.size(data2[0,:])/3))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c)
for c in colors], [])
ax.view_init(30, 0)
data3 = np.reshape(data2,(np.size(data2[0,:])/3,np.size(data2[:,0]),3))
n = 0
for i in np.arange(0,int(np.size(data2[0,:])/3),1):
data3[i,:,0:3] = data2[:,i+n:i+n+3]
n = n + 2
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
    return pts + lines
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
#i = (2 * i) % data3.shape[1]
for line, pt, xi in zip(lines, pts, data3):
x, y, z = xi[:i,0:3].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return pts + lines
anim = anm.FuncAnimation(fig, animate, init_func=init,
frames=int(np.size(data2[:,0])), interval=1, blit=True)
writer = anm.writers['ffmpeg'](fps=30)
anim.save('inner_sol_sys.mp4', writer = writer)#, 'ffmpeg_file', fps=15, extra_args=['-vcodec', 'libx264']
|
mit
| -6,319,089,473,703,924,000
| 26.985915
| 106
| 0.574233
| false
| 2.614474
| false
| false
| false
|
City-of-Helsinki/smbackend
|
observations/models.py
|
1
|
6698
|
import binascii
import os
import rest_framework.authentication
import rest_framework.authtoken.models
from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from polymorphic.models import PolymorphicModel
from rest_framework import exceptions
from services import models as services_models
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
class ObservableProperty(models.Model):
"""Specifies the detailed interpretation of observations.
Includes the unit of measurement.
Observations can only be made on units which have a service that
is linked to an ObservableProperty. For example, only units which
are ice-skating fields can have observations with the property
"ice condition" or something similar.
"""
# TODO move back to sequential id field
id = models.CharField(max_length=50, primary_key=True)
name = models.CharField(max_length=100, null=False, blank=False, db_index=True)
measurement_unit = models.CharField(max_length=20, null=True, blank=False)
expiration = models.DurationField(blank=True, null=True)
# todo: change to services
services = models.ManyToManyField(
services_models.Service, related_name="observable_properties"
)
observation_type = models.CharField(max_length=80, null=False, blank=False)
def __str__(self):
return "%s (%s)" % (self.name, self.id)
def get_observation_model(self):
return apps.get_model(self.observation_type)
def get_observation_type(self):
return self.get_observation_model().get_type()
def create_observation(self, **validated_data):
return self.get_observation_model().objects.create(**validated_data)
def get_internal_value(self, value):
return self.get_observation_model().get_internal_value(self, value)
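# Usage sketch (illustrative, hypothetical identifiers and objects): the
# property decides which concrete Observation model is used and how raw
# values are resolved:
#   prop = ObservableProperty.objects.get(id='ice_condition')
#   value = prop.get_internal_value('good')
#   prop.create_observation(unit=unit, time=now, value=value, property=prop, auth=token)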
class AllowedValue(models.Model):
# Currently only works for categorical observations
identifier = models.CharField(max_length=50, null=True, blank=False, db_index=True)
quality = models.CharField(
max_length=50, null=True, blank=False, db_index=True, default="unknown"
)
name = models.CharField(max_length=100, null=True, blank=False, db_index=True)
description = models.TextField(null=False, blank=False)
property = models.ForeignKey(
ObservableProperty,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="allowed_values",
)
class Meta:
unique_together = (("identifier", "property"),)
class Observation(PolymorphicModel):
"""An observation is a measured/observed value of
a property of a unit at a certain time.
"""
value = models.ForeignKey(
AllowedValue,
blank=False,
null=True,
on_delete=models.PROTECT,
related_name="instances",
)
time = models.DateTimeField(
db_index=True, help_text="Exact time the observation was made"
)
unit = models.ForeignKey(
services_models.Unit,
blank=False,
null=False,
on_delete=models.PROTECT,
help_text="The unit the observation is about",
related_name="observation_history",
)
units = models.ManyToManyField(
services_models.Unit, through="UnitLatestObservation"
)
auth = models.ForeignKey("PluralityAuthToken", null=True, on_delete=models.PROTECT)
property = models.ForeignKey(
ObservableProperty,
blank=False,
null=False,
on_delete=models.PROTECT,
help_text="The property observed",
)
class Meta:
ordering = ["-time"]
class CategoricalObservation(Observation):
def get_external_value(self):
return self.value.identifier
@staticmethod
def get_type():
return "categorical"
@staticmethod
def get_internal_value(oproperty, value):
if value is None:
return None
return oproperty.allowed_values.get(identifier=value)
class DescriptiveObservation(Observation):
def get_external_value(self):
return self.value
@staticmethod
def get_type():
return "descriptive"
@staticmethod
def get_internal_value(oproperty, value):
return AllowedValue.objects.create(property=oproperty, **value)
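# Illustrative contrast (hypothetical values): a categorical property resolves
# the submitted value against its existing AllowedValues, while a descriptive
# property stores a new AllowedValue built from the payload:
#   CategoricalObservation.get_internal_value(prop, 'good')
#       # -> prop.allowed_values.get(identifier='good')
#   DescriptiveObservation.get_internal_value(prop, {'description': 'thin ice'})
#       # -> AllowedValue.objects.create(property=prop, description='thin ice')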
class UnitLatestObservation(models.Model):
unit = models.ForeignKey(
services_models.Unit,
null=False,
blank=False,
related_name="latest_observations",
on_delete=models.CASCADE,
)
property = models.ForeignKey(
ObservableProperty, null=False, blank=False, on_delete=models.CASCADE
)
observation = models.ForeignKey(
Observation, null=False, blank=False, on_delete=models.CASCADE
)
class Meta:
unique_together = (("unit", "property"),)
class PluralityAuthToken(models.Model):
"""
A token class which can have multiple active tokens per user.
"""
key = models.CharField(max_length=40, primary_key=False, db_index=True)
user = models.ForeignKey(
AUTH_USER_MODEL,
related_name="auth_tokens",
null=False,
on_delete=models.PROTECT,
)
created = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
class Meta:
# Work around for a bug in Django:
# https://code.djangoproject.com/ticket/19422
#
# Also see corresponding ticket:
# https://github.com/tomchristie/django-rest-framework/issues/705
abstract = "rest_framework.authtoken" not in settings.INSTALLED_APPS
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
return super(PluralityAuthToken, self).save(*args, **kwargs)
def generate_key(self):
return binascii.hexlify(os.urandom(20)).decode()
def __str__(self):
return self.key
class PluralityTokenAuthentication(rest_framework.authentication.TokenAuthentication):
model = PluralityAuthToken
def authenticate_credentials(self, key):
user, token = super(
PluralityTokenAuthentication, self
).authenticate_credentials(key)
if not token.active:
raise exceptions.AuthenticationFailed(_("Token inactive or deleted."))
return token.user, token
class UserOrganization(models.Model):
organization = models.ForeignKey(
services_models.Department, on_delete=models.CASCADE
)
user = models.OneToOneField(
AUTH_USER_MODEL,
related_name="organization",
null=False,
on_delete=models.CASCADE,
)
|
agpl-3.0
| -9,209,405,566,981,158,000
| 29.724771
| 87
| 0.674231
| false
| 4.099143
| false
| false
| false
|
analyst-collective/dbt
|
test/unit/test_linker.py
|
1
|
5807
|
import os
import tempfile
import unittest
from unittest import mock
from dbt import compilation
try:
from queue import Empty
except ImportError:
from Queue import Empty
from dbt.graph.selector import NodeSelector
from dbt.graph.cli import parse_difference
def _mock_manifest(nodes):
config = mock.MagicMock(enabled=True)
manifest = mock.MagicMock(nodes={
n: mock.MagicMock(
unique_id=n,
package_name='pkg',
name=n,
empty=False,
config=config,
fqn=['pkg', n],
) for n in nodes
})
manifest.expect.side_effect = lambda n: mock.MagicMock(unique_id=n)
return manifest
class LinkerTest(unittest.TestCase):
def setUp(self):
self.linker = compilation.Linker()
def test_linker_add_node(self):
expected_nodes = ['A', 'B', 'C']
for node in expected_nodes:
self.linker.add_node(node)
actual_nodes = self.linker.nodes()
for node in expected_nodes:
self.assertIn(node, actual_nodes)
self.assertEqual(len(actual_nodes), len(expected_nodes))
def test_linker_write_graph(self):
expected_nodes = ['A', 'B', 'C']
for node in expected_nodes:
self.linker.add_node(node)
manifest = _mock_manifest('ABC')
(fd, fname) = tempfile.mkstemp()
os.close(fd)
try:
self.linker.write_graph(fname, manifest)
assert os.path.exists(fname)
finally:
os.unlink(fname)
def assert_would_join(self, queue):
"""test join() without timeout risk"""
self.assertEqual(queue.inner.unfinished_tasks, 0)
def _get_graph_queue(self, manifest, include=None, exclude=None):
graph = compilation.Graph(self.linker.graph)
selector = NodeSelector(graph, manifest)
spec = parse_difference(include, exclude)
return selector.get_graph_queue(spec)
def test_linker_add_dependency(self):
actual_deps = [('A', 'B'), ('A', 'C'), ('B', 'C')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
queue = self._get_graph_queue(_mock_manifest('ABC'))
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'C')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertFalse(queue.empty())
queue.mark_done('C')
self.assertFalse(queue.empty())
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'B')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertFalse(queue.empty())
queue.mark_done('B')
self.assertFalse(queue.empty())
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'A')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertTrue(queue.empty())
queue.mark_done('A')
self.assert_would_join(queue)
self.assertTrue(queue.empty())
def test_linker_add_disjoint_dependencies(self):
actual_deps = [('A', 'B')]
additional_node = 'Z'
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.linker.add_node(additional_node)
queue = self._get_graph_queue(_mock_manifest('ABCZ'))
# the first one we get must be B, it has the longest dep chain
first = queue.get(block=False)
self.assertEqual(first.unique_id, 'B')
self.assertFalse(queue.empty())
queue.mark_done('B')
self.assertFalse(queue.empty())
second = queue.get(block=False)
self.assertIn(second.unique_id, {'A', 'Z'})
self.assertFalse(queue.empty())
queue.mark_done(second.unique_id)
self.assertFalse(queue.empty())
third = queue.get(block=False)
self.assertIn(third.unique_id, {'A', 'Z'})
with self.assertRaises(Empty):
queue.get(block=False)
self.assertNotEqual(second.unique_id, third.unique_id)
self.assertTrue(queue.empty())
queue.mark_done(third.unique_id)
self.assert_would_join(queue)
self.assertTrue(queue.empty())
def test_linker_dependencies_limited_to_some_nodes(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
queue = self._get_graph_queue(_mock_manifest('ABCD'), ['B'])
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'B')
self.assertTrue(queue.empty())
queue.mark_done('B')
self.assert_would_join(queue)
queue_2 = queue = self._get_graph_queue(_mock_manifest('ABCD'), ['A', 'B'])
got = queue_2.get(block=False)
self.assertEqual(got.unique_id, 'B')
self.assertFalse(queue_2.empty())
with self.assertRaises(Empty):
queue_2.get(block=False)
queue_2.mark_done('B')
self.assertFalse(queue_2.empty())
got = queue_2.get(block=False)
self.assertEqual(got.unique_id, 'A')
self.assertTrue(queue_2.empty())
with self.assertRaises(Empty):
queue_2.get(block=False)
self.assertTrue(queue_2.empty())
queue_2.mark_done('A')
self.assert_would_join(queue_2)
def test__find_cycles__cycles(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'A')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.assertIsNotNone(self.linker.find_cycles())
def test__find_cycles__no_cycles(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.assertIsNone(self.linker.find_cycles())
|
apache-2.0
| 4,797,753,715,547,958,000
| 30.906593
| 83
| 0.585845
| false
| 3.633917
| true
| false
| false
|
dgdell/enigma2
|
lib/python/Plugins/SystemPlugins/SoftwareManager/SoftwareTools.py
|
1
|
9326
|
# -*- coding: iso-8859-1 -*-
from enigma import eConsoleAppContainer
from Components.Console import Console
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.Sources.List import List
from Components.Ipkg import IpkgComponent
from Components.Network import iNetwork
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_METADIR
from Tools.HardwareInfo import HardwareInfo
from time import time
class SoftwareTools(PackageInfoHandler):
lastDownloadDate = None
NetworkConnectionAvailable = None
list_updating = False
available_updates = 0
available_updatelist = []
available_packetlist = []
installed_packetlist = {}
def __init__(self):
aboutInfo = about.getImageVersionString()
if aboutInfo.startswith("dev-"):
self.ImageVersion = 'Experimental'
else:
self.ImageVersion = 'Stable'
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
PackageInfoHandler.__init__(self, self.statusCallback, neededTag = 'ALL_TAGS', neededFlag = self.ImageVersion)
self.directory = resolveFilename(SCOPE_METADIR)
self.list = List([])
self.NotifierCallback = None
self.Console = Console()
self.UpdateConsole = Console()
self.cmdList = []
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
def statusCallback(self, status, progress):
pass
def startSoftwareTools(self, callback = None):
if callback is not None:
self.NotifierCallback = callback
iNetwork.checkNetworkState(self.checkNetworkCB)
def checkNetworkCB(self,data):
if data is not None:
if data <= 2:
self.NetworkConnectionAvailable = True
self.getUpdates()
else:
self.NetworkConnectionAvailable = False
self.getUpdates()
def getUpdates(self, callback = None):
if self.lastDownloadDate is None:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
else:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
if self.list_updating and callback is not None:
self.NotifierCallback = callback
self.startIpkgListAvailable()
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback(False)
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.startIpkgListAvailable()
pass
def startIpkgListAvailable(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list"
self.UpdateConsole.ePopen(cmd, self.IpkgListAvailableCB, callback)
def IpkgListAvailableCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.list_updating:
self.available_packetlist = []
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
self.available_packetlist.append([name, version, descr])
if callback is None:
self.startInstallMetaPackage()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startInstallMetaPackage(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if self.NetworkConnectionAvailable == True:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " install enigma2-meta enigma2-plugins-meta enigma2-skins-meta"
self.UpdateConsole.ePopen(cmd, self.InstallMetaPackageCB, callback)
else:
self.InstallMetaPackageCB(True)
def InstallMetaPackageCB(self, result, retval = None, extra_args = None):
(callback) = extra_args
if result:
self.fillPackagesIndexList()
if callback is None:
self.startIpkgListInstalled()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startIpkgListInstalled(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.UpdateConsole.ePopen(cmd, self.IpkgListInstalledCB, callback)
def IpkgListInstalledCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
for package in self.packagesIndexlist[:]:
if not self.verifyPrerequisites(package[0]["prerequisites"]):
self.packagesIndexlist.remove(package)
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
if attributes.has_key("packagetype"):
if attributes["packagetype"] == "internal":
self.packagesIndexlist.remove(package)
if callback is None:
self.countUpdates()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def countUpdates(self, callback = None):
self.available_updates = 0
self.available_updatelist = []
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
packagename = attributes["packagename"]
for x in self.available_packetlist:
if x[0] == packagename:
if self.installed_packetlist.has_key(packagename):
if self.installed_packetlist[packagename] != x[1]:
self.available_updates +=1
self.available_updatelist.append([packagename])
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
elif self.NotifierCallback is not None:
self.NotifierCallback(True)
self.NotifierCallback = None
def startIpkgUpdate(self, callback = None):
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " update"
self.Console.ePopen(cmd, self.IpkgUpdateCB, callback)
def IpkgUpdateCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.Console:
if len(self.Console.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
def cleanupSoftwareTools(self):
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback = None
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
if self.UpdateConsole is not None:
if len(self.UpdateConsole.appContainers):
for name in self.UpdateConsole.appContainers.keys():
self.UpdateConsole.kill(name)
def verifyPrerequisites(self, prerequisites):
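		# A package is accepted unless it declares a "hardware" prerequisite list
		# that does not include the current device name.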
if prerequisites.has_key("hardware"):
hardware_found = False
for hardware in prerequisites["hardware"]:
if hardware == HardwareInfo().device_name:
hardware_found = True
if not hardware_found:
return False
return True
iSoftwareTools = SoftwareTools()
|
gpl-2.0
| 2,123,058,099,230,987,800
| 32.426523
| 112
| 0.710165
| false
| 3.40738
| false
| false
| false
|
questrail/pycan
|
tests/test_kvaser.py
|
1
|
3371
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 The pycan developers. All rights reserved.
# Project site: https://github.com/questrail/pycan
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
import os
import time
import threading
import unittest
import ConfigParser
import pycan.drivers.kvaser as driver
from pycan.common import CANMessage
class KvaserTests(unittest.TestCase):
def tearDown(self):
try:
self.driver.bus_off()
self.driver.shutdown()
time.sleep(2)
except:
pass
def __load_test_config(self):
test_path = os.path.dirname(os.path.abspath(__file__))
config = ConfigParser.ConfigParser()
config.read(os.path.join(test_path, 'test.cfg'))
self.known_can_id = int(config.get('COMMON', 'Known_ID_On_Bus'), 16)
def testPEP8Compliance(self):
# Ensure PEP8 is installed
try:
import pep8
except ImportError:
self.fail(msg="PEP8 not installed.")
# Check the CAN driver
driver_path = os.path.dirname(driver.__file__)
driver_file = os.path.abspath(os.path.join(driver_path, 'kvaser.py'))
pep8_checker = pep8.Checker(driver_file)
violation_count = pep8_checker.check_all()
error_message = "PEP8 violations found: %d" % (violation_count)
        self.assertTrue(violation_count == 0, msg=error_message)
def testDriver(self):
# Load the real time test configuration
self.__load_test_config()
# Setup the driver
self.driver = driver.Kvaser()
# Run the driver specific tests if and only if the driver was setup
self.Transmit()
self.Receive()
self.SpecificReceive()
def Transmit(self):
# Note you must also check that the CAN message is being placed
# on the wire at 100ms intervals
messages_to_send = 50
msg1 = CANMessage(0x123456, [1,2,3])
for x in range(messages_to_send):
time.sleep(0.1)
msg = "Failed to send message {x}".format(x=x)
self.assertTrue(self.driver.send(msg1), msg)
self.assertEqual(self.driver.life_time_sent(), messages_to_send)
def Receive(self):
messages_to_receive = 25
# Check that the life time received hasn't been updated yet
self.assertEqual(self.driver.life_time_received(), 0)
# Read back a fixed number of messages and check that the lifetime
# values track the next_message call
read_messages = 0
for x in range(messages_to_receive):
if self.driver.next_message():
self.assertEqual((x+1), self.driver.life_time_received())
def SpecificReceive(self):
messages_to_receive = 10
actual_messaged_received = 0
max_specific_attempts = 1000
# Keep reading from the bus until we find the required messages
read_messages = 0
for x in range(max_specific_attempts):
msg = self.driver.next_message()
            if msg and msg.id == self.known_can_id:
actual_messaged_received += 1
if actual_messaged_received == messages_to_receive:
                    break
self.assertEqual(actual_messaged_received, messages_to_receive)
|
mit
| -3,411,936,796,912,395,300
| 33.050505
| 77
| 0.621181
| false
| 3.951934
| true
| false
| false
|
awni/tensorflow
|
tensorflow/python/training/training_ops.py
|
1
|
7523
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for training ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.training import gen_training_ops
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
# Shape functions for fused training ops
# --------------------------------------
#
# The fused training ops all have the same basic structure: they take
# one or more variables with the same shape, and emit a reference to
# the original variable (which has the same shape as the first
# input). In addition, they take one or more scalar tensors containing
# hyperparameters.
#
# The sparse ops take the gradients as a Python IndexedSlices, which
# means that the indices are a vector of length N, and the gradient
# values are a tensor whose size is the same as the original variable,
# except for the 0th dimension, which has size N.
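#
# Illustrative example (not part of the original file): for SparseApplyAdagrad
# applied to a variable of shape [1000, 64] with N = 32 updated rows, the
# shapes involved would typically be
#   var, accum : [1000, 64]
#   lr         : []          (scalar hyperparameter)
#   grad       : [32, 64]    (0th dimension is the number of indices)
#   indices    : [32]
# and the op returns a reference with the variable's shape. The helpers below
# merge and assert exactly these relationships.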
def _AssertInputIsScalar(op, index):
"""Raises ValueError if `op.inputs[index]` is not scalar."""
op.inputs[index].get_shape().assert_is_compatible_with(tensor_shape.scalar())
@ops.RegisterShape("ApplyAdadelta")
def _ApplyAdadeltaShape(op):
"""Shape function for the ApplyAdadelta op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
accum_update_shape = op.inputs[2].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # rho
_AssertInputIsScalar(op, 5) # epsilon
grad_shape = op.inputs[6].get_shape().merge_with(accum_shape)
return [grad_shape]
@ops.RegisterShape("ApplyAdagrad")
def _ApplyAdagradShape(op):
"""Shape function for the ApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
return [grad_shape]
@ops.RegisterShape("ApplyFtrl")
def _ApplyFtrlShape(op):
"""Shape function for the ApplyFtrlOp op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
linear_shape = op.inputs[2].get_shape().merge_with(accum_shape)
grad_shape = op.inputs[3].get_shape().merge_with(linear_shape)
_AssertInputIsScalar(op, 4) # lr
_AssertInputIsScalar(op, 5) # l1
_AssertInputIsScalar(op, 6) # l2
_AssertInputIsScalar(op, 7) # lr_power
return [grad_shape]
@ops.RegisterShape("ApplyAdam")
def _ApplyAdamShape(op):
"""Shape function for the ApplyAdam op."""
var_shape = op.inputs[0].get_shape()
m_shape = op.inputs[1].get_shape().merge_with(var_shape)
v_shape = op.inputs[2].get_shape().merge_with(m_shape)
_AssertInputIsScalar(op, 3) # beta1_power
_AssertInputIsScalar(op, 4) # beta2_power
_AssertInputIsScalar(op, 5) # lr
_AssertInputIsScalar(op, 6) # beta1
_AssertInputIsScalar(op, 7) # beta2
_AssertInputIsScalar(op, 8) # epsilon
grad_shape = op.inputs[9].get_shape().merge_with(v_shape)
return [grad_shape]
@ops.RegisterShape("ApplyMomentum")
def _ApplyMomentumShape(op):
"""Shape function for the ApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
_AssertInputIsScalar(op, 4) # momentum
return [grad_shape]
@ops.RegisterShape("ApplyRMSProp")
def _ApplyRMSPropShape(op):
"""Shape function for the ApplyRMSProp op."""
var_shape = op.inputs[0].get_shape()
ms_shape = op.inputs[1].get_shape().merge_with(var_shape)
mom_shape = op.inputs[2].get_shape().merge_with(ms_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # rho
_AssertInputIsScalar(op, 5) # momentum
_AssertInputIsScalar(op, 6) # epsilon
grad_shape = op.inputs[7].get_shape().merge_with(mom_shape)
return [grad_shape]
@ops.RegisterShape("ApplyGradientDescent")
def _ApplyGradientDescentShape(op):
"""Shape function for the ApplyGradientDescent op."""
var_shape = op.inputs[0].get_shape()
_AssertInputIsScalar(op, 1) # alpha
delta_shape = op.inputs[2].get_shape().merge_with(var_shape)
return [delta_shape]
@ops.RegisterShape("SparseApplyAdadelta")
def _SparseApplyAdadeltaShape(op):
"""Shape function for the SparseApplyAdadelta op."""
var_shape = op.inputs[0].get_shape()
accum_grad_shape = op.inputs[1].get_shape().merge_with(var_shape)
accum_update_shape = op.inputs[2].get_shape().merge_with(accum_grad_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # decay_rate
_AssertInputIsScalar(op, 5) # epsilon
grad_shape = op.inputs[6].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_update_shape[1:]))
unused_indices_shape = op.inputs[7].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
return [accum_update_shape]
@ops.RegisterShape("SparseApplyAdagrad")
def _SparseApplyAdagradShape(op):
"""Shape function for the SparseApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
return [accum_shape]
@ops.RegisterShape("SparseApplyFtrl")
def _SparseApplyFtrlShape(op):
"""Shape function for the SparseApplyFtrl op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
linear_shape = op.inputs[2].get_shape().merge_with(accum_shape)
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(linear_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
_AssertInputIsScalar(op, 5) # lr
_AssertInputIsScalar(op, 6) # l1
_AssertInputIsScalar(op, 7) # l2
_AssertInputIsScalar(op, 8) # lr_power
return [linear_shape]
@ops.RegisterShape("SparseApplyMomentum")
def _SparseApplyMomentumShape(op):
"""Shape function for the SparseApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
_AssertInputIsScalar(op, 5) # momentum
return [accum_shape]
|
apache-2.0
| 2,950,394,919,528,449,000
| 38.387435
| 80
| 0.705968
| false
| 3.205369
| false
| false
| false
|
giorgiop/scikit-learn
|
sklearn/utils/fixes.py
|
2
|
13212
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
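    # Illustrative check of the tanh-based formulation above (hypothetical
    # snippet, not part of the original module):
    #
    #     >>> import numpy as np
    #     >>> expit(np.array([-1000., 0., 1000.]))
    #     array([ 0. ,  0.5,  1. ])
    #
    # A naive 1 / (1 + np.exp(-x)) would overflow for x = -1000., which is why
    # the stable identity (1 + tanh(x / 2)) / 2 is used.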
# little dance to see if np.copy has an 'order' keyword argument
# Supported since numpy 1.7.0
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument (numpy < 1.7.0)
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from numpy import partition
except ImportError:
    warnings.warn('Using `sort` instead of partition. '
                  'Upgrade numpy to 1.8 for better performance on large number '
                  'of clusters')
def partition(a, kth, axis=-1, kind='introselect', order=None):
return np.sort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
def parallel_helper(obj, methodname, *args, **kwargs):
"""Helper to workaround Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
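# Illustrative usage of the backport above (assumed to mirror the Python 3
# os.makedirs semantics described in the docstring):
#     makedirs('/tmp/some/nested/dir', exist_ok=True)  # silent if it already exists
#     makedirs('/tmp/some/nested/dir')                 # OSError on the second call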
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
if sp_version < (0, 13, 0):
def rankdata(a, method='average'):
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
else:
from scipy.stats import rankdata
|
bsd-3-clause
| 6,369,921,192,213,193,000
| 31.784119
| 84
| 0.577505
| false
| 3.596081
| false
| false
| false
|
kakaroto/amsn2
|
amsn2/ui/front_ends/qt4/contact_list.py
|
1
|
15634
|
# -*- coding: utf-8 -*-
#
# amsn - a python client for the WLM Network
#
# Copyright (C) 2008 Dario Freddi <drf54321@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from amsn2.ui import base
from PyQt4 import Qt
from PyQt4 import QtCore
from PyQt4 import QtGui
from ui_contactlist import Ui_ContactList
from styledwidget import StyledWidget
from image import *
from amsn2.views import StringView, ContactView, GroupView, ImageView, PersonalInfoView
import common
class aMSNContactListWindow(base.aMSNContactListWindow):
def __init__(self, amsn_core, parent):
self._amsn_core = amsn_core
self._parent = parent
self._skin = amsn_core._skin_manager.skin
self._theme_manager = self._amsn_core._theme_manager
self._myview = amsn_core._personalinfo_manager._personalinfoview
self._clwidget = aMSNContactListWidget(amsn_core, self)
self._clwidget.show()
self.__create_controls()
self._clwidget.ui.pixUser.setIconSize(QtCore.QSize(96,96))
self._clwidget.ui.pixUser.setIcon(QtGui.QIcon("amsn2/ui/front_ends/qt4/msn-userimage2.png"))
QtCore.QObject.connect(self._clwidget.ui.pixUser, QtCore.SIGNAL("clicked()"),self._myview.changeDP)
def __create_controls(self):
#status list
for key in self._amsn_core.p2s:
name = self._amsn_core.p2s[key]
_, path = self._theme_manager.get_statusicon("buddy_%s" % name)
if (name == self._amsn_core.p2s['FLN']): continue
self._clwidget.ui.status.addItem(QtGui.QIcon(path), str.capitalize(name), key)
def show(self):
self._clwidget.show()
def hide(self):
self._clwidget.hide()
def set_title(self, text):
self._parent.setTitle(text)
def set_menu(self, menu):
self._parent.setMenu(menu)
def my_info_updated(self, view):
# TODO image, ...
imview = view.dp
if len(imview.imgs) > 0:
pixbuf = QtGui.QPixmap(imview.imgs[0][1])
pixbuf = pixbuf.scaled(96,96,0,1)
self._clwidget.ui.pixUser.setIcon(QtGui.QIcon(pixbuf))
nk = view.nick
self._clwidget.ui.nickName.setHtml(nk.to_HTML_string())
message = view.psm.to_HTML_string()
if len(view.current_media.to_HTML_string()) > 0:
message += ' ' + view.current_media.to_HTML_string()
self._clwidget.ui.statusMessage.setHtml('<i>'+message+'</i>')
for key in self._amsn_core.p2s:
if self._amsn_core.p2s[key] == view.presence:
self._clwidget.ui.status.setCurrentIndex(self._clwidget.ui.status.findData(key))
def get_contactlist_widget(self):
return self._clwidget
class itemDelegate(QtGui.QStyledItemDelegate):
#Dooooon't touch anything here!!! Or it will break into a million pieces and you'll be really sorry!!!
def paint(self, painter, option, index):
if not index.isValid():
return
painter.translate(0, 0)
options = QtGui.QStyleOptionViewItemV4(option)
self.initStyleOption(options, index)
painter.save()
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
doc = QtGui.QTextDocument()
doc.setHtml(options.text)
options.text = ""
QtGui.QApplication.style().drawControl(QtGui.QStyle.CE_ItemViewItem, options, painter, options.widget)
painter.translate(options.rect.left() + self.sizeDp(index) + 3, options.rect.top()) #paint text right after the dp + 3pixels
rect = QtCore.QRectF(0, 0, options.rect.width(), options.rect.height())
doc.drawContents(painter, rect)
painter.restore()
def sizeHint(self, option, index):
options = QtGui.QStyleOptionViewItemV4(option)
self.initStyleOption(options, index)
doc = QtGui.QTextDocument()
doc.setHtml(options.text)
doc.setTextWidth(options.rect.width())
        #if group, leave as is; if contactitem, use dp height for calculating sizeHint.
model = index.model()
qv = QtGui.QPixmap(model.data(model.index(index.row(), 0,
index.parent()), QtCore.Qt.DecorationRole))
if qv.isNull():
size = QtCore.QSize(doc.idealWidth(), doc.size().height())
else:
size = QtCore.QSize(doc.idealWidth(), qv.height() + 6)
return size
def sizeDp(self, index):
model = index.model()
qv = QtGui.QPixmap(model.data(model.index(index.row(), 0,
index.parent()), QtCore.Qt.DecorationRole))
return qv.width()
class GlobalFilter(QtCore.QObject):
    def __init__(self, parent=None):
QtCore.QObject.__init__(self,parent)
def eventFilter(self, obj, e):
if obj.objectName() == "nickName":
if e.type() == QtCore.QEvent.FocusOut:
obj.emit(QtCore.SIGNAL("nickChange()"))
return False
if e.type() == QtCore.QEvent.KeyPress and (e.key() ==
QtCore.Qt.Key_Enter or
e.key() == QtCore.Qt.Key_Return):
return True
if obj.objectName() == "statusMessage":
if e.type() == QtCore.QEvent.FocusOut:
obj.emit(QtCore.SIGNAL("psmChange()"))
return False
if e.type() == QtCore.QEvent.KeyPress and (e.key() ==
QtCore.Qt.Key_Enter or
e.key() == QtCore.Qt.Key_Return):
return True
return False
class aMSNContactListWidget(StyledWidget, base.aMSNContactListWidget):
def __init__(self, amsn_core, parent):
StyledWidget.__init__(self, parent._parent)
self._amsn_core = amsn_core
self._myview = parent._myview
self.ui = Ui_ContactList()
self.ui.setupUi(self)
delegate = itemDelegate(self)
self.ui.cList.setItemDelegate(delegate)
self._parent = parent
self._mainWindow = parent._parent
self._model = QtGui.QStandardItemModel(self)
self._model.setColumnCount(4)
self._proxyModel = QtGui.QSortFilterProxyModel(self)
self._proxyModel.setSourceModel(self._model)
self.ui.cList.setModel(self._proxyModel)
self._contactDict = dict()
self.groups = []
self.contacts = {}
self._proxyModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self._proxyModel.setFilterKeyColumn(-1)
        (self.ui.cList.header()).resizeSections(1) #auto-resize column width
(self.ui.cList.header()).setSectionHidden(1, True) #hide --> (group/contact ID)
(self.ui.cList.header()).setSectionHidden(2, True) #hide --> (boolean value. Do I really need this?)
(self.ui.cList.header()).setSectionHidden(3, True) #hide --> (contact/group view object)
self.connect(self.ui.searchLine, QtCore.SIGNAL('textChanged(QString)'),
self._proxyModel, QtCore.SLOT('setFilterFixedString(QString)'))
self.connect(self.ui.nickName, QtCore.SIGNAL('nickChange()'), self.__nickChange)
self.connect(self.ui.statusMessage, QtCore.SIGNAL('psmChange()'), self.__psmChange)
self.connect(self.ui.status, QtCore.SIGNAL('currentIndexChanged(int)'), self.__statusChange)
self.connect(self.ui.cList, QtCore.SIGNAL('doubleClicked(QModelIndex)'), self.__clDoubleClick)
self.ui.nickName.installEventFilter(GlobalFilter(self.ui.nickName))
self.ui.statusMessage.installEventFilter(GlobalFilter(self.ui.statusMessage))
def show(self):
self._mainWindow.fadeIn(self)
def hide(self):
pass
def __nickChange(self):
sv = StringView()
sv.append_text(str(self.ui.nickName.toPlainText()))
self._myview.nick = str(sv)
def __psmChange(self):
sv = StringView()
sv.append_text(str(self.ui.statusMessage.toPlainText()))
self._myview.psm = str(sv)
def __statusChange(self, i):
if self.ui.status.count()+1 != len(self._amsn_core.p2s): return
for key in self._amsn_core.p2s:
if key == str(self.ui.status.itemData(i).toString()):
self._myview.presence = self._amsn_core.p2s[key]
def __search_by_id(self, id):
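        # Scan the model's top-level (group) rows and their child (contact) rows
        # for entries whose hidden ID column (column 1) equals `id`; returns the
        # matching group item, a list of matching contact items, or None.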
parent = self._model.item(0)
children = []
while (parent is not None):
obj = str(self._model.item(self._model.indexFromItem(parent).row(), 1).text())
if (obj == id): return parent
child = parent.child(0)
nc = 0
while (child is not None):
cobj = str(parent.child(nc, 1).text())
if (cobj == id): children.append(child)
nc = nc + 1
child = self._model.item(self._model.indexFromItem(parent).row()).child(nc)
parent = self._model.item(self._model.indexFromItem(parent).row() + 1)
if parent is None: break
if children: return children
else: return None
def contactlist_updated(self, view):
guids = self.groups
self.groups = []
# New groups
for gid in view.group_ids:
if (gid == 0): gid = '0'
self.groups.append(gid)
if gid not in guids:
self._model.appendRow([QtGui.QStandardItem(gid),
QtGui.QStandardItem(gid),
QtGui.QStandardItem("group"),
QtGui.QStandardItem()])
# Remove unused groups
for gid in guids:
if gid not in self.groups:
gitem = self.__search_by_id(gid)
self._model.removeRow((self._model.indexFromItem(gitem)).row())
try:
del self.contacts[gid]
except KeyError:
pass
#self.groups.remove(gid)
def contact_updated(self, contact):
citems = self.__search_by_id(contact.uid)
if citems is None: return
dp = Image(self._parent._theme_manager, contact.dp)
dp = dp.to_size(28, 28)
#icon = Image(self._parent._theme_manager, contact.icon)
for citem in citems:
gitem = citem.parent()
if gitem is None: continue
gitem.child(self._model.indexFromItem(citem).row(),
0).setData(QtCore.QVariant(dp), QtCore.Qt.DecorationRole)
#gitem.child(self._model.indexFromItem(citem).row(), 0).setData(QVariant(icon), Qt.DecorationRole)
gitem.child(self._model.indexFromItem(citem).row(),
3).setData(QtCore.QVariant(contact), QtCore.Qt.DisplayRole)
cname = StringView()
cname = contact.name.to_HTML_string()
gitem.child(self._model.indexFromItem(citem).row(),
0).setText(QtCore.QString.fromUtf8(cname))
def group_updated(self, group):
if (group.uid == 0): group.uid = '0'
if group.uid not in self.groups: return
gitem = self.__search_by_id(group.uid)
self._model.item(self._model.indexFromItem(gitem).row(),
3).setData(QtCore.QVariant(group), QtCore.Qt.DisplayRole)
gname = StringView()
gname = group.name
self._model.item((self._model.indexFromItem(gitem)).row(),
0).setText('<b>'+QtCore.QString.fromUtf8(gname.to_HTML_string())+'</b>')
try:
cuids = self.contacts[group.uid]
except:
cuids = []
self.contacts[group.uid] = group.contact_ids.copy()
for cid in group.contact_ids:
if cid not in cuids:
gitem = self.__search_by_id(group.uid)
gitem.appendRow([QtGui.QStandardItem(cid),
QtGui.QStandardItem(cid),
QtGui.QStandardItem("contact"),
QtGui.QStandardItem()])
# Remove unused contacts
for cid in cuids:
if cid not in self.contacts[group.uid]:
citems = self.__search_by_id(cid)
for citem in citems:
self._model.removeRow((self._model.indexFromItem(citem)).row())
def group_removed(self, group):
gid = self.__search_by_id(group.uid)
self._model.takeRow(self._model.indexFromItem(gid))
def configure(self, option, value):
pass
def cget(self, option, value):
pass
def size_request_set(self, w, h):
pass
def __clDoubleClick(self, index):
model = index.model()
qvart = model.data(model.index(index.row(), 2, index.parent()))
qvarv = model.data(model.index(index.row(), 3, index.parent()))
type = qvart.toString()
view = qvarv.toPyObject()
#is the double-clicked item a contact?
if type == "contact":
view.on_click(view.uid)
else:
print "Double click on group!"
def contextMenuEvent(self, event):
l = self.ui.cList.selectedIndexes()
index = l[0]
model = index.model()
qvart = model.data(model.index(index.row(), 2, index.parent()))
qvarv = model.data(model.index(index.row(), 3, index.parent()))
type = qvart.toString()
view = qvarv.toPyObject()
if type == "contact":
menuview = view.on_right_click_popup_menu
menu = QtGui.QMenu("Contact Popup", self)
common.create_menu_items_from_view(menu, menuview.items)
menu.popup(event.globalPos())
if type == "group":
menuview = view.on_right_click_popup_menu
menu = QtGui.QMenu("Group Popup", self)
common.create_menu_items_from_view(menu, menuview.items)
menu.popup(event.globalPos())
def set_contact_context_menu(self, cb):
#TODO:
pass
def group_added(self, group):
pi = self._model.invisibleRootItem()
# Adding Group Item
groupItem = QtGui.QStandardItem()
gname = StringView()
gname = group.name
        self._model.item(groupItem.row(), 0).setText('<b>'+QtCore.QString.fromUtf8(gname.to_HTML_string())+'</b>')
self._model.item(groupItem.row(), 1).setText(QtCore.QString.fromUtf8(str(group.uid)))
pi.appendRow(groupItem)
for contact in group.contacts:
contactItem = QtGui.QStandardItem()
cname = StringView()
cname = contact.name
            self._model.item(contactItem.row(), 0).setText(QtCore.QString.fromUtf8(cname.to_HTML_string()))
self._model.item(contactItem.row(), 1).setText(QtCore.QString.fromUtf8(str(contact.uid)))
groupItem.appendRow(contactItem)
self._contactDict[contact.uid] = contact
|
gpl-2.0
| -3,533,732,585,038,985,700
| 38.984655
| 132
| 0.592491
| false
| 3.741086
| false
| false
| false
|
hfp/libxsmm
|
samples/deeplearning/sparse_training/fairseq/fairseq/tasks/fairseq_task.py
|
1
|
16132
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import os
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import data_utils, FairseqDataset, iterators, Dictionary
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True will improve distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
def __init__(self, args):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return (os.pathsep in getattr(self.args, 'data', ''))
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# For default fairseq task, return same iterator across epochs
# as datasets are not dynamic, can be overridden in task specific
# setting.
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = data_utils.filter_by_size(
indices,
dataset,
max_positions,
raise_exception=(not ignore_invalid_inputs),
)
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=getattr(self.args, 'data_buffer_size', 0)
)
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
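    # Illustrative usage sketch (an assumption based on the docstring above, not
    # part of the original file): a training loop would typically do
    #
    #     epoch_itr = task.get_batch_iterator(
    #         dataset=task.dataset('train'),
    #         max_tokens=4096,
    #         seed=1,
    #         epoch=1,
    #     )
    #     for sample in epoch_itr.next_epoch_itr(shuffle=True):
    #         ...
    #
    # where next_epoch_itr is the EpochBatchIterator entry point; the exact call
    # site shown here is hypothetical.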
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self)
if getattr(args, 'tpu', False):
model.prepare_for_tpu_()
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def build_generator(self, models, args, seq_gen_cls=None):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False, retain_graph=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss, retain_graph=retain_graph)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
|
bsd-3-clause
| 1,230,206,139,522,722,600
| 36.691589
| 100
| 0.593603
| false
| 4.420937
| false
| false
| false
|
bhrutledge/debugged-django
|
debugged/stream/signals.py
|
1
|
2560
|
from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from debugged.stream.models import StreamEntry, StreamItem
def _get_stream_item(instance):
instance_type = ContentType.objects.get_for_model(instance)
try:
s = StreamItem.objects.get(content_type=instance_type, object_id=instance.id)
except:
s = StreamItem(content_type=instance_type, object_id=instance.id)
return s
def _get_stream_entry(instance):
instance_type = ContentType.objects.get_for_model(instance)
try:
parent = instance.parent
parent_type = ContentType.objects.get_for_model(parent)
parent_id = parent.id
except:
parent = parent_type = parent_id = None
end_date = instance.publish_date + timedelta(minutes=30)
start_date = instance.publish_date - timedelta(minutes=30)
try:
e = StreamEntry.objects.get(item_type=instance_type,
content_type=parent_type, object_id=parent_id,
publish_date__range=(start_date, end_date))
except:
e = StreamEntry(item_type=instance_type,
content_type=parent_type, object_id=parent_id)
return e
def delete_stream_item(sender, instance, **kwargs):
instance_type = ContentType.objects.get_for_model(instance)
try:
item = StreamItem.objects.get(content_type=instance_type.id, object_id=instance.id)
entry = item.entry
item.delete()
if entry.items.count() == 0:
entry.delete()
except:
pass
def update_stream_item(sender, instance, **kwargs):
# TODO: What about StreamItems that already have StreamEntries?
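    # Keep the StreamItem for this instance in sync with a StreamEntry grouping
    # items of the same type and parent published within a +/- 30 minute window;
    # if the item moves to a new entry and the old one becomes empty, delete it.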
if instance.published:
item = _get_stream_item(instance)
entry = _get_stream_entry(instance)
if entry.publish_date:
entry.publish_date = max(instance.publish_date, entry.publish_date)
else:
entry.publish_date = instance.publish_date
entry.modify_date = datetime.now()
entry.save()
try:
old_entry = item.entry
except:
old_entry = None
item.publish_date = instance.publish_date
item.modify_date = instance.modify_date
item.entry = entry
item.save()
if old_entry and old_entry.items.count() == 0:
old_entry.delete()
else:
delete_stream_item(sender, instance)
|
mit
| -8,103,646,417,448,047,000
| 32.246753
| 91
| 0.607031
| false
| 4.050633
| false
| false
| false
|
0xf2/stackalytics
|
stackalytics/dashboard/web.py
|
1
|
24373
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import operator
import os
import time
import flask
from oslo_config import cfg
from oslo_log import log as logging
import six
from stackalytics.dashboard import config
from stackalytics.dashboard import decorators
from stackalytics.dashboard import helpers
from stackalytics.dashboard import kpi
from stackalytics.dashboard import parameters
from stackalytics.dashboard import reports
from stackalytics.dashboard import vault
from stackalytics.processor import config as processor_cfg
from stackalytics.processor import utils
# Application objects ---------
app = flask.Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('DASHBOARD_CONF', silent=True)
app.register_blueprint(reports.blueprint)
app.register_blueprint(kpi.blueprint)
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(processor_cfg.CONNECTION_OPTS + config.DASHBOARD_OPTS)
# Handlers ---------
@app.route('/')
@decorators.templated()
def overview():
pass
@app.route('/widget')
def widget():
return flask.render_template('widget.html')
# AJAX Handlers ---------
def _get_aggregated_stats(records, metric_filter, keys, param_id,
param_title=None, finalize_handler=None):
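    # Collapse the record stream into one row per distinct `param_id` value
    # (company, module, user, ...), apply the optional metric filter and
    # finalize handler, then sort by metric (descending) and index the rows.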
param_title = param_title or param_id
result = dict((c, {'metric': 0, 'id': c}) for c in keys)
context = {'vault': vault.get_vault()}
if metric_filter:
for record in records:
metric_filter(result, record, param_id, context)
result[getattr(record, param_id)]['name'] = (
getattr(record, param_title))
else:
for record in records:
record_param_id = getattr(record, param_id)
result[record_param_id]['metric'] += 1
result[record_param_id]['name'] = getattr(record, param_title)
response = [r for r in result.values() if r['metric']]
if finalize_handler:
response = [item for item in map(finalize_handler, response) if item]
response.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(response, item_filter=lambda x: x['id'] != '*independent')
return response
@app.route('/api/1.0/new_companies')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['start_date'])
def get_new_companies(records, **kwargs):
days = int(flask.request.args.get('days') or reports.DEFAULT_DAYS_COUNT)
start_date = int(time.time()) - days * 24 * 60 * 60
result = {}
for record in records:
company_name = record.company_name
date = record.date
if company_name not in result or result[company_name] > date:
result[company_name] = date
response = list(({'name': company_name,
'date': result[company_name],
'date_str': helpers.format_date(result[company_name])})
for company_name in result
if result[company_name] >= start_date)
response.sort(key=lambda x: x['date'], reverse=True)
utils.add_index(response)
return response
@app.route('/api/1.0/stats/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_companies(records, metric_filter, finalize_handler, **kwargs):
return _get_aggregated_stats(records, metric_filter,
vault.get_memory_storage().get_companies(),
'company_name',
finalize_handler=finalize_handler)
@app.route('/api/1.0/stats/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_modules(records, metric_filter, finalize_handler, **kwargs):
return _get_aggregated_stats(records, metric_filter,
vault.get_memory_storage().get_modules(),
'module', finalize_handler=finalize_handler)
def get_core_engineer_branch(user, modules):
is_core = None
for (module, branch) in (user.get('core') or []):
if module in modules:
is_core = branch
if branch == 'master': # master is preferable, but stables are ok
break
return is_core
@app.route('/api/1.0/stats/engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_engineers(records, metric_filter, finalize_handler, **kwargs):
modules_names = parameters.get_parameter(kwargs, 'module')
modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])
def postprocessing(record):
if finalize_handler:
record = finalize_handler(record)
user = vault.get_user_from_runtime_storage(record['id'])
record['core'] = get_core_engineer_branch(user, modules)
return record
return _get_aggregated_stats(records, metric_filter,
vault.get_memory_storage().get_user_ids(),
'user_id', 'author_name',
finalize_handler=postprocessing)
@app.route('/api/1.0/stats/engineers_extended')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['metric'])
def get_engineers_extended(records, **kwargs):
modules_names = parameters.get_parameter(kwargs, 'module')
modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])
def postprocessing(record):
record = decorators.mark_finalize(record)
if not (record['mark'] or record['review'] or record['commit'] or
record['email'] or record['patch']):
return
user = vault.get_user_from_runtime_storage(record['id'])
record['company'] = helpers.get_current_company(user)
record['core'] = get_core_engineer_branch(user, modules)
return record
def record_processing(result, record, param_id):
result_row = result[getattr(record, param_id)]
record_type = record.record_type
result_row[record_type] = result_row.get(record_type, 0) + 1
if record_type == 'mark':
decorators.mark_filter(result, record, param_id, {})
result = {}
for record in records:
user_id = record.user_id
if user_id not in result:
result[user_id] = {'id': user_id, 'mark': 0, 'review': 0,
'commit': 0, 'email': 0, 'patch': 0,
'metric': 0}
record_processing(result, record, 'user_id')
result[user_id]['name'] = record.author_name
response = result.values()
response = [item for item in map(postprocessing, response) if item]
response.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(response)
return response
@app.route('/api/1.0/stats/distinct_engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_distinct_engineers(records, **kwargs):
result = {}
for record in records:
result[record.user_id] = {
'author_name': record.author_name,
'author_email': record.author_email,
}
return result
@app.route('/api/1.0/activity')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('activity')
@decorators.record_filter()
def get_activity_json(records, **kwargs):
start_record = int(flask.request.args.get('start_record') or 0)
page_size = int(flask.request.args.get('page_size') or
parameters.DEFAULT_RECORDS_LIMIT)
query_message = flask.request.args.get('query_message')
return helpers.get_activity(records, start_record, page_size,
query_message)
@app.route('/api/1.0/contribution')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('contribution')
@decorators.record_filter(ignore=['metric'])
def get_contribution_json(records, **kwargs):
return helpers.get_contribution_summary(records)
@app.route('/api/1.0/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['company'])
@decorators.jsonify()
@decorators.record_filter(ignore=['company'])
def get_companies_json(record_ids, **kwargs):
memory_storage = vault.get_memory_storage()
companies = set(company
for company in memory_storage.get_index_keys_by_record_ids(
'company_name', record_ids))
if kwargs['_params']['company']:
companies.add(memory_storage.get_original_company_name(
kwargs['_params']['company'][0]))
return [{'id': c.lower().replace('&', ''), 'text': c}
for c in sorted(companies)]
@app.route('/api/1.0/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['module'])
@decorators.jsonify()
@decorators.record_filter(ignore=['module'])
def get_modules_json(record_ids, **kwargs):
module_id_index = vault.get_vault()['module_id_index']
tags = parameters.get_parameter(kwargs, 'tag', plural_name='tags')
# all modules mentioned in records
module_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
'module', record_ids)
add_modules = set([])
for module in six.itervalues(module_id_index):
if set(module['modules']) & module_ids:
add_modules.add(module['id'])
module_ids |= add_modules
# keep only modules with specified tags
if tags:
module_ids = set(module_id for module_id in module_ids
if ((module_id in module_id_index) and
(module_id_index[module_id].get('tag') in tags)))
result = []
for module_id in module_ids:
module = module_id_index[module_id]
result.append({'id': module['id'],
'text': module['module_group_name'],
'tag': module['tag']})
return sorted(result, key=operator.itemgetter('text'))
@app.route('/api/1.0/companies/<company_name>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('company')
def get_company(company_name, **kwargs):
memory_storage_inst = vault.get_memory_storage()
for company in memory_storage_inst.get_companies():
if company.lower() == company_name.lower():
return {
'id': company_name,
'text': memory_storage_inst.get_original_company_name(
company_name)
}
flask.abort(404)
@app.route('/api/1.0/modules/<module_id>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('module')
def get_module(module_id, **kwargs):
project_type = parameters.get_single_parameter(kwargs, 'project_type')
release = parameters.get_single_parameter(kwargs, 'release')
module = helpers.extend_module(module_id, project_type, release)
if not module:
flask.abort(404)
return module
@app.route('/api/1.0/members')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['release', 'project_type', 'module'])
@decorators.jsonify('members')
@decorators.record_filter(ignore=['release', 'project_type', 'module'])
def get_members(records, **kwargs):
response = []
for record in records:
record = vault.extend_record(record)
nr = dict([(k, record[k]) for k in
['author_name', 'date', 'company_name', 'member_uri']])
nr['date_str'] = helpers.format_date(nr['date'])
response.append(nr)
response.sort(key=lambda x: x['date'], reverse=True)
utils.add_index(response)
return response
@app.route('/api/1.0/stats/bp')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_bpd(records, **kwargs):
result = []
for record in records:
if record.record_type in ['bpd', 'bpc']:
record = vault.extend_record(record)
mention_date = record.get('mention_date')
if mention_date:
date = helpers.format_date(mention_date)
else:
date = 'never'
result.append({
'date': date,
'status': record['lifecycle_status'],
'metric': record.get('mention_count') or 0,
'id': record['name'],
'name': record['name'],
'link': helpers.make_blueprint_link(record['module'],
record['name'])
})
result.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(result)
return result
@app.route('/api/1.0/languages')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['language'])
@decorators.jsonify()
@decorators.record_filter(ignore=['language'])
def get_languages_json(record_ids, **kwargs):
memory_storage = vault.get_memory_storage()
languages = set(r.value for r in memory_storage.get_records(record_ids))
return [{'id': c.lower().replace('&', ''), 'text': c}
for c in sorted(languages)]
@app.route('/api/1.0/stats/languages')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['language'])
def get_languages(records, **kwargs):
result = []
languages = collections.defaultdict(int)
for record in records:
if record.record_type in ['tr']:
languages[record.value] += record.loc
for lang, val in six.iteritems(languages):
result.append({
'id': lang,
'name': lang,
'metric': val,
})
result.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(result)
return result
@app.route('/api/1.0/users')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['user_id'])
@decorators.jsonify()
@decorators.record_filter(ignore=['user_id'])
def get_users_json(record_ids, **kwargs):
core_in = parameters.get_single_parameter(kwargs, 'core_in') or None
valid_modules = set()
if core_in:
core_in = set(core_in.split(','))
valid_modules = vault.resolve_project_types(
kwargs['_params']['project_type'])
valid_modules = set(m[0] for m in vault.resolve_modules(
valid_modules, kwargs['_params']['release']))
user_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
'user_id', record_ids)
if kwargs['_params']['user_id']:
user_ids.add(kwargs['_params']['user_id'][0])
result = []
for user_id in user_ids:
user = vault.get_user_from_runtime_storage(user_id)
r = {'id': user_id, 'text': user.get('user_name') or user['user_id']}
add_flag = not core_in
if core_in and user.get('core'):
core_modules = [module_branch[0] for module_branch in user['core']
if (module_branch[1] in core_in and
module_branch[0] in valid_modules)]
if core_modules:
r['core'] = core_modules
if user['companies']:
r['company_name'] = helpers.get_current_company(user)
add_flag = True
if add_flag:
result.append(r)
result.sort(key=lambda x: x['text'])
return result
@app.route('/api/1.0/users/<user_id>')
@decorators.response()
@decorators.jsonify('user')
def get_user(user_id):
user = vault.get_user_from_runtime_storage(user_id)
if not user:
flask.abort(404)
user = helpers.extend_user(user)
return user
@app.route('/api/1.0/releases')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_releases_json(**kwargs):
releases = [{'id': release['release_name'],
'text': release['release_name'].capitalize()}
for release in vault.get_vault()['releases'].values()]
releases.append({'id': 'all', 'text': 'All'})
releases.reverse()
return (releases, parameters.get_default('release'))
@app.route('/api/1.0/metrics')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_metrics_json(**kwargs):
return (sorted([{'id': m, 'text': t} for m, t in
six.iteritems(parameters.METRIC_LABELS)],
key=operator.itemgetter('text')),
parameters.get_default('metric'))
@app.route('/api/1.0/project_types')
@decorators.response()
@decorators.exception_handler()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_project_types_json(**kwargs):
return ([{'id': pt['id'], 'text': pt['title'],
'child': pt.get('child', False)}
for pt in vault.get_project_types()],
parameters.get_default('project_type'))
@app.route('/api/1.0/affiliation_changes')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('affiliation_changes')
def get_company_changes(**kwargs):
start_days = str(flask.request.args.get('start_days') or
utils.timestamp_to_date(int(time.time()) -
365 * 24 * 60 * 60))
end_days = str(flask.request.args.get('end_days') or
utils.timestamp_to_date(int(time.time())))
start_date = utils.date_to_timestamp_ext(start_days)
end_date = utils.date_to_timestamp_ext(end_days)
runtime_storage = vault.get_runtime_storage()
result = []
for user in runtime_storage.get_all_users():
companies = user.get('companies') or []
if len(companies) < 2:
continue
companies_iter = iter(companies)
        company = next(companies_iter)  # builtin next() works on both Python 2 and 3
old_company_name = company['company_name']
date = company['end_date']
for company in companies_iter:
new_company_name = company['company_name']
if start_date <= date <= end_date:
result.append({
'user_id': user['user_id'],
'user_name': user['user_name'],
'old_company_name': old_company_name,
'new_company_name': new_company_name,
'date': date,
})
old_company_name = new_company_name
date = company['end_date']
return result
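# Illustrative example (not part of the original module): for a hypothetical
# user with companies=[{'company_name': 'Acme', 'end_date': 1420070400},
#                      {'company_name': 'Initech', 'end_date': 0}]
# the loop above reports a single affiliation change
# {'old_company_name': 'Acme', 'new_company_name': 'Initech',
#  'date': 1420070400, ...}, provided that date falls inside the requested
# start_days/end_days interval.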
def _get_week(kwargs, param_name):
date_param = parameters.get_single_parameter(kwargs, param_name)
if date_param:
ts = utils.date_to_timestamp_ext(date_param)
else:
ts = vault.get_vault()[param_name]
return utils.timestamp_to_week(ts)
@app.route('/api/1.0/stats/timeline')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('timeline')
@decorators.record_filter(ignore=['release', 'start_date'])
def timeline(records, **kwargs):
# find start and end dates
metric = parameters.get_parameter(kwargs, 'metric')
start_date = int(parameters.get_single_parameter(kwargs, 'start_date')
or 0)
release_name = parameters.get_single_parameter(kwargs, 'release') or 'all'
releases = vault.get_vault()['releases']
if 'all' in release_name:
start_week = release_start_week = _get_week(kwargs, 'start_date')
end_week = release_end_week = _get_week(kwargs, 'end_date')
else:
release = releases[release_name]
start_week = release_start_week = utils.timestamp_to_week(
release['start_date'])
end_week = release_end_week = utils.timestamp_to_week(
release['end_date'])
now = utils.timestamp_to_week(int(time.time())) + 1
# expand start-end to year if needed
if release_end_week - release_start_week < 52:
expansion = (52 - (release_end_week - release_start_week)) // 2
if release_end_week + expansion < now:
end_week += expansion
else:
end_week = now
start_week = end_week - 52
# empty stats for all weeks in range
weeks = range(start_week, end_week)
week_stat_loc = dict((c, 0) for c in weeks)
week_stat_commits = dict((c, 0) for c in weeks)
week_stat_commits_hl = dict((c, 0) for c in weeks)
commits_handler = lambda record: 1
if 'translations' in metric:
commits_handler = lambda record: record.loc
if ('commits' in metric) or ('loc' in metric):
loc_handler = lambda record: record.loc
else:
loc_handler = lambda record: 0
# fill stats with the data
if 'person-day' in metric:
        # special case for the person-day effort metric
release_stat = collections.defaultdict(set)
all_stat = collections.defaultdict(set)
for record in records:
if start_week <= record.week < end_week:
day = utils.timestamp_to_day(record.date)
user_id = record.user_id
if record.release == release_name:
release_stat[day].add(user_id)
all_stat[day].add(user_id)
for day, users in six.iteritems(release_stat):
week = utils.timestamp_to_week(day * 24 * 3600)
week_stat_commits_hl[week] += len(users)
for day, users in six.iteritems(all_stat):
week = utils.timestamp_to_week(day * 24 * 3600)
week_stat_commits[week] += len(users)
else:
for record in records:
week = record.week
if start_week <= week < end_week:
week_stat_loc[week] += loc_handler(record)
week_stat_commits[week] += commits_handler(record)
if 'members' in metric:
if record.date >= start_date:
week_stat_commits_hl[week] += 1
else:
if record.release == release_name:
week_stat_commits_hl[week] += commits_handler(record)
if 'all' == release_name and 'members' not in metric:
week_stat_commits_hl = week_stat_commits
# form arrays in format acceptable to timeline plugin
array_loc = []
array_commits = []
array_commits_hl = []
for week in weeks:
week_str = utils.week_to_date(week)
array_loc.append([week_str, week_stat_loc[week]])
array_commits.append([week_str, week_stat_commits[week]])
array_commits_hl.append([week_str, week_stat_commits_hl[week]])
return [array_commits, array_commits_hl, array_loc]
@app.template_test()
def too_old(timestamp):
age = CONF.age_warn
now = time.time()
return timestamp + age < now
def main():
logging.register_options(CONF)
logging.set_defaults()
conf_file = os.getenv('STACKALYTICS_CONF')
if conf_file and os.path.isfile(conf_file):
CONF(default_config_files=[conf_file])
app.config['DEBUG'] = CONF.debug
LOG.info('Stackalytics.dashboard is configured via "%s"', conf_file)
else:
CONF(project='stackalytics')
logging.setup(CONF, 'stackalytics.dashboard')
app.run(CONF.listen_host, CONF.listen_port)
if __name__ == '__main__':
main()
|
apache-2.0
| -1,381,392,780,574,979,000
| 33.088112
| 79
| 0.621261
| false
| 3.736471
| false
| false
| false
|
olga-perederieieva/pyDEA
|
pyDEA/core/gui_modules/table_gui.py
|
1
|
57836
|
''' This module contains classes responsible for displaying input data
in a table (TableFrame and TableFrameWithInputOutputBox).
It also contains many classes necessary for TableFrameWithInputOutputBox.
Attributes:
        CELL_WIDTH (int): constant that defines the width of a cell in a table
'''
from tkinter import S, N, E, W, END, VERTICAL, HORIZONTAL, ALL
from tkinter import IntVar, DISABLED, StringVar, NORMAL
from tkinter.ttk import Frame, Entry, Scrollbar, Checkbutton
from pyDEA.core.gui_modules.scrollable_frame_gui import MouseWheel
from pyDEA.core.utils.dea_utils import is_valid_coeff, NOT_VALID_COEFF, VALID_COEFF
from pyDEA.core.utils.dea_utils import WARNING_COEFF, EMPTY_COEFF, CELL_DESTROY
from pyDEA.core.utils.dea_utils import CHANGE_CATEGORY_NAME, INPUT_OBSERVER
from pyDEA.core.utils.dea_utils import OUTPUT_OBSERVER, on_canvas_resize
from pyDEA.core.utils.dea_utils import validate_category_name, calculate_nb_pages
from pyDEA.core.gui_modules.custom_canvas_gui import StyledCanvas
from pyDEA.core.data_processing.read_data_from_xls import convert_to_dictionary
CELL_WIDTH = 10
class TableFrame(Frame):
''' This class is a base class that defines minimal functionality of
a table.
Attributes:
parent (Tk object): parent of this widget.
nb_rows (int): number of rows of the table.
nb_cols (int): number of columns of the table.
cells (list of list of Entry): list with Entry widgets
(or derivatives of Entry)
that describes the table and its content.
canvas (Canvas): canvas that holds all widgets
(it is necessary to make the table scrollable).
frame_with_table (Frame): frame that holds all widgets.
Args:
parent (Tk object): parent of this widget.
nb_rows (int, optional): number of rows of the table,
defaults to 20.
nb_cols (int, optional): number of columns of the table,
defaults to 5.
'''
def __init__(self, parent, data, nb_rows=20, nb_cols=5):
Frame.__init__(self, parent)
self.data = data
self.parent = parent
self.nb_rows = nb_rows
self.nb_cols = nb_cols
self.cells = []
self.canvas = None
self.frame_with_table = None
self.create_widgets()
def create_widgets(self):
''' Creates all widgets.
'''
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
yScrollbar = Scrollbar(self, orient=VERTICAL)
yScrollbar.grid(row=0, column=1, sticky=N+S)
xScrollbar = Scrollbar(self, orient=HORIZONTAL)
xScrollbar.grid(row=1, column=0, sticky=E+W)
canvas = StyledCanvas(self, yscrollcommand=yScrollbar.set,
xscrollcommand=xScrollbar.set, bd=0)
self.canvas = canvas
canvas.grid(row=0, column=0, sticky=N+S+W+E)
frame_with_table = Frame(canvas)
self.frame_with_table = frame_with_table
frame_with_table.grid(sticky=N+S+W+E, pady=15, padx=3)
for i in range(2, self.nb_rows + 2):
cols = []
for j in range(1, self.nb_cols + 1):
ent = self.create_entry_widget(frame_with_table)
ent.grid(row=i, column=j, sticky=N+S+E+W)
cols.append(ent)
self.cells.append(cols)
canvas.create_window(0, 0, window=frame_with_table, anchor='nw')
canvas.update_idletasks()
yScrollbar['command'] = canvas.yview
xScrollbar['command'] = canvas.xview
self._update_scroll_region()
MouseWheel(self).add_scrolling(canvas, yscrollbar=yScrollbar)
def create_entry_widget(self, parent):
''' Creates Entry widget.
Args:
parent (Tk object): parent of the Entry widget.
Returns:
Entry: created Entry widget.
'''
return Entry(parent, width=CELL_WIDTH)
def add_row(self):
''' Adds one row to the end of the table.
'''
self.cells.append([])
for j in range(self.nb_cols):
grid_row_index = self.nb_rows + 2
ent = self.create_entry_widget(self.frame_with_table)
ent.grid(row=grid_row_index, column=j + 1, sticky=N+S+E+W)
self.cells[self.nb_rows].append(ent)
self.nb_rows += 1
self._update_scroll_region()
def add_column(self):
''' Adds one column to the end of the table.
'''
for i in range(self.nb_rows):
grid_row_index = i + 2
ent = self.create_entry_widget(self.frame_with_table)
ent.grid(row=grid_row_index, column=self.nb_cols + 1,
sticky=N+S+E+W)
self.cells[i].append(ent)
self.nb_cols += 1
self._update_scroll_region()
def remove_row(self, row_index):
''' Removes row with a specified index from the table.
If row_index is zero or larger than the total number of rows,
no row is removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row was deleted, False otherwise.
'''
# forbid deleting first row
if self.should_remove_row(row_index):
for j in range(self.nb_cols):
self.before_cell_destroy(self.cells[row_index][j])
self.cells[row_index][j].destroy()
for i in range(row_index + 1, self.nb_rows):
self.cells[i][j].grid_remove()
self.cells[i][j].grid(row=i + 1)
self.cells.remove(self.cells[row_index])
self.nb_rows -= 1
self._update_scroll_region()
return True
return False
def should_remove_row(self, row_index):
''' Checks if row with a specified row index can be removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row_index is >= 1 and < total number of rows,
False otherwise.
'''
return row_index >= 1 and row_index < self.nb_rows
def remove_column(self, column_index):
''' Removes column with a specified index from the table.
If column index is zero or larger than the total number of
columns of the table, no column is removed.
Args:
column_index (int): index of the column to remove.
Returns:
bool: True if column was removed, False otherwise.
'''
        # do not allow deleting the first column
if column_index > 0 and column_index < self.nb_cols:
for i in range(self.nb_rows):
self.cells[i][column_index].destroy()
for j in range(column_index + 1, self.nb_cols):
self.cells[i][j].grid_remove()
self.cells[i][j].grid(column=j)
self.cells[i].remove(self.cells[i][column_index])
self.nb_cols -= 1
self._update_scroll_region()
return True
return False
def before_cell_destroy(self, cell):
''' This method is called before a table cell is destroyed.
            In this class this method does nothing, but can be redefined
            in child classes.
Args:
cell (Entry): cell that will be destroyed after call to
this method.
'''
pass
def clear_all_data(self):
''' Clears all data from all cells.
'''
for i in range(self.nb_rows):
for j in range(self.nb_cols):
self.before_cell_clear(self.cells[i][j])
self.cells[i][j].delete(0, END)
def before_cell_clear(self, cell):
''' This method is called before data is cleared from a given cell.
            In this class this method does nothing, but can be redefined
            in child classes.
Args:
cell (Entry): cell that will be cleared after call
to this method.
'''
pass
def _update_scroll_region(self):
''' Updates scroll region. This method must be called each
time table size or number of columns or rows change.
'''
# ensures that bbox will calculate border correctly
self.frame_with_table.update()
on_canvas_resize(self.canvas)
def read_coefficients(self):
''' Converts data stored as a list to a proper dictionary
necessary for constructing data instance.
'''
return convert_to_dictionary(self.data, self.check_value)
def check_value(self, count):
''' This method is called in read_coefficients method to check what
values must be returned for data instance construction.
            In this class it always returns True and can be redefined in
            child classes.
'''
return True
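# Minimal usage sketch (illustrative only, not part of the original module):
# embeds a bare TableFrame in a Tk window. Assumes pyDEA and its dependencies
# are installed; the window, the table size and the empty data list are made
# up for the example and never used by the library itself.
def _example_table_frame():  # pragma: no cover
    from tkinter import Tk
    root = Tk()
    table = TableFrame(root, data=[], nb_rows=5, nb_cols=3)
    table.pack(fill='both', expand=True)
    root.mainloop()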
class TableFrameWithInputOutputBox(TableFrame):
''' Extends TableFrame with extra functionality necessary for data
modification and choosing input and output categories.
Attributes:
params_frame (ParamsFrame): frame with parameters, this
class communicates
with params_frame when data is loaded or modified.
combobox_text_var (StringVar): StringVar object that stores
categorical category.
panel_text_observer (PanelTextObserver): observer that adds star to
label frame of the parent of this widget.
This class notifies panel_text_observer
when data was modified.
frames (list of Frame): list of frames that hold Checkbuttons for
choosing input and output categories.
row_checkboxes (list of Checkbutton): list of Checkbuttons used
for removing rows.
col_checkboxes (list of Checkbutton): list of Checkbuttons used
for removing columns.
current_categories (list of str): list of current valid categories.
This class might modify this list.
str_var_for_input_output_boxes (StringVar): StringVar object that
is used for communication
with ParamsFrame. If the content of
str_var_for_input_output_boxes was modified,
it means that data was loaded from parameters file
and input and output categories
must be checked depending on parameters file.
data (list of list of str or float): input data, it might
be modified by this class.
Args:
parent (Tk object): parent of this widget.
params_frame (ParamsFrame): frame with parameters, this class
communicates
with params_frame when data is loaded or modified.
combobox_text_var (StringVar): StringVar object that stores
categorical category.
current_categories (list of str): list of current valid categories.
This class might modify this list.
str_var_for_input_output_boxes (StringVar): StringVar object
that is used for communication
with ParamsFrame. If the content of
str_var_for_input_output_boxes was modified,
it means that data was loaded from parameters file and input
and output categories
must be checked depending on parameters file.
if_text_modified_str (StringVar): StringVar object that is used
by PanelTextObserver, its content is modified when data
was modified.
data (list of list of str or float): input data, it might be
modified by this class.
nb_rows (int, optional): number of rows of the table, defaults
to 20.
nb_cols (int, optional): number of columns of the table,
defaults to 5.
'''
def __init__(self, parent, params_frame,
combobox_text_var, current_categories,
str_var_for_input_output_boxes,
if_text_modified_str, data,
nb_rows=20, nb_cols=5):
self.params_frame = params_frame
self.combobox_text_var = combobox_text_var
self.panel_text_observer = PanelTextObserver(if_text_modified_str)
self.frames = []
self.row_checkboxes = []
self.col_checkboxes = []
self.current_categories = current_categories
self.str_var_for_input_output_boxes = str_var_for_input_output_boxes
self.str_var_for_input_output_boxes.trace('w', self.on_load_categories)
super().__init__(parent, data, nb_rows, nb_cols)
def create_widgets(self):
''' Creates widgets of this class.
'''
super().create_widgets()
for column_index in range(self.nb_cols - 1):
self._create_input_output_box(column_index)
for row_index in range(self.nb_rows):
self.add_row_check_box(row_index)
# add observers to add * in the first column
for row_index in range(self.nb_rows):
self.cells[row_index][0].panel_text_observer = self.panel_text_observer
def create_entry_widget(self, parent):
''' Creates SelfValidatingEntry widget.
Args:
parent (Tk object): parent of the SelfValidatingEntry widget.
Returns:
SelfValidatingEntry: created SelfValidatingEntry widget.
'''
return SelfValidatingEntry(parent, self.data, self.cells, width=CELL_WIDTH)
def deselect_all_boxes(self):
''' Deselects all Checkbuttons used for choosing input and
output categories.
'''
for frame in self.frames:
for child in frame.winfo_children():
child.deselect()
def _create_input_output_box(self, column_index):
''' Creates Checkbuttons used for choosing input and output categories.
Args:
column_index (int): index of a column for which
Checkbuttons must be created.
'''
frame_for_btns = Frame(self.frame_with_table)
self.frames.append(frame_for_btns)
input_var = IntVar()
output_var = IntVar()
input_btn = ObserverCheckbutton(
frame_for_btns, input_var, output_var,
self.params_frame.input_categories_frame,
self.params_frame.output_categories_frame,
self.current_categories, self.cells, INPUT_OBSERVER,
self.params_frame.change_category_name,
self.data, self.combobox_text_var,
text='Input', state=DISABLED)
input_btn.grid(row=1, column=0, sticky=N+W)
output_btn = FollowingObserverCheckbutton(
frame_for_btns, output_var, input_var,
self.params_frame.output_categories_frame,
self.params_frame.input_categories_frame,
self.current_categories, self.cells, OUTPUT_OBSERVER,
self.params_frame.change_category_name,
self.data, self.combobox_text_var, input_btn,
text='Output', state=DISABLED)
output_btn.grid(row=2, column=0, sticky=N+W)
self._add_observers(input_btn, output_btn, column_index + 1)
var = IntVar()
column_checkbox = CheckbuttonWithVar(frame_for_btns, var)
column_checkbox.grid(row=0, column=0)
self.col_checkboxes.append((column_checkbox, var))
frame_for_btns.grid(row=1, column=column_index + 2, sticky=N)
def _add_observers(self, input_btn, output_btn, column_index):
''' Adds observers to newly created cells in a given column.
Args:
input_btn (ObserverCheckbutton): observer used to select
input categories.
output_btn (FollowingObserverCheckbutton): observer used
to select output categories.
column_index (int): index of the column to cells of
which observers must be added.
'''
names_modifier = DefaultCategoriesAndDMUModifier(
self.cells, self.current_categories)
for row_index in range(self.nb_rows):
self._add_observers_to_cell(self.cells[row_index][column_index],
names_modifier, input_btn, output_btn)
def _add_observers_to_cell(self, cell, names_modifier, input_btn,
output_btn):
''' Adds given observers to a given cell.
Args:
cell (SelfValidatingEntry): cell where observers must be added.
names_modifier (DefaultCategoriesAndDMUModifier): observer,
for details see DefaultCategoriesAndDMUModifier.
input_btn (ObserverCheckbutton): observer used to select
input categories.
output_btn (FollowingObserverCheckbutton): observer used to
select output categories.
'''
cell.observers.append(names_modifier) # IMPORTANT:
# this observer MUST be added first, it modifies data that
# is used by other observers!
cell.observers.append(input_btn)
cell.observers.append(output_btn)
cell.panel_text_observer = self.panel_text_observer
def on_load_categories(self, *args):
''' Selects input and output categories when data is loaded from
parameters file. Args are provided by the StringVar trace
methods and are ignored in this method.
'''
for frame in self.frames:
for child in frame.winfo_children():
try:
category = child.get_category()
except AttributeError:
pass
else:
if (child.observer_type == INPUT_OBSERVER and
child.get_category() in
self.str_var_for_input_output_boxes.input_categories):
child.select()
if (child.observer_type == OUTPUT_OBSERVER and
child.get_category() in
self.str_var_for_input_output_boxes.output_categories):
child.select()
def add_row_check_box(self, row_index):
''' Adds Checkbutton used for removing rows to a given row.
Args:
row_index (int): index of row to which Checkbutton
must be added.
'''
if row_index >= 1:
var = IntVar()
row_checkbox = Checkbutton(self.frame_with_table, variable=var)
self.row_checkboxes.append((row_checkbox, var))
row_checkbox.grid(row=row_index + 2, column=0)
else:
self.row_checkboxes.append((None, None))
def add_column(self):
''' Adds one column to the end of table.
'''
super().add_column()
self._create_input_output_box(self.nb_cols - 2)
def add_row(self):
''' Adds one row to the end of table.
Note: When data is spread across several pages, addition of
row must also update the display of data.
This functionality is implemented in TableModifierFrame.
'''
super().add_row()
self.add_row_check_box(self.nb_rows - 1)
names_modifier = DefaultCategoriesAndDMUModifier(
self.cells, self.current_categories)
for col in range(1, self.nb_cols):
input_btn, output_btn = self.get_check_boxes(col - 1)
self._add_observers_to_cell(self.cells[self.nb_rows - 1][col],
names_modifier,
input_btn, output_btn)
def get_check_boxes(self, column_index):
''' Gets Checkbuttons used for selecting input and output categories
for a given column.
Args:
column_index (int): index of the column for which Checkbuttons
must be returned.
Returns:
tuple of ObserverCheckbutton, FollowingObserverCheckbutton:
tuple of observers
or None, None if no observers were found.
'''
if column_index < 0 or column_index >= len(self.frames):
return None, None
input_btn = None
output_btn = None
for child in self.frames[column_index].winfo_children():
try:
observer_type = child.observer_type
except AttributeError:
pass
else:
if observer_type == INPUT_OBSERVER:
input_btn = child
elif observer_type == OUTPUT_OBSERVER:
output_btn = child
return input_btn, output_btn
def remove_column(self, column_index):
''' Removes column with a specified index from the table.
If column index is zero or larger than the total number of columns
of the table, no column is removed.
Args:
column_index (int): index of the column to remove.
Returns:
bool: True if column was removed, False otherwise.
'''
# we must record category name before removing column,
# because it will disappear
if column_index < len(self.cells[0]):
category_name = self.cells[0][column_index].get().strip()
else:
category_name = ''
if super().remove_column(column_index):
col = column_index - 1
if category_name:
self.params_frame.input_categories_frame.remove_category(
category_name)
self.params_frame.output_categories_frame.remove_category(
category_name)
if col < len(self.current_categories):
self.current_categories[col] = ''
# remove from data only if category is present
if self.data:
column_with_data_removed = False
for row_index in range(len(self.data)):
if column_index < len(self.data[row_index]):
self.data[row_index].pop(column_index)
column_with_data_removed = True
if column_with_data_removed:
for row in range(1, self.nb_rows):
for j in range(column_index, self.nb_cols):
self.cells[row][j].data_column -= 1
self.panel_text_observer.change_state_if_needed()
self.frames[col].destroy()
for i in range(col + 1, len(self.frames)):
self.frames[i].grid_remove()
self.frames[i].grid(column=i + 1)
self.frames.pop(col)
self.col_checkboxes.pop(col)
return True
return False
def remove_row(self, row_index):
''' Removes data row with a specified index from the table.
Row is not physically removed.
If row_index is zero or larger than the total number of rows,
no row is removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row was deleted, False otherwise.
'''
if self.should_remove_row(row_index):
if self.data:
nb_pages = calculate_nb_pages(len(self.data), self.nb_rows)
data_index = self.get_data_index(row_index)
nb_cols = len(self.cells[row_index])
if data_index != -1 and data_index < len(self.data):
nb_rows_to_change = min(self.nb_rows, len(self.data) + 1)
self.data.pop(data_index)
for row in range(row_index + 1, nb_rows_to_change):
for col in range(0, nb_cols):
if self.cells[row][col].data_row != -1:
self.cells[row][col].data_row -= 1
self.panel_text_observer.change_state_if_needed()
super().remove_row(row_index)
if (nb_pages > 1):
self.add_row()
else:
super().remove_row(row_index)
self.row_checkboxes[row_index][0].destroy()
for i in range(row_index + 1, len(self.row_checkboxes)):
self.row_checkboxes[i][0].grid_remove()
self.row_checkboxes[i][0].grid(row=i + 1)
self.row_checkboxes.pop(row_index)
return True
return False
def get_data_index(self, row_index):
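        ''' Returns the input data row index recorded in the cells of the
            given displayed row, or -1 if none of the cells is bound to data.
        '''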
for j in range(0, len(self.cells[row_index])):
if self.cells[row_index][j].data_row != -1:
return self.cells[row_index][j].data_row
return -1
def before_cell_destroy(self, cell):
''' This method is called before a table cell is destroyed.
Notifies observers if data is not empty.
Args:
cell (SelfValidatingEntry): cell that will be destroyed
after call to this method.
'''
info = cell.grid_info()
col = int(info['column'])
row = int(info['row'])
if len(self.data) == 0:
cell.notify_observers(CELL_DESTROY, row, col)
def load_visible_data(self):
''' Displays data in the table. First, it adds more rows to fill
the frame, second, it displays data that fits the table.
'''
self.add_rows_to_fill_visible_frame()
self.display_data()
def display_data(self, start_row=0):
''' Displays data starting from a given data row.
This method is usually called by NavigationForTableFrame when
data spans across
several pages and users clicks on page navigation buttons.
Args:
start_row (int, optional): index of input data starting
from which data should be displayed, defaults to 0.
'''
nb_data_rows = len(self.data)
nb_displayed_rows = 0
for row_index in range(start_row, nb_data_rows):
values = self.data[row_index]
# do not insert data that is not visible
if nb_displayed_rows + 1 >= self.nb_rows:
return
for column_index, coeff in enumerate(values):
# row_index + 1 - first row has categories
self._display_one_cell(nb_displayed_rows, column_index,
coeff, row_index,
column_index, False)
row_index += 1
nb_displayed_rows += 1
if len(self.data) > 0:
nb_cols = len(self.data[0])
else:
nb_cols = self.nb_cols
nb_rows = self.nb_rows - 1 # -1 because we add +1 to row_index
while nb_displayed_rows < nb_rows:
for column_index in range(nb_cols):
self._display_one_cell(nb_displayed_rows, column_index, '',
-1, -1, False)
nb_displayed_rows += 1
    def _display_one_cell(self, row_index, column_index, value_to_display,
data_row, data_col, modify_data=True):
''' Displays data in a cell and sets cell's fields to proper values.
Args:
row_index (int): index of a row where the cell is.
column_index (int): index of a column where the cell is.
            value_to_display (str): new value to display in the cell.
data_row (int): row index of input data.
data_col (int): column index of input data.
modify_data (bool, optional): True if data was modified and
observers
must be notified, False otherwise.
'''
cell_row_index = row_index + 1
self.cells[cell_row_index][column_index].modify_data = modify_data
        self.cells[cell_row_index][column_index].text_value.set(value_to_display)
self.cells[cell_row_index][column_index].data_row = data_row
self.cells[cell_row_index][column_index].data_column = data_col
def add_rows_to_fill_visible_frame(self):
''' Adds rows to table to fill the frame. Usually adds a bit more and
scroll gets activated.
Exact number of added rows depends on operating system, height of
widgets and screen size.
'''
self.canvas.update_idletasks()
frame_height = self.canvas.winfo_height()
while self.canvas.bbox(ALL)[3] <= frame_height - 20:
self.add_row()
self._update_scroll_region()
def check_value(self, count):
''' This method is called in read_coefficients method to check what
values must be returned for data instance construction.
Args:
count (int): data column index.
Returns:
bool: True if the category in the given column index is not
an empty string,
False otherwise.
'''
if self.current_categories[count]:
return True
return False
def clear_all_data(self):
''' Clears all data from all cells and clears input data.
'''
self.data.clear()
super().clear_all_data()
self.current_categories.clear()
# reset modify data back to true
for cell_row in self.cells:
for cell in cell_row:
cell.modify_data = True
def before_cell_clear(self, cell):
''' This method is called before data is cleared from a given cell.
It sets fields of the given cell to initial values.
Args:
cell (SelfValidatingEntry): cell that will be cleared after
call to this method.
'''
cell.modify_data = False
cell.data_row = -1
cell.data_column = -1
class ObserverCheckbutton(Checkbutton):
''' This class implements Checkbutton for choosing input/output categories.
Attributes:
var (IntVar): variable that is set to 1 when Checkbutton is
selected, to 0 otherwise.
opposite_var (IntVar): variable of the other Checkbutton that
must deselected if this Checkbutton is selected.
parent (Tk object): frame that holds this Checkbutton.
Warning:
it is important for the parent to be gridded in the
same column
as the entire column of table entries is gridded, because
this class uses parent grid column index to determine
the column where the category name can be read from.
category_frame (CategoriesCheckBox): frame that displays selected
input or output categories.
Note:
if this Checkbutton is used to select input categories,
category_frame must be CategoriesCheckBox object that
displays selected input categories.
if this Checkbutton is used to select output categories,
category_frame must be CategoriesCheckBox object that
displays selected output categories.
opposite_category_frame (CategoriesCheckBox): frame that displays
selected input or output categories. If category_frame
displays input categories, then opposite_category_frame
must display output categories, and vice versa.
current_categories (list of str): list of categories. This class
might modify this list by removing invalid categories and
adding the valid ones.
cells (list of list of SelfValidatingEntry): all entry widgets
collected in list.
data (list of list of str or float): input data.
observer_type (int): describes type of the observer, for possible
values see dea_utils.
change_category_name (callable function): this function is
called when name of a category was changed.
combobox_text_var (StringVar): variable of the combobox used for
selecting categorical category.
Arguments are the same as attributes.
'''
def __init__(self, parent, var, opposite_var, category_frame,
opposite_category_frame,
current_categories, cells,
observer_type, change_category_name, data,
combobox_text_var, *args, **kw):
Checkbutton.__init__(self, parent, variable=var,
command=self._process, *args, **kw)
self.var = var
self.opposite_var = opposite_var
self.parent = parent
self.category_frame = category_frame
self.opposite_category_frame = opposite_category_frame
self.current_categories = current_categories
self.cells = cells
self.data = data
self.observer_type = observer_type
self.change_category_name = change_category_name
self.combobox_text_var = combobox_text_var
def _process(self):
''' This method is called when user clicks on Checkbutton.
Makes sure that the same category can be only input or only
output, but not both, and that selected category cannot also
be selected as a categorical category.
'''
category_name = self.get_category()
if self.var.get() == 1:
self.opposite_var.set(0)
if category_name:
self.category_frame.add_category(category_name)
self.opposite_category_frame.remove_category(category_name)
if category_name == self.combobox_text_var.get():
self.combobox_text_var.set('')
elif category_name:
self.category_frame.remove_category(category_name)
def deselect(self):
''' Deselects Checkbutton.
Note:
method _process() is not called in this case.
'''
self.var.set(0)
def select(self):
''' Selects Checkbutton.
Note:
method _process() is not called in this case.
'''
self.var.set(1)
def change_state_if_needed(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data or categories were modified.
Also modifies current_categories if needed.
This widget becomes disabled if invalid category name value or input
data value were provided by user.
Args:
entry (SelfValidatingEntry): Entry widget whose content was
modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid value,
we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
if entry_state == CHANGE_CATEGORY_NAME:
old_name = ''
internal_col = col - 2
if internal_col < len(self.current_categories):
old_name = self.current_categories[internal_col]
category_name = validate_category_name(
self.cells[0][col - 1].text_value.get().strip(),
internal_col, self.current_categories)
if category_name:
index = len(self.current_categories)
while index <= internal_col:
self.current_categories.append('')
index += 1
self.current_categories[internal_col] = category_name
if old_name:
# change category name in params_frame
self.change_category_name(old_name.strip(), category_name)
self.change_state_based_on_data(entry, entry_state, row, col)
entry.config(foreground='black')
else:
# if category name is empty, disable
self.disable(internal_col, old_name)
entry.config(foreground='red')
else:
self.change_state_based_on_data(entry, entry_state, row, col)
def change_state_based_on_data(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data was modified.
Args:
entry (SelfValidatingEntry): Entry widget whose content
was modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
internal_col = col - 2
# IMPORTANT: read from cells, not from current_categories, they might
# be empty at this stage
category_name = self.cells[0][col - 1].text_value.get().strip()
nb_rows = len(self.data)
if nb_rows == 0:
self.disable(internal_col, category_name)
return
elif len(self.data[0]) == 0:
self.disable(internal_col, category_name)
return
has_one_valid_entry = False
for row_index in range(nb_rows):
# can happen if some values are empty
while col - 1 >= len(self.data[row_index]):
self.data[row_index].append('')
try:
# col - 1 - first column contains DMU names
data_elem = float(self.data[row_index][col - 1])
except ValueError:
state = NOT_VALID_COEFF
else:
state = is_valid_coeff(data_elem)
if state == NOT_VALID_COEFF:
has_one_valid_entry = False
self.disable(internal_col, category_name)
return
elif state == VALID_COEFF or state == WARNING_COEFF:
has_one_valid_entry = True
if has_one_valid_entry:
self.config(state=NORMAL)
if category_name:
if category_name not in self.current_categories:
assert internal_col < len(self.current_categories)
self.current_categories[internal_col] = category_name
if entry_state != CELL_DESTROY and self.var.get() == 1:
self.category_frame.add_category(category_name)
return
def disable(self, internal_col, category_name):
''' Disables Checkbutton.
Args:
internal_col (int): internal column index.
category_name (str): name of category.
'''
self.config(state=DISABLED)
if category_name:
if self.var.get() == 1:
self.category_frame.remove_category(category_name)
if self.opposite_var.get() == 1:
self.opposite_category_frame.remove_category(category_name)
if category_name in self.current_categories:
assert(internal_col < len(self.current_categories))
self.current_categories[internal_col] = ''
if category_name == self.combobox_text_var.get():
self.combobox_text_var.set('')
def get_category(self):
''' Finds category name stored in the corresponding Entry widget
based on where parent of Checkbutton was gridded.
Returns:
str: category name, might be empty string.
'''
info = self.parent.grid_info()
        # conversion to int is necessary for Windows
# for some reason in Windows grid info is stored as str
col = int(info['column'])
return self.cells[0][col - 1].text_value.get().strip()
class FollowingObserverCheckbutton(ObserverCheckbutton):
''' This class follows state of another ObserverCheckbutton that is
used to select input or output categories.
This class is used in order to skip checking if data is valid
second time. The first Checkbutton has already performed this check.
Attributes:
var (IntVar): variable that is set to 1 when Checkbutton
is selected, to 0 otherwise.
opposite_var (IntVar): variable of the other Checkbutton that
must deselected if this Checkbutton is selected.
parent (Tk object): frame that holds this Checkbutton.
Warning:
it is important for the parent to be gridded in the
same column as the entire column of table entries
is gridded, because this class uses parent grid column
index to determine the column
where the category name can be read from.
category_frame (CategoriesCheckBox): frame that displays
selected input or output categories.
Note:
if this Checkbutton is used to select input categories,
category_frame must be CategoriesCheckBox object that
displays selected input categories.
if this Checkbutton is used to select output categories,
category_frame
must be CategoriesCheckBox object that displays selected
output categories.
opposite_category_frame (CategoriesCheckBox): frame that displays
selected input or output categories. If category_frame displays
input categories, then opposite_category_frame
must display output categories, and vice versa.
current_categories (list of str): list of categories. This class
might modify this list by removing invalid categories and
adding the valid ones.
cells (list of list of SelfValidatingEntry): all entry widgets
collected in list.
data (list of list of str or float): input data.
observer_type (int): describes type of the observer, for
possible values see dea_utils.
change_category_name (callable function): this function is called
when name of a category was changed.
combobox_text_var (StringVar): variable of the combobox used for
selecting categorical category.
main_box (ObserverCheckbutton): Checkbutton that changes state
first. This Checkbutton changes its state to the same state
as main_box, but does not do extra things
that have been already performed by main_box
(changes to current_categories, for example).
'''
def __init__(self, parent, var, opposite_var, category_frame,
opposite_category_frame,
current_categories, cells,
observer_type, params_frame, data,
combobox_text_var, main_box, *args, **kw):
super().__init__(parent, var, opposite_var, category_frame,
opposite_category_frame, current_categories, cells,
observer_type, params_frame, data,
combobox_text_var, *args, **kw)
self.main_box = main_box
def change_state_if_needed(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data was modified depending on
the state of main_box.
Args:
entry (SelfValidatingEntry): Entry widget whose content
was modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
category_name = self.get_category()
if str(self.main_box.cget('state')) == DISABLED:
self.disable(col - 2, category_name)
else:
self.config(state=NORMAL)
if entry_state != CELL_DESTROY and self.var.get() == 1:
self.category_frame.add_category(category_name)
class DefaultCategoriesAndDMUModifier(object):
''' This class is responsible for adding automatic category and DMU names
if user starts typing data without providing such names first.
Attributes:
cells (list of list of SelfValidatingEntry): list of all Entry
widgets with data.
current_categories (list of str): list of categories.
Args:
cells (list of list of SelfValidatingEntry): list of all Entry
widgets with data.
current_categories (list of str): list of categories.
'''
def __init__(self, cells, current_categories):
self.cells = cells
self.current_categories = current_categories
def change_state_if_needed(self, entry, entry_state, row, col):
''' Writes automatic category and DMU names if they were not
specified before.
Args:
entry (SelfValidatingEntry): Entry widget the content
of which was modified.
entry_state (int): constant that describes entry state,
for details see dea_utils module.
row (int): row index of entry widget. It is the real grid value,
we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
if (entry_state != EMPTY_COEFF and entry_state != CELL_DESTROY and
entry_state != CHANGE_CATEGORY_NAME):
internal_row_index = row - 2
dmu_name = self.cells[internal_row_index][0].text_value.get().strip()
if not dmu_name:
self.cells[internal_row_index][0].text_value.set(
'DMU{0}'.format(internal_row_index))
category_name = self.cells[0][col - 1].text_value.get().strip()
if not category_name:
internal_col_index = col - 2
name = 'Category{0}'.format(internal_col_index)
if internal_col_index >= len(self.current_categories):
index = len(self.current_categories) - 1
while index != internal_col_index:
self.current_categories.append('')
index += 1
# category name MUST be written first, because next line calls
# ObserverCheckbutton
self.cells[0][col - 1].text_value.set(name)
class SelfValidatingEntry(Entry):
    ''' This class implements an Entry widget that knows how to highlight
invalid data. It also notifies other widgets if the content of
Entry changes. Other widgets must implement method
change_state_if_needed().
Such widgets should be appended to the list of listening widgets
called observers.
Attributes:
text_value (StringVar): textvariable of Entry widget that
calls method on_text_changed when the content on Entry changes.
observers (list of objects that implement method change_state_if_needed):
list of widgets or other objects that must be notified if the
content of Entry changes.
data_row (int): row index in data table which should be modified
when the content of Entry changes.
data_column (int): column index in data table which should be
modified when the content of Entry changes.
            data (list of list of str or float): data that will be modified.
modify_data (bool): True if data should be modified, False
otherwise. It is usually set to False when data is uploaded
from file.
panel_text_observer (PanelTextObserver): object that is notified
when data changes.
This object is responsible for adding star to file name when
data was modified.
            all_cells (list of list of SelfValidatingEntry): reference to where all cells
are stored.
Warning: all cells must be created before any cell content
can be modified.
Args:
parent (Tk object): parent of this Entry widget.
            data (list of list of str or float): input data that will
be modified.
            all_cells (list of list of SelfValidatingEntry): reference to where all cells
are stored.
Warning: all cells must be created before any cell content
can be modified.
'''
def __init__(self, parent, data, all_cells, *args, **kw):
self.text_value = StringVar(master=parent)
self.text_value.trace("w", self.on_text_changed)
super().__init__(parent, *args, **kw)
self.config(textvariable=self.text_value)
self.observers = []
self.all_cells = all_cells
self.data_row = -1
self.data_column = -1
self.data = data
self.modify_data = True
self.panel_text_observer = None
def on_text_changed(self, *args):
''' This method is called each time the content of Entry is modified.
It highlights invalid data, changes data if needed and notifies
other objects when data was changed.
Args are provided by StringVar trace method, but are not used.
'''
info = self.grid_info()
        # physical grid indices
col = int(info['column'])
row = int(info['row'])
self.notify_panel_observer()
if row == 2: # possibly name of category is modified
self.notify_observers(CHANGE_CATEGORY_NAME, row, col)
elif col == 1 and row > 2: # column with DMU names, strings are allowed
self.modify_data_if_needed(row, col)
elif col > 1 and row > 2: # everything left
self.modify_data_if_needed(row, col)
try:
value = float(self.text_value.get().strip())
except ValueError:
self.modify_data = True
self.config(foreground='red')
if len(self.text_value.get().strip()) == 0:
self.notify_observers(EMPTY_COEFF, row, col)
else:
self.notify_observers(NOT_VALID_COEFF, row, col)
return
text_status = is_valid_coeff(value)
if text_status == NOT_VALID_COEFF:
self.config(foreground='red')
elif text_status == WARNING_COEFF:
self.config(foreground='orange')
else:
self.config(foreground='black')
self.notify_observers(text_status, row, col)
self.modify_data = True
def modify_data_if_needed(self, row, col):
''' Modifies data if modify_data is set to True.
Adds empty strings to data when user modifies Entry for which
            data_row and/or data_column are equal to -1. Updates data with new
values entered by user.
Args:
row (int): row where Entry is gridded
col (int): column where Entry is gridded
'''
if self.modify_data:
if self.data_row != -1 and self.data_column != -1:
self.data[self.data_row][self.data_column] = self.text_value.get().strip()
else:
row_for_data = len(self.data)
added_rows = False
# -2 because row is physical grid index, not cell index
row_count = len(self.all_cells) - 1
for cells_row in reversed(self.all_cells):
if cells_row[0].data_row != -1:
break
row_count -= 1
if row_count == -1:
row_count = 0
while row_count < row - 2:
self.data.append([])
added_rows = True
row_count += 1
if added_rows:
self.data_row = len(self.data) - 1
else:
assert row_count >= row - 2
self.data_row = len(self.data) - 1 - (row_count - (row - 2))
col_for_data = len(self.data[self.data_row])
added_cols = False
max_nb_col = 0
nb_rows = len(self.data)
for r_ind in range(nb_rows):
row_len = len(self.data[r_ind])
if row_len > max_nb_col:
max_nb_col = row_len
max_nb_col = max(max_nb_col, col)
c_ind = col_for_data
while c_ind < max_nb_col:
self.data[self.data_row].append('')
grid_col = len(self.data[self.data_row])
self.all_cells[row - 2][grid_col - 1].data_row = self.data_row
self.all_cells[row - 2][grid_col - 1].data_column = c_ind
self.notify_observers(EMPTY_COEFF, row, grid_col)
added_cols = True
c_ind += 1
if (col_for_data < col):
col_for_data += 1
if added_cols:
for r_ind in range(nb_rows):
while len(self.data[r_ind]) < max_nb_col:
self.data[r_ind].append('')
grid_col = len(self.data[r_ind])
if r_ind >= self.data_row - (row - 3): # 3 is the first physical
# row with data on the page
grid_row = row - (self.data_row - r_ind)
self.all_cells[grid_row - 2][grid_col - 1].data_row = r_ind
self.all_cells[grid_row - 2][grid_col - 1].data_column = grid_col - 1
self.notify_observers(EMPTY_COEFF, grid_row, grid_col)
self.data_column = col_for_data - 1
else:
self.data_column = col - 1
self.data[self.data_row][self.data_column] = self.text_value.get().strip()
def notify_panel_observer(self):
''' Notifies panel observer that data was modified.
'''
if self.panel_text_observer is not None and self.modify_data is True:
self.panel_text_observer.change_state_if_needed()
def notify_observers(self, entry_state, row, col):
''' Notifies all observers stored in list of observers that data
was modified.
Args:
entry_state (int): state of the Entry widget that describes if
data is valid after modification, for possible values see
dea_utils module.
row (int): row where Entry is gridded.
col (int): column where Entry is gridded.
'''
for observer in self.observers:
observer.change_state_if_needed(self, entry_state, row, col)
class PanelTextObserver(object):
''' This class changes StringVar value that is traced in other classes.
Attributes:
if_text_modified_str (StringVar): StringVar object that
changes value when this observer is notified.
'''
def __init__(self, if_text_modified_str):
self.if_text_modified_str = if_text_modified_str
def change_state_if_needed(self):
''' Changes value of internal StringVar object.
'''
self.if_text_modified_str.set('*')
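# Illustrative wiring (a sketch, not part of the original module; the names
# below are hypothetical): a single PanelTextObserver is shared by the cells
# of a panel so that any edit marks the file as modified.
#
# modified_marker = StringVar(master=root, value='')
# observer = PanelTextObserver(modified_marker)
# cell = SelfValidatingEntry(root, data, all_cells)
# cell.panel_text_observer = observer # editing the cell now sets the marker to '*'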
class CheckbuttonWithVar(Checkbutton):
''' Custom Checkbutton widget that provides deselect method.
Attributes:
var (IntVar): 0 if not selected, 1 otherwise.
Args:
parent (Tk object): parent of this widget.
var (IntVar): variable that controls if Checkbutton is selected.
'''
def __init__(self, parent, var, *args, **kw):
super().__init__(parent, variable=var, *args, **kw)
self.var = var
def deselect(self):
''' Deselects Checkbutton.
'''
self.var.set(0)
|
mit
| -219,998,321,544,119,700
| 42.22571
| 101
| 0.564147
| false
| 4.477857
| false
| false
| false
|
sbesson/zeroc-ice
|
py/test/Ice/admin/Client.py
|
1
|
1029
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
slice_dir = Ice.getSliceDir()
if not slice_dir:
print(sys.argv[0] + ': Slice directory not found.')
sys.exit(1)
Ice.loadSlice("'-I" + slice_dir + "' Test.ice")
import AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def run(args, communicator):
AllTests.allTests(communicator)
return True
try:
communicator = Ice.initialize(sys.argv)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
gpl-2.0
| 4,657,194,139,776,982,000
| 22.386364
| 72
| 0.578231
| false
| 3.741818
| true
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/distutils/cygwinccompiler.py
|
1
|
9736
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: cygwinccompiler.py
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
__revision__ = '$Id$'
import os
import sys
import copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos + 6:msc_pos + 10]
if msc_ver == '1300':
return [
'msvcr70']
if msc_ver == '1310':
return [
'msvcr71']
if msc_ver == '1400':
return [
'msvcr80']
if msc_ver == '1500':
return [
'msvcr90']
raise ValueError('Unknown MS Compiler version %s ' % msc_ver)
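# Illustrative example (not from the original module): a CPython interpreter
# whose sys.version contains "MSC v.1500" makes get_msvcr() return ['msvcr90'];
# CygwinCCompiler.__init__ below stores that list in self.dll_libraries so the
# matching MSVC runtime import library is added at link time.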
class CygwinCCompiler(UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = '.o'
static_lib_extension = '.a'
shared_lib_extension = '.dll'
static_lib_format = 'lib%s%s'
shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
status, details = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" % (
status, details))
if status is not CONFIG_H_OK:
self.warn("Python's pyconfig.h doesn't seem to support your compiler. Reason: %s. Compiling may fail because of undefined preprocessor macros." % details)
self.gcc_version, self.ld_version, self.dllwrap_version = get_versions()
self.debug_print(self.compiler_type + ': gcc %s, ld %s, dllwrap %s\n' % (
self.gcc_version,
self.ld_version,
self.dllwrap_version))
if self.ld_version >= '2.10.90':
self.linker_dll = 'gcc'
else:
self.linker_dll = 'dllwrap'
if self.ld_version >= '2.13':
shared_option = '-shared'
else:
shared_option = '-mdll -static'
self.set_executables(compiler='gcc -mcygwin -O -Wall', compiler_so='gcc -mcygwin -mdll -O -Wall', compiler_cxx='g++ -mcygwin -O -Wall', linker_exe='gcc -mcygwin', linker_so='%s -mcygwin %s' % (
self.linker_dll, shared_option))
if self.gcc_version == '2.91.57':
self.dll_libraries = [
'msvcrt']
self.warn('Consider upgrading to a newer version of gcc')
else:
self.dll_libraries = get_msvcr()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
try:
self.spawn(['windres', '-i', src, '-o', obj])
except DistutilsExecError as msg:
raise CompileError, msg
else:
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs)
except DistutilsExecError as msg:
raise CompileError, msg
def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
libraries.extend(self.dll_libraries)
if export_symbols is not None and (target_desc != self.EXECUTABLE or self.linker_dll == 'gcc'):
temp_dir = os.path.dirname(objects[0])
dll_name, dll_extension = os.path.splitext(os.path.basename(output_filename))
def_file = os.path.join(temp_dir, dll_name + '.def')
lib_file = os.path.join(temp_dir, 'lib' + dll_name + '.a')
contents = [
'LIBRARY %s' % os.path.basename(output_filename),
'EXPORTS']
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents), 'writing %s' % def_file)
if self.linker_dll == 'dllwrap':
extra_preargs.extend(['--output-lib', lib_file])
extra_preargs.extend(['--def', def_file])
else:
objects.append(def_file)
if not debug:
extra_preargs.append('-s')
UnixCCompiler.link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, debug, extra_preargs, extra_postargs, build_temp, target_lang)
return
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normcase(src_name))
if ext not in self.src_extensions + ['.rc', '.res']:
raise UnknownFileError, "unknown file type '%s' (from '%s')" % (
ext, src_name)
if strip_dir:
base = os.path.basename(base)
if ext == '.res' or ext == '.rc':
obj_names.append(os.path.join(output_dir, base + ext + self.obj_extension))
else:
obj_names.append(os.path.join(output_dir, base + self.obj_extension))
return obj_names
class Mingw32CCompiler(CygwinCCompiler):
compiler_type = 'mingw32'
def __init__(self, verbose=0, dry_run=0, force=0):
CygwinCCompiler.__init__(self, verbose, dry_run, force)
if self.ld_version >= '2.13':
shared_option = '-shared'
else:
shared_option = '-mdll -static'
if self.gcc_version <= '2.91.57':
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall', compiler_so='gcc -mno-cygwin -mdll -O -Wall', compiler_cxx='g++ -mno-cygwin -O -Wall', linker_exe='gcc -mno-cygwin', linker_so='%s -mno-cygwin %s %s' % (
self.linker_dll, shared_option,
entry_point))
self.dll_libraries = []
self.dll_libraries = get_msvcr()
CONFIG_H_OK = 'ok'
CONFIG_H_NOTOK = 'not ok'
CONFIG_H_UNCERTAIN = 'uncertain'
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
from distutils import sysconfig
import string
if string.find(sys.version, 'GCC') >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
else:
fn = sysconfig.get_config_h_filename()
try:
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError as exc:
return (
CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
if string.find(s, '__GNUC__') >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
return (
CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion', 'r')
out_string = out.read()
out.close()
result = re.search('(\\d+\\.\\d+(\\.\\d+)*)', out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v', 'r')
out_string = out.read()
out.close()
result = re.search('(\\d+\\.\\d+(\\.\\d+)*)', out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version', 'r')
out_string = out.read()
out.close()
result = re.search(' (\\d+\\.\\d+(\\.\\d+)*)', out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
|
unlicense
| 2,730,727,172,121,840,000
| 38.104418
| 243
| 0.578677
| false
| 3.582046
| true
| false
| false
|
USCLiquidPropulsionLaboratory/Engine-sizing-snake
|
Blue_Steel.py
|
1
|
38183
|
## GOX-kerosene sim
#@ Author Juha Nieminen
#import sys
#sys.path.insert(0, '/Users/juhanieminen/Documents/adamrocket')
import RocketComponents as rc
from physical_constants import poise, inches, Runiv, gallons, lbm, \
gearth, atm, psi, lbf
from numpy import pi, linspace, cos, radians, sqrt, exp, log, array, full, ceil
from scipy import optimize as opt
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import Flows1D as flows
#DESIGN VARIABLES____________________________________________________________________________________
# nominal parameters
Preg_N2 = 1300*psi # regulated N2 outlet pressure [Pa]
mdot_fuel_nom = 0.2 # This is only for cooling jacket pressure drop purposes [kg/s]
Pdrop_jacket_nom= 1*psi # Cooling jacket pressure drop at mdot_nominal [Pa]
OF_nom = 2.25 # Oxidizer-to-fuel ratio. This has only effect on initial guesses during solving
# Pressurant tank dimensions
Vprestank = 0.053 # N2 pressurant tank volume [m3]
# Propellant tank dimensions
Vfueltank = 4*gallons # fuel tank volume [m3]
Voxtank = 4*0.053 # ox tank volume [m3]
# Tubing
d_presfuel_tube = 1.0*inches # pressurant tank -> fuel tank tube diameter [m]
L_presfuel_tube = 0.5 # pressurant tank -> fuel tank tube length [m]
d_oxtube = 0.87*inches # ox tank -> manifold tube diameter [m]
L_oxtube = 2.4 # ox tank -> manifold tube length [m]
d_fueltube = 0.87*inches # fuel tank -> manifold tube diameter [m]
L_fueltube = 3.0 # fuel tank -> manifold tube length [m]
roughness = 0.005 # epsilon/diameter, dimensionless
# Valves
Cv_ox_check = 4.7 # oxidizer check valve flow coefficient, dimensionless
Pcrack_ox_check = 10*psi # oxidizer check valve opening pressure [Pa]
Cv_pres_check = 1.8 # nitrogen check valve flow coefficient, dimensionless
Pcrack_pres_check = 0.33*psi # nitrogen check valve opening pressure [Pa]
Cv_pres_valve = 8.8 # nitrogen solenoid valve flow coefficient, dimensionless
Cv_ox_valve = 8.8 # oxidizer solenoid valve flow coefficient, dimensionless
Cv_fuel_valve = 8.8 # fuel solenoid valve flow coefficient, dimensionless
# Injector
cd_oxInjector = 0.767 # orifice discharge coefficient
diameter_oxInjectorHoles = 2.54e-3 #number xx drill # ox orifice diameter [m]
#length_oxHole = 0.005 # ox orifice length [m]
numOxInjectorHoles = 24 # number of ox orifices in the injector
area_oxInjector = numOxInjectorHoles*pi*diameter_oxInjectorHoles**2/4 # total ox flow area [m2]
cd_fuelInjector = 0.767 # orifice discharge coefficient
diameter_fuelInjectorHoles = 0.508e-3 #number xx drill # fuel orifice diameter [m]
numFuelHoles = 59 # number of fuel orifices in the injector
area_fuelInjector = numFuelHoles*pi*diameter_fuelInjectorHoles**2/4 # total fuel flow area [m2]
# Define initial/nominal conditions in the chamber (obtained from CEA code assuming OFratio = 2.25)
TfireInit = 293 # initial flame temperature [K]
Pfire = 1*atm # initial chamber pressure [Pa]
gammaFireInit = 1.148 # dimensionless
ga = gammaFireInit
mbarFireInit = 21.87 # combustion products' initial molecular mass [kg/kmol]
RfireInit = Runiv/mbarFireInit # combustion products' initial specific gas constant [J/kgK]
Pambient = atm # ambient pressure [Pa]
# Nozzle and chamber
d_nozzleThroat = 1.0*inches # throat diameter [m]
A_nozzleThroat = pi*d_nozzleThroat**2/4 # throat area [m2]
area_ratio = 7.46 # nozzle exit-to-throat area ratio
A_nozzleExit = area_ratio*A_nozzleThroat # nozzle exit area [m2]
d_nozzleExit = sqrt(4*A_nozzleExit/pi) # nozzle exit diameter [m]
Dchamber = 0.08 # chamber diameter [m]
Achamber = pi*Dchamber**2/4 # chamber cross sectional area [m2]
Lchamber = 0.14 # chamber length [m]
Vchamber = Achamber*Lchamber # chamber volume [m3]
Lstar = Vchamber/A_nozzleThroat # chamber characteristic length [m]
Mc_nom = flows.getIsentropicMs(A_nozzleThroat, Achamber, gammaFireInit)[0] # nominal chamber Mach number
print("throat diameter is", '%.1f'%(d_nozzleThroat*1000), 'mm')
print("exit diameter is", '%.1f'%(d_nozzleExit*1000), 'mm')
print("chamber volume is", '%.5f'%Vchamber, "m3")
print("chamber Lstar is", '%.2f'%Lstar, "m")
print("chamber Mach_nom is", '%.2f'%Mc_nom)
# INITIAL CONDITIONS____________________________________________________________________________________________
#Define initial conditions in the tanks
TfuelPresStart = 293 # Fuel pressurant (=nitrogen) temp [K]
FFfueltankStart = 0.9 # Fuel tank fill fraction (Vfuel/Vtank)
PfuelPrestankStart = 2640*psi - Preg_N2*Vfueltank*(1-FFfueltankStart)/Vprestank # Fuel pressurant tank pressure once fueltank has been pressurized [Pa]
ToxStart = 293 # Oxidizer (GOX) temp [K]
PoxtankStart = 1600*psi # Oxidizer tank pressure [Pa]
TfuelStart = 293 # Fuel temp [K]
PfueltankStart = Preg_N2 - 1*psi # Fuel tank pressure [Pa] (slightly below Preg_N2 helps convergence on first timestep)
# initialize propellants
nitrogen = rc.NitrogenFluid()
GOX = rc.GOXFluid()
kerosene = rc.Kerosene()
#initialize nozzle and chamber
nozzle = rc.ConvergingDivergingNozzle(A_nozzleExit, A_nozzleThroat)
mdot_init_noz = nozzle.getmdot(gammaFireInit, GOX.R, Pfire, TfireInit, atm)
chamber = rc.GOXKeroCombustionChamber(nozzle, Vchamber, TfireInit, ga, mbarFireInit, Pfire, atm, mdot_init_noz)
#initialize injector orifices
ox_orifice = rc.GasOrifice(area_oxInjector, cd_oxInjector, GOX.gamma, GOX.R)
fuel_orifice = rc.LiquidOrifice(area_fuelInjector, cd_fuelInjector )
#initialize pressurant tanks
fuelprestank = rc.IdealgasTank(nitrogen, Vprestank, TfuelPresStart, PfuelPrestankStart)
#initialize propellant tanks
oxtank = rc.IdealgasTank(GOX, Voxtank, ToxStart, PoxtankStart)
fueltank = rc.LiquidPropellantTank(nitrogen, kerosene, Vfueltank, TfuelStart, TfuelPresStart,\
PfueltankStart, FFfueltankStart, Preg_N2)
#initialize pressure regulators
N2_regu = rc.PressureRegulator(Preg_N2, nitrogen)
#initialize solenoids
fuelSole = rc.IncompressibleFlowSolenoid( Cv_fuel_valve)
oxSole = rc.CompressibleFlowSolenoid( Cv_ox_valve, GOX)
presSole = rc.CompressibleFlowSolenoid( Cv_pres_valve, nitrogen)
#initialize check valves
ox_check = rc.CompressibleFlowCheckValve( Cv_ox_check, Pcrack_ox_check, GOX)
pres_check = rc.CompressibleFlowCheckValve( Cv_pres_check, Pcrack_pres_check, nitrogen)
#initialize tubing
ox_tube = rc.RoughStraightCylindricalTube(d_oxtube, L_oxtube, roughness, True)
fuel_tube = rc.RoughStraightCylindricalTube(d_fueltube, L_fueltube, roughness, True)
presfuel_tube = rc.RoughStraightCylindricalTube(d_presfuel_tube, L_presfuel_tube, roughness, True)
#initialize cooling jacket
jacket = rc.CoolingJacket(mdot_fuel_nom, Pdrop_jacket_nom)
#initialize arrays for various data time histories
T_chamber = [chamber.T] # combustion chamber temperature [K]
Pchamber = [chamber.get_P_inlet()] # combustion chamber pressure [Pa]
Pexit = [nozzle.getPe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit pressure [Pa]
Mexit = [nozzle.getMe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit Mach number
cmass = [chamber.m] # resident propellant mass in combustion chamber [kg]
mdot_nozzle = [nozzle.getmdot(gammaFireInit, RfireInit, chamber.get_P_inlet(), chamber.T, chamber.Pa)] # mass flow out of the nozzle [kg/s]
Poxtank = [oxtank.getPtank()] # ox tank pressure [Pa]
Toxtank = [oxtank.getTtank()] # ox tank temperature [K]
mox = [oxtank.getM()] # oxidizer mass in tank [kg]
Pfueltank = [fueltank.getPtank()] # fuel tank pressure [Pa]
Tfueltank = [fueltank.getTpres()] # pressurant temperature in fuel tank[K]
mPresFueltank = [fueltank.getMpres()] # pressurant mass in fuel tank [kg]
mfuel = [fueltank.getMprop()] # fuel mass in tank [kg]
FFfueltank = [fueltank.getFF()] # fuel tank fill fraction defined as Vfuel/(Vfueltank)
TfuelPres = [fuelprestank.getTtank()] # temperature in fuel pressurant tank [K]
PfuelPres = [fuelprestank.getPtank()] # pressure in fuel pressurant tank [Pa]
mfuelPres = [fuelprestank.getM()] # pressurant mass in fuel pressurant tank [kg]
time = [0] # time array [s]
mdot_ox = [0] # ox mass flow out of the tank [kg/s]
P1ox = [0] # ox tank pressure [Pa]
P2ox = [0] # ox check valve outlet pressure [Pa]
P3ox = [0] # ox flow solenoid outlet pressure [Pa]
P4ox = [0] # ox injector inlet pressure [Pa]
T1ox = [0] # ox tank temp [K]
T2ox = [0] # ox check valve outlet temp [K]
T3ox = [0] # ox flow solenoid outlet temp [K]
T4ox = [0] # ox injector inlet temp [K]
mdot_fuel = [0] # fuel mass flow out of the tank [kg/s]
rooFuel = fueltank.propellant.density # fuel density, assumed constant [kg/m3]
P1fuel = [0] # fuel tank pressure [Pa]
P2fuel = [0] # fuel solenoid outlet pressure [Pa]
P3fuel = [0] # fuel cooling jacket inlet pressure [Pa]
P4fuel = [0] # fuel injector inlet pressure [Pa]
mdot_fuel_pres = [0] # fuel pressurant mass flow rate [kg/s]
P3pres = [0] # pressurant pressure at check valve outlet [Pa]
P4pres = [0] # pressurant pressure at solenoid valve outlet [Pa]
mTotal = [0] # propellant mass in the system [kg]
mprs = [mfuelPres[0]+mPresFueltank[0]] # pressurant mass in the system [kg]
OFratio = [0] # oxidizer to fuel mass flow ratio
Isp = [0] # specific impulse [s]
Thrust = [nozzle.getThrust(chamber.get_P_inlet(), Pambient, gammaFireInit) ] # rocket thrust [N]
#SIMULATE_______________________________________________________________________________________________________
# using orifices as follows: ejecting GOX from manifold to chamber, fuel liq-to-liq from manifold to chamber
print("")
print("STARTING SIM...")
print("")
print("mOxStart is", '%.2f'%mox[0], "kg")
print("mKerostart is", mfuel[0], "kg")
print("mN2start in N2 tank is", '%.2f'%mfuelPres[0], "kg")
print("mN2start in fuel tank is", '%.2f'%(fueltank.getMpres()), "kg")
# The first step is to solve oxidizer and fuel mass flow rates from the tank to combustion chamber.
# definitions:
# P1ox = GOX tank pressure
# P2ox = check valve outlet pressure
# P3ox = ox valve outlet pressure
# P4ox = injector inlet pressure
# (P1ox-P2ox) = ox check valve pressure drop, eq 1
# (P2ox-P3ox) = ox flow solenoid pressure drop, eq 2
# (P3ox-P4ox) = ox tubing pressure drop, eq 3
# (P4ox-Pchamber) = ox injector pressure drop, eq 4
# P1pres = Nitrogen tank pressure
# P2pres = Regulation pressure
# P3pres = Check valve outlet pressure
# P4pres = Nitrogen solenoid outlet
# P5pres = Nitrogen tubing outlet = fuel tank pressure
# (P2pres-P3pres) = Nitrogen check valve pressure drop
# (P3pres-P4pres) = Nitrogen solenoid valve pressure drop
# (P4pres-P5pres) = Nitrogen tubing pressure drop
# P1fuel = fuel tank pressure
# P2fuel = fuel valve outlet pressure
# P3fuel = cooling jacket inlet pressure
# P4fuel = injector inlet pressure
# (P1fuel-P2fuel) = fuel valve pressure drop, eq1
# (P2fuel-P3fuel) = fuel tubing pressure drop, eq2
# (P3fuel-P4fuel) = cooling jacket pressure drop, eq3
# (P4fuel-Pchamber) = injector pressure drop, eq4
# In the case of oxidizer, P1 and Pchamber are known, so one must solve for P2, P3, and P4. Fourth unknown is the mass flow rate. The four equations are check valve/solenoid/tubing/injector pressure drops. These equations are defined in oxfunks method below, and underlying physics are in RocketComponents.py under their respective classes.
# With pressurant, P2 (regulation pressure) and P5 (fuel tank pressure) are known, so one must solve for P3 and P4. The third unknown is pressurant mass flow rate. Equations to be solved are pressure drops over the check valve, solenoid valve, and the tubing.
# With fuel P1 and Pchamber are known, so one must solve for P2, P3, and P4. Fourth unknown is mass flow rate.
# fsolve requires sensible initial guesses for all unknowns. They are established by guessing the mass flow rate, because all other pressures trickle down from that.
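# A minimal sketch (added for illustration, not part of the original model) of
# the solving pattern used below for each feed line: the unknown intermediate
# pressures and the mass flow rate are packed into one vector U, each list
# element is the residual of one pressure-drop equation, and scipy's fsolve
# (imported above as opt) drives all residuals to zero. The callables
# drop_valve, drop_tube and mdot_orifice are hypothetical stand-ins for the
# RocketComponents methods used further down; this helper is never called.
def _example_line_residuals(U, P_tank, P_chamb, drop_valve, drop_tube, mdot_orifice):
    P2, P3, mdot = U
    return [P_tank - P2 - drop_valve(mdot),        # valve pressure drop
            P2 - P3 - drop_tube(mdot),             # tubing pressure drop
            mdot - mdot_orifice(P3, P_chamb)]      # injector mass flow balance
# e.g. P2_sol, P3_sol, mdot_sol = opt.fsolve(_example_line_residuals, guesses,
#                                            args=(P_tank, P_chamb, f1, f2, f3))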
timestep_small = 1e-5 # seconds, used during initial transient
timestep_nom = 1e-4 # seconds, used after 0.01 seconds of simulation time
t_transient = 0.01 # seconds, estimated time of initial transient
t_simulation = 3 # seconds
if t_simulation <= t_transient:
simsteps = int(ceil(t_simulation/timestep_small))
else:
simsteps = int(ceil( t_transient/timestep_small + (t_simulation-t_transient)/timestep_nom ))
print("Sim time is", t_simulation, "s, number of simsteps is", simsteps)
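# Worked example of the step count above (illustrative): with the nominal
# settings, the transient covers 0.01/1e-5 = 1000 steps and the remaining
# 2.99 s cover 2.99/1e-4 = 29900 steps, so simsteps = 30900.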
i=0
for i in range(0, simsteps):
if time[i] < t_transient:
timestep = timestep_small # use shorter timestep during initial transient
else: timestep = timestep_nom # proceed with nominal timestep
#while True:
print("i=", i)
P1ox = Poxtank[i]
P1fuel = Pfueltank[i]
Pchamb = Pchamber[i]
mu_ox = GOX.getViscosity(P1ox, Toxtank[i])
roo_ox = GOX.getDensity(P1ox, Toxtank[i])
Tox = Toxtank[i]
Tpres = TfuelPres[i]
mu_fuel = kerosene.mu
mu_N2_fuel = nitrogen.getViscosity(Preg_N2, TfuelPres[i])
roo_N2_fuel = nitrogen.getDensity(Preg_N2, TfuelPres[i])
if i==0: # First guesses. Based on choked flow at ox injector (multiplied by 0.7 to adjust for better convergence)
mdot_injector_choked = ox_orifice.getMdot(P1ox, Pfire, Tox)
'''
mdot_checkvalve_choked = ox_check.getMdot(P1ox, Pfire, GOX.roo_std, roo_ox, Tox)
if mdot_injector_choked >= mdot_checkvalve_choked: #check valve is choking
print("check valve is initially choking")
mdot_ox_guess = mdot_checkvalve_choked
print("mdot_ox_guess is", mdot_ox_guess)
P4ox_guess = ox_orifice.getUpstreamPressure(Pchamb, Tox, mdot_ox_guess)
P3ox_guess = P4ox_guess + ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P2ox_guess = P3ox_guess + oxSole.getPressureDrop(mdot_ox_guess, P2ox_guess, roo_ox)
else:
'''
mdot_ox_guess = mdot_injector_choked *0.7
P2ox_guess = P1ox - ox_check.getPressureDrop(mdot_ox_guess, P1ox, GOX.roo_std, roo_ox, Tox)
P3ox_guess = P2ox_guess - oxSole.getPressureDrop(mdot_ox_guess, P2ox_guess, roo_ox)
P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox is", P2ox/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P5ox_guess is", P5ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_ox_guess/OF_nom
P2fuel_guess = P1fuel - fuelSole.getPressureDrop(mdot_fuel_guess, rooFuel)
P3fuel_guess = P2fuel_guess - fuel_tube.getPressureDrop(mdot_fuel_guess, mu_fuel, rooFuel)
P4fuel_guess = P3fuel_guess - jacket.getPressureDrop(mdot_fuel_guess)
mdot_pres_guess = mdot_fuel_guess*roo_N2_fuel/rooFuel #volumetric flowrates of fuel and pressurant are the same
P3pres_guess = Preg_N2 - pres_check.getPressureDrop(mdot_pres_guess, Preg_N2, nitrogen.roo_std, roo_N2_fuel, Tpres)
P4pres_guess = P3pres_guess - presSole.getPressureDrop(mdot_pres_guess, P3pres_guess, roo_N2_fuel)
P5pres_guess = P4pres_guess - presfuel_tube.getPressureDrop(mdot_pres_guess, mu_N2_fuel, roo_N2_fuel)
#print("mdot_pres_guess is is", mdot_pres_guess, "kg/s")
#print("P3pres_guess is is", P3pres_guess/psi, "psi")
#print("P4pres_guess is is", P4pres_guess/psi, "psi")
#print("P5pres_guess is is", P5pres_guess/psi, "psi")
#print("mdot_fuel_guess is", mdot_fuel_guess)
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P5fuel_guess is is", P5fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
else : # guesses for further steps. Use values from previous timestep
mdot_ox_guess = mdot_ox[i-1] #ox_orifice.getMdot(Preg_ox, Pchamb, Tox)
#P3ox_guess = P2ox - oxSole.getPressureDrop(mdot_ox_guess, P2ox,roo_ox)
#P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P2ox_guess = P2ox[i-1]
P3ox_guess = P3ox[i-1]
P4ox_guess = P4ox[i-1]
#print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox_guess is", P2ox_guess/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_fuel[i-1] #mdot_ox_guess/OF_nom*1
P2fuel_guess = P2fuel[i-1]
P3fuel_guess = P3fuel[i-1]
P4fuel_guess = P4fuel[i-1]
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_pres_guess = mdot_fuel_pres[i-1]
P3pres_guess = P3pres[i-1]
P4pres_guess = P4pres[i-1]
initial_ox_guesses = [P2ox_guess, P3ox_guess, P4ox_guess, mdot_ox_guess]
initial_fuel_guesses= [P2fuel_guess, P3fuel_guess, P4fuel_guess, mdot_fuel_guess]
initial_pres_guesses= [P3pres_guess, P4pres_guess, mdot_pres_guess]
def oxfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
#print("nyt TAALLA")
#print("P3 as U0 is", P3/psi, "psi")
#print("P4 as U1 is", P4/psi, "psi")
#print("P5 as U2 is", P5/psi, "psi")
#print("mdot as U3 is", mdot, "kg/s")
#print("mdot is", mdot, "kg/s")
#print("P4ox is", P4/psi, "psi")
#print("Pchamb is", Pchamb/psi, "psi")
#out = [ P2ox - P3 - ox_check.getPressureDrop(mdot, P2ox, GOX.roo_std, roo_ox, Tox) ]
out = [ mdot - ox_check.getMdot(P1ox, P2, GOX.roo_std, roo_ox, Tox) ]
out.append( P2 - P3 - oxSole.getPressureDrop( mdot, P2, roo_ox) )
out.append( P3 - P4 - ox_tube.getPressureDrop(mdot, mu_ox, roo_ox) )
out.append( mdot - ox_orifice.getMdot(P4, Pchamb, Tox) )
#print("oxoutti", out)
return out
ox_solution = opt.fsolve(oxfunks, initial_ox_guesses) # iterates until finds a solution or goes bust
#print("ox solution is", ox_solution)
mdot_ox_new = ox_solution[3]
#print("mdot_ox_nyyy is", mdot_ox_new, "kg/s")
def fuelfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
#print("U is", U)
#print("fuelmdot is", mdot)
out = [ mdot - fuelSole.getMdot(P1fuel, P2, rooFuel, kerosene.P_crit, kerosene.P_vapor) ]
out.append( P2 - P3 - fuel_tube.getPressureDrop(mdot, mu_fuel, rooFuel) )
out.append( P3 - P4 - jacket.getPressureDrop(mdot) )
out.append( P4 - Pchamb - fuel_orifice.getPressureDrop(mdot, rooFuel) )
#print("fueloutti", out)
return out
fuel_solution = opt.fsolve(fuelfunks, initial_fuel_guesses)
#print("fuel solution is", fuel_solution)
mdot_fuel_new = fuel_solution[3]
# Now that fuel mass flow rate out has been solved, intermediate state (=no N2 inflow yet) of the fuel tank can be established:
fueltank.update(TfuelPres[i], 0, mdot_fuel_new, timestep)
Pfuel_intermediate = fueltank.getPtank()
Pfuel_eff = (Pfuel_intermediate + P1fuel)/2 # average of pressures before and after ejection of fuel from tank; incoming nitrogen will see this 'effective' pressure in the tank
# Next, nitrogen flow into the void created by ejected fuel is calculated
def presfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
mdot = U[2]
out = [mdot - pres_check.getMdot(Preg_N2, P3, nitrogen.roo_std, roo_N2_fuel, Tpres) ]
#out.append( P3 - P4 - presSole.getPressureDrop(mdot, P3, roo_N2_fuel) )
out.append( mdot - presSole.getMdot(P3, P4, roo_N2_fuel) )
out.append( P4 - Pfuel_eff - presfuel_tube.getPressureDrop(mdot, mu_N2_fuel, roo_N2_fuel) )
#out.append( mdot - presfuel_tube.getMdot(P4, Pfuel_eff, mu_N2_fuel, roo_N2_fuel) )
#print("presoutti", out)
return out
pres_solution = opt.fsolve(presfunks, initial_pres_guesses)
#print("pres solution is", pres_solution)
mdot_pres_new = pres_solution[2]
#print("mdot_pres_new is", mdot_pres_new, "kg/s")
# Determine final conditions in prop tanks now that N2 inflow has been determined
oxtank.update(mdot_ox_new, timestep)
fueltank.update(TfuelPres[i], mdot_pres_new, 0, timestep)
# ...and fuel pressurant tank
fuelprestank.update(mdot_pres_new, timestep)
# Check if OF ratio is within limits. If not, stop simulation (no CEA data available outside the range checked below)
if (mdot_ox_new/mdot_fuel_new) < 0.5 or (mdot_ox_new/mdot_fuel_new) > 8.0:
print("OF ratio out of range, terminate (",(mdot_ox_new/mdot_fuel_new),")")
print("mdot_ox_new is", mdot_ox_new, "kg/s")
print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
break
# Update chamber parameters:
chamber.update(mdot_ox_new, mdot_fuel_new, Pambient, timestep) # mdot_ox_in, mdot_fuel_in, Pambient, timestep
#print("mdot_ox_new is", mdot_ox_new, "kg/s")
#print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
#print("kammiopaine on", chamber.get_P_inlet()/psi, "psi" )
# Check if ox or fuel tank will empty during this timestep. If so, stop simulation.
if oxtank.getPtank() < chamber.get_P_inlet()*1.2:
print("Ox tank reached chamber pressure x1.2 (=empty) after", i, " iterations, ie", time[-1], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fueltank.getMprop() < 0:
print("Fuel tank empty after", i, " iterations, ie", time[-1], "seconds")
print("remaining GOX", mox[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fuelprestank.getPtank() < Preg_N2:
print("Out of fuel pressurant after", i, " iterations, ie", time[-1], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining GOX", mox[i], "kg")
break
#update mass flow time histories. These are values during the CURRENT time step.
if i==0:
P2ox = [ox_solution[0]]
P3ox = [ox_solution[1]]
P4ox = [ox_solution[2]]
mdot_ox = [ox_solution[3]]
P2fuel = [fuel_solution[0]]
P3fuel = [fuel_solution[1]]
P4fuel = [fuel_solution[2]]
mdot_fuel = [fuel_solution[3]]
P3pres = [pres_solution[0]]
P4pres = [pres_solution[1]]
mdot_fuel_pres = [pres_solution[2]]
OFratio = [ mdot_ox[0]/mdot_fuel[0] ]
else:
P2ox.append( ox_solution[0])
P3ox.append( ox_solution[1])
P4ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
#print("mdot_pres_new solution is", pres_solution[2], "kg/s")
mdot_fuel_pres.append( pres_solution[2])
#print("i is= ", i)
OFratio.append( mdot_ox[i]/mdot_fuel[i])
#update the rest of the time histories. System will have these values during the NEXT time step.
Poxtank.append( oxtank.getPtank())
Toxtank.append( oxtank.getTtank())
mox.append( oxtank.getM())
Pfueltank.append( fueltank.getPtank())
Tfueltank.append( fueltank.getTpres())
mPresFueltank.append( fueltank.getMpres())
mfuel.append( fueltank.getMprop())
FFfueltank.append( fueltank.getFF())
TfuelPres.append( fuelprestank.getTtank())
PfuelPres.append( fuelprestank.getPtank())
mfuelPres.append( fuelprestank.getM())
#mdot_fuel_pres.append( mdot_pres_new)
Pchamber.append( chamber.get_P_inlet() )
Pexit.append( nozzle.getPe(Pchamber[i+1], chamber.gamma, Pambient) )
Mexit.append( nozzle.getMe(Pchamber[i+1], chamber.gamma, Pambient) )
cmass.append( chamber.m)
mdot_nozzle.append( nozzle.getmdot(chamber.gamma, Runiv/chamber.mbar, chamber.get_P_inlet(),\
chamber.T, chamber.Pa) )
Thrust.append( nozzle.getThrust(chamber.get_P_inlet(), Pambient, chamber.gamma) )
T_chamber.append( chamber.T)
Isp.append( Thrust[i+1]/(mdot_ox[i] + mdot_fuel[i])/9.81 )
mTotal.append(mox[i+1] + mfuel[i+1] + cmass[i+1] + mdot_nozzle[i]*timestep )
mprs.append( mPresFueltank[i+1] + mfuelPres[i+1] )
time.append( time[i]+timestep )
#dP_ox_check = (Poxtank[-1] - P2ox[-1])
#print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi")
i+=1
# Print some values
bindex = 1001 # sample index just after the initial transient (1001 small timesteps ~ 0.01 s)
print("")
print("mdot_nozzle initial is", '%.3f'%mdot_nozzle[bindex], "kg/s")
print("initial thrust is", '%.1f'%Thrust[bindex], "N")
print("initial Isp is", '%.1f'%Isp[bindex], "s")
print("initial T_chamber is",'%.1f'%T_chamber[bindex], "K")
print("initial P_chamber is", '%.1f'%(Pchamber[bindex]/psi), "psi")
print("initial P_exit is", '%.3f'%(Pexit[bindex]/atm), "atm")
print("initial thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[bindex], atm, chamber.get_gamma(OFratio[bindex], Pchamber[bindex])) )
print("initial mdot_N2 is", '%.3f'%mdot_fuel_pres[bindex], "kg/s")
print("initial N2 flow rate is", '%.3f'%(mdot_fuel_pres[bindex]/roo_N2_fuel*1000/3.78*60), "GPM")
print("initial mdot_ox is", '%.3f'%mdot_ox[bindex], "kg/s")
print("initial mdot_fuel is", '%.3f'%mdot_fuel[bindex], "kg/s")
print("initial O/F ratio is", '%.3f'%OFratio[bindex])
print("initial ox tube velocity is", '%.1f'%(mdot_ox[bindex]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("initial fuel tube velocity is", '%.1f'%(mdot_fuel[bindex]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("initial ox injection velocity is", '%.1f'%(mdot_ox[bindex]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("initial fuel injection velocity is", '%.1f'%(mdot_fuel[bindex]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("initial ox injector P_drop", '%.1f'%((P4ox[bindex]-Pchamber[bindex])/Pchamber[bindex]*100), "% of Pchamber")
print("initial fuel injector P_drop", '%.1f'%((P4fuel[bindex]-Pchamber[bindex])/Pchamber[bindex]*100), "% of Pchamber")
print("")
print("")
print("mdot_nozzle steady state (end of sim) is", '%.3f'%mdot_nozzle[-1], "kg/s")
print("SS thrust is", '%.1f'%Thrust[-1], "N")
print("SS Isp is", '%.1f'%Isp[-1], "s")
print("SS T_chamber is",'%.1f'%T_chamber[-1], "K")
print("SS P_chamber is", '%.1f'%(Pchamber[-1]/psi), "psi")
print("SS P_exit is", '%.3f'%(Pexit[-1]/atm), "atm")
print("SS thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[-1], atm, chamber.get_gamma(OFratio[-1], Pchamber[-1])) )
print("SS mdot_N2 is", '%.3f'%mdot_fuel_pres[-1], "kg/s")
print("SS N2 flow rate is", '%.3f'%(mdot_fuel_pres[-1]/roo_N2_fuel*1000/3.78*60), "GPM")
print("SS mdot_ox is", '%.3f'%mdot_ox[-1], "kg/s")
print("SS mdot_fuel is", '%.3f'%mdot_fuel[-1], "kg/s")
print("SS O/F ratio is", '%.3f'%OFratio[-1])
print("SS ox tube velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("SS fuel tube velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("SS ox injection velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("SS fuel injection velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("SS ox injector P_drop", '%.1f'%((P4ox[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("SS fuel injector P_drop", '%.1f'%((P4fuel[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("")
# See what check valves are doing
dP_ox_check = (Poxtank[-1] - P2ox[-1])
dP_N2_check = (Preg_N2 - P3pres[-1])
if dP_ox_check < ox_check.Pcrack:
print("Warning: Pressure drop over ox check valve (",'%.1f'%(dP_ox_check/psi),"psi) is less than its cracking pressure (",ox_check.Pcrack/psi,"psi) and will remain shut")
else:
print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi, enough to keep it flowing")
if dP_N2_check < pres_check.Pcrack:
print("Warning: Pressure drop over N2 check valve(",'%.1f'%(dP_N2_check/psi),"psi) is less than its cracking pressure (",pres_check.Pcrack/psi,"psi) and will remain shut")
else:
print("N2 check valve pressure drop is", '%.1f'%(dP_N2_check/psi), "psi, enough to keep it flowing")
# following time histories are one element shorter than the rest, so the last calculated value will be duplicated to match the length of other time histories.
P2ox.append( ox_solution[0])
P3ox.append( ox_solution[1])
P4ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
mdot_fuel_pres.append( pres_solution[2])
OFratio.append( mdot_ox[i]/mdot_fuel[i])
# plot time histories
plt.ion()
plt.figure(1)
plt.plot(time, array(Poxtank)/psi, label='ox tank')
plt.figure(1)
plt.plot(time,array(P2ox)/psi, label='Pcheck_out')
plt.figure(1)
plt.plot(time,array(P3ox)/psi, label='Psolenoid_out')
plt.figure(1)
plt.plot(time,array(P4ox)/psi, label='Pinj_in')
plt.figure(1)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(1)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Ox pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psia')
plt.show()
Preg_N2_array = full((1, len(time)), Preg_N2/psi)
plt.figure(2)
plt.plot(time, array(PfuelPres)/psi, label='fuelpres tank')
plt.figure(2)
plt.plot(time, Preg_N2_array.T, label="P_regulation")
plt.figure(2)
plt.plot(time,array(P3pres)/psi, label='N2 check valve out')
plt.figure(2)
plt.plot(time,array(P4pres)/psi, label='N2 solenoid valve out')
plt.figure(2)
plt.plot(time,array(Pfueltank)/psi, label='fuel tank')
plt.figure(2)
plt.plot(time,array(P2fuel)/psi, label='Pvalve_out')
plt.figure(2)
plt.plot(time,array(P3fuel)/psi, label='Pjacket_in')
plt.figure(2)
plt.plot(time,array(P4fuel)/psi, label='Pinj_in')
plt.figure(2)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(2)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Fuel pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('Psia')
plt.show()
plt.figure(3)
plt.plot(time,Toxtank, label='Ox tank')
plt.figure(3)
plt.plot(time,Tfueltank, label='Fuel tank')
plt.figure(3)
plt.plot(time,TfuelPres, label='fuel pressurant tank')
plt.title('Tank temperatures')
plt.legend( loc='lower left')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(4)
plt.plot(time,mdot_ox, label='mdot_ox')
plt.figure(4)
plt.plot(time,mdot_fuel, label='mdot_fuel')
plt.figure(4)
plt.plot(time,mdot_nozzle, label='mdot_nozzle')
plt.figure(4)
plt.plot(time,mdot_fuel_pres, label='mdot_fuel_pres')
plt.title('Mass flows')
plt.xlabel('Time [s]')
plt.ylabel('kg/s')
plt.legend( loc='upper right')
plt.show()
plt.figure(5)
plt.plot(time,FFfueltank, label='fuel tank')
plt.title('Fill fractions in fuel tank (Vfuel/Vtank)')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.legend( loc='upper right')
plt.show()
plt.figure(6)
plt.plot(time, OFratio)
plt.title('O/F ratio')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.show()
plt.figure(7)
plt.plot(time,mox, label='GOX')
plt.figure(7)
plt.plot(time,mfuel, label='fuel')
plt.figure(7)
plt.plot(time,mfuelPres, label='fuel pressurant')
plt.figure(7)
plt.plot(time,mPresFueltank, label='pressurant in fuel tank')
plt.figure(7)
plt.plot(time,mprs, label='total pressurant')
plt.title('Fluid masses')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.legend( loc='upper right')
plt.show()
plt.figure(8)
plt.plot(time, cmass)
plt.title('Resident mass in chamber')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.show()
plt.figure(9)
plt.plot(time, Thrust)
plt.title('Thrust')
plt.xlabel('Time [s]')
plt.ylabel('N')
plt.show()
plt.figure(10)
plt.plot(time, Isp)
plt.title('Isp')
plt.xlabel('Time [s]')
plt.ylabel('s')
plt.show()
plt.figure(11)
plt.plot(time, T_chamber)
plt.title('T chamber')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(12)
plt.plot(time, Mexit)
plt.title('Exit Mach number')
plt.xlabel('Time [s]')
plt.ylabel('-')
plt.show()
plt.figure(13)
y1 = PfuelPres[-1]/psi
y2 = Preg_N2/psi
y3 = P3pres[-1]/psi
y4 = P4pres[-1]/psi
y5 = Pfueltank[-1]/psi
y6 = P2fuel[-1]/psi
y7 = P3fuel[-1]/psi
y8 = P4fuel[-1]/psi
y9 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Pressurant tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Check valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Pressurant solenoid")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Pressurant tubing")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Fuel tank")
plt.plot( [6, 7], [y5, y6], linewidth=2, label="Fuel solenoid")
plt.plot( [7, 8], [y6, y7], linewidth=2, label="Piping")
plt.plot( [8, 9], [y7, y8], linewidth=2, label="Cooling jacket")
plt.plot( [9, 10], [y8, y9], linewidth=2, label="Fuel injector")
plt.plot( [10, 11], [y9, y9], linewidth=2, label="Chamber")
plt.title('Fuel line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
plt.figure(14)
y1 = Poxtank[-1]/psi
y2 = P2ox[-1]/psi
y3 = P3ox[-1]/psi
y4 = P4ox[-1]/psi
y5 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Ox tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Check valve")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Ox solenoid")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Tubing")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Ox injector")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Chamber")
plt.title('Ox line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
|
mit
| 7,787,178,057,936,864,000
| 45.792892
| 340
| 0.601917
| false
| 2.736544
| false
| false
| false
|
uclmr/inferbeddings
|
scripts/synth/create_table_iterative.py
|
1
|
6170
|
import numpy as np
from collections import defaultdict
results = '/Users/tdmeeste/workspace/inferbeddings/logs/synth/synth_paper_iterative_aggregated.txt'
models_lst = ['DistMult', 'ComplEx']
clauses_lst = ['symm', 'impl', 'impl_inv', 'trans_single', 'trans_diff']
confs_lst = ['0.0']
versions_lst = ['v0', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9']
adv_weights_lst = ['0', '1']
adv_epochs_lst = ['0', '10']
disc_epochs_lst = ['10']
def string(s):
return {'TransE' : r"\emph{ASR}-\mdl{TransE}",
'DistMult' : r"\mdl{DistM.}",
'ComplEx' : r"\mdl{Compl.}",
'symm' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow r(X_2, X_1) \end{array}$ }",
'impl' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow s(X_1, X_2) \end{array}$ }",
'impl_inv' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow s(X_2, X_1) \end{array}$ }",
'trans_single': r"\multirow{ 2}{*}{$\begin{array} {l@{}} r(X_1, X_2) \wedge r(X_2, X_3) \\ \quad\Rightarrow r(X_1, X_3) \end{array}$}",
'trans_diff': r"\multirow{ 2}{*}{$\begin{array} {l@{}} r(X_1, X_2) \wedge s(X_2, X_3) \\ \quad\Rightarrow t(X_1, X_3) \end{array}$}"
}[s]
#'symm': r"$r(\x_2, \x_1) :- r(\x_1, \x_2)$",
#'impl': r"$s(\x_1, \x_2) :- r(\x_1, \x_2)$",
#'impl_inv': r"$s(\x_2, \x_1) :- r(\x_1, \x_2)$",
#'trans_single': r"$r(\x_1, \x_3) :- r(\x_1, \x_2), r(\x_2, \x_3)$",
#'trans_diff': r"$t(\x_1, \x_3) :- r(\x_1, \x_2), s(\x_2, \x_3)$"
def id2clause(id):
if 'tag=impl_inv' in id:
return 'impl_inv' # must be checked before 'impl', since 'tag=impl' is a substring of 'tag=impl_inv'
elif 'tag=impl' in id:
return 'impl'
for clause in ['symm', 'trans_single', 'trans_diff']:
if 'tag=%s'%clause in id:
return clause
return None
def id2model(id):
for model in models_lst:
if 'model=%s'%model in id:
return model
return None
def id2adv_init_ground(id):
if 'adv_init_ground=True' in id:
return True
elif 'adv_init_ground=False' in id:
return False
else:
return None
def id2conf(id):
for conf in confs_lst:
if '_c%s'%conf in id:
return conf
return None
def id2version(id):
for version in versions_lst:
if '_%s.log'%version in id:
return version
return None
def id2adv_weight(id):
for adv_weight in adv_weights_lst:
if 'adv_weight=%s_'%adv_weight in id:
return adv_weight
return None
def id2adv_epochs(id):
for adv_epoch in adv_epochs_lst:
if 'adv_epochs=%s_'%adv_epoch in id:
return adv_epoch
return None
def id2disc_epochs(id):
for disc_epoch in disc_epochs_lst:
if 'disc_epochs=%s_'%disc_epoch in id:
return disc_epoch
return None
def id2entity_space(id):
return 'unit_sphere' if 'unit-sphere' in id else 'unit_cube'
from time import sleep
ID2AUC = {}
found = False
with open(results) as rID:
for line in rID:
auc, id = line.strip().split('\t')
clause = id2clause(id)
model = id2model(id)
adv_init_ground = id2adv_init_ground(id)
conf = id2conf(id)
adv_weight = id2adv_weight(id)
adv_epochs = id2adv_epochs(id)
disc_epochs = id2disc_epochs(id)
entity_space = id2entity_space(id)
version = id2version(id)
if not None in (clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space, version):
ID2AUC[(clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space, version)] = float(auc)
ID2AUC_versions = {}
for (clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space, version), auc in ID2AUC.items():
if not (clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space) in ID2AUC_versions:
ID2AUC_versions[(clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space)] = []
ID2AUC_versions[(clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space)].append(auc)
ID2MEAN = defaultdict(lambda: -1)
for k in ID2AUC_versions:
ID2MEAN[k] = np.mean(ID2AUC_versions[k])
#construct table:
title = r"PR-AUC results for \emph{ASR}-DistMult (DistM.) and \emph{ASR}-ComplEx (Compl.) on synthetic datasets with various types of clauses (with $r\not=s\not=t$). Comparison of standard models without clauses ($\alpha=0$), and iterative adversarial training with clauses ($\alpha=1$). "
header = r"""
\begin{table}[t!]
\centering
\caption{
""" + title + \
r"""
}
\label{synth}
\vspace{1em}
\resizebox{\columnwidth}{!}{
\begin{tabular}{llcccc}
\toprule
\multirow{ 2}{*}{Clauses} & \multirow{ 2}{*}{Model} & $\alpha=0$ & $\alpha=0$ & $\alpha=1$ & $\alpha=1$ \\
&& cube & sphere & cube & sphere \\
\midrule
"""
footer = r"""
\bottomrule
\end{tabular}
}
\end{table}
"""
def results_line(clause, model):
res = string(model) + " & "
conf = "0.0"
res_STD_cube = ID2MEAN[(clause, model, True, conf, '0', '10', '10','unit_cube')]
res_STD_sphere = ID2MEAN[(clause, model, True, conf, '0', '10', '10','unit_sphere')]
#res_SMPL = ID2MEAN[(clause, model, True, conf, '1', '0', '10')]
#res_ASR_R = ID2MEAN[(clause, model, False, conf, '1', '1')]
res_ASR_cube = ID2MEAN[(clause, model, True, conf, '1', '10', '10', 'unit_cube')]
res_ASR_sphere = ID2MEAN[(clause, model, True, conf, '1', '10', '10', 'unit_sphere')]
resu = [res_STD_cube, res_STD_sphere, res_ASR_cube, res_ASR_sphere]
resu = [np.round(1000*res)/10. for res in resu]
maxvalue = max(resu)
resu_str = ["\\textbf{%.1f}"%res if res == maxvalue else "%.1f"%res for res in resu]
res += " & ".join(resu_str)
return res + r" \\"
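# Illustrative output of a single call (numbers are hypothetical):
# results_line('symm', 'DistMult')
# -> "\mdl{DistM.} & 51.3 & 50.9 & \textbf{97.5} & 96.8 \\"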
print(header)
for clause in clauses_lst:
for model in models_lst:
show_clause = string(clause) if model == models_lst[0] else ""
line = show_clause + " & " + results_line(clause, model)
print(line)
if not clause == clauses_lst[-1]:
print(r"\midrule")
print(footer)
|
mit
| 2,269,847,217,643,453,000
| 30.968912
| 289
| 0.583468
| false
| 2.672152
| false
| false
| false
|
hlange/LogSoCR
|
.waf/waflib/Runner.py
|
1
|
9332
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2016 (ita)
"""
Runner.py: Task scheduling and execution
"""
import random
try:
from queue import Queue
except ImportError:
from Queue import Queue
from waflib import Utils, Task, Errors, Logs
GAP = 20
"""
Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run
"""
class Consumer(Utils.threading.Thread):
"""
Daemon thread object that executes a task. It shares a semaphore with
the coordinator :py:class:`waflib.Runner.Spawner`. There is one
instance per task to consume.
"""
__slots__ = ('task', 'spawner')
def __init__(self, spawner, task):
Utils.threading.Thread.__init__(self)
self.task = task
"""Task to execute"""
self.spawner = spawner
"""Coordinator object"""
self.setDaemon(1)
self.start()
def run(self):
"""
Processes a single task
"""
try:
if not self.spawner.master.stop:
self.task.process()
finally:
self.spawner.sem.release()
self.spawner.master.out.put(self.task)
self.task = None
self.spawner = None
class Spawner(Utils.threading.Thread):
"""
Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and
spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each
:py:class:`waflib.Task.TaskBase` instance.
"""
def __init__(self, master):
Utils.threading.Thread.__init__(self)
self.master = master
""":py:class:`waflib.Runner.Parallel` producer instance"""
self.sem = Utils.threading.Semaphore(master.numjobs)
"""Bounded semaphore that prevents spawning more than *n* concurrent consumers"""
self.setDaemon(1)
self.start()
def run(self):
"""
Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop`
"""
try:
self.loop()
except Exception:
# Python 2 prints unnecessary messages when shutting down
# we also want to stop the thread properly
pass
def loop(self):
"""
Consumes task objects from the producer; ends when the producer has no more
task to provide.
"""
master = self.master
while 1:
task = master.ready.get()
self.sem.acquire()
task.log_display(task.generator.bld)
Consumer(self, task)
class Parallel(object):
"""
Schedule the tasks obtained from the build context for execution.
"""
def __init__(self, bld, j=2):
"""
The initialization requires a build context reference
for computing the total number of jobs.
"""
self.numjobs = j
"""
Amount of parallel consumers to use
"""
self.bld = bld
"""
Instance of :py:class:`waflib.Build.BuildContext`
"""
self.outstanding = Utils.deque()
"""List of :py:class:`waflib.Task.TaskBase` that may be ready to be executed"""
self.frozen = Utils.deque()
"""List of :py:class:`waflib.Task.TaskBase` that are not ready yet"""
self.ready = Queue(0)
"""List of :py:class:`waflib.Task.TaskBase` ready to be executed by consumers"""
self.out = Queue(0)
"""List of :py:class:`waflib.Task.TaskBase` returned by the task consumers"""
self.count = 0
"""Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""
self.processed = 1
"""Amount of tasks processed"""
self.stop = False
"""Error flag to stop the build"""
self.error = []
"""Tasks that could not be executed"""
self.biter = None
"""Task iterator which must give groups of parallelizable tasks when calling ``next()``"""
self.dirty = False
"""
Flag that indicates that the build cache must be saved when a task was executed
(calls :py:meth:`waflib.Build.BuildContext.store`)"""
self.spawner = Spawner(self)
"""
Coordinating daemon thread that spawns thread consumers
"""
def get_next_task(self):
"""
Obtains the next Task instance to run
:rtype: :py:class:`waflib.Task.TaskBase`
"""
if not self.outstanding:
return None
return self.outstanding.popleft()
def postpone(self, tsk):
"""
Adds the task to the list :py:attr:`waflib.Runner.Parallel.frozen`.
The order is scrambled so as to consume as many tasks in parallel as possible.
:param tsk: task instance
:type tsk: :py:class:`waflib.Task.TaskBase`
"""
if random.randint(0, 1):
self.frozen.appendleft(tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"""
Adds the next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
"""
while self.count > self.numjobs * GAP:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
elif self.frozen:
try:
cond = self.deadlock == self.processed
except AttributeError:
pass
else:
if cond:
msg = 'check the build order for the tasks'
for tsk in self.frozen:
if not tsk.run_after:
msg = 'check the methods runnable_status'
break
lst = []
for tsk in self.frozen:
lst.append('%s\t-> %r' % (repr(tsk), [id(x) for x in tsk.run_after]))
raise Errors.WafError('Deadlock detected: %s%s' % (msg, ''.join(lst)))
self.deadlock = self.processed
if self.frozen:
self.outstanding.extend(self.frozen)
self.frozen.clear()
elif not self.count:
self.outstanding.extend(next(self.biter))
self.total = self.bld.total()
break
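# Illustrative numbers (not from the waf source): with numjobs = 4 and GAP = 20,
# the loop at the top of refill_task_list() above starts draining finished tasks
# from self.out once more than 4 * 20 = 80 tasks have been handed to consumers
# and not yet collected (tracked by self.count).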
def add_more_tasks(self, tsk):
"""
If a task provides :py:attr:`waflib.Task.TaskBase.more_tasks`, then the tasks contained
in that list are added to the current build and will be processed before the next build group.
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.TaskBase`
"""
if getattr(tsk, 'more_tasks', None):
self.outstanding.extend(tsk.more_tasks)
self.total += len(tsk.more_tasks)
def get_out(self):
"""
Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution.
Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`.
:rtype: :py:attr:`waflib.Task.TaskBase`
"""
tsk = self.out.get()
if not self.stop:
self.add_more_tasks(tsk)
self.count -= 1
self.dirty = True
return tsk
def add_task(self, tsk):
"""
Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them.
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.TaskBase`
"""
self.ready.put(tsk)
def skip(self, tsk):
"""
Mark a task as skipped/up-to-date
"""
tsk.hasrun = Task.SKIPPED
def error_handler(self, tsk):
"""
Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, unless
the build is executed with::
$ waf build -k
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.TaskBase`
"""
if hasattr(tsk, 'scan') and hasattr(tsk, 'uid'):
# TODO waf 2.0 - this breaks encapsulation
try:
del self.bld.imp_sigs[tsk.uid()]
except KeyError:
pass
if not self.bld.keep:
self.stop = True
self.error.append(tsk)
def task_status(self, tsk):
"""
Obtains the task status to decide whether to run it immediately or not.
:return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER`
:rtype: integer
"""
try:
return tsk.runnable_status()
except Exception:
self.processed += 1
tsk.err_msg = Utils.ex_stack()
if not self.stop and self.bld.keep:
self.skip(tsk)
if self.bld.keep == 1:
# if -k stop at the first exception, if -kk try to go as far as possible
if Logs.verbose > 1 or not self.error:
self.error.append(tsk)
self.stop = True
else:
if Logs.verbose > 1:
self.error.append(tsk)
return Task.EXCEPTION
tsk.hasrun = Task.EXCEPTION
self.error_handler(tsk)
return Task.EXCEPTION
def start(self):
"""
Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to
:py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread
has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out`
and marks the build as failed by setting the ``stop`` flag.
If only one job is used, then executes the tasks one by one, without consumers.
"""
self.total = self.bld.total()
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next_task()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
continue
if self.stop: # stop immediately after a failure was detected
break
st = self.task_status(tsk)
if st == Task.RUN_ME:
self.count += 1
self.processed += 1
if self.numjobs == 1:
tsk.log_display(tsk.generator.bld)
try:
tsk.process()
finally:
self.out.put(tsk)
else:
self.add_task(tsk)
if st == Task.ASK_LATER:
self.postpone(tsk)
elif st == Task.SKIP_ME:
self.processed += 1
self.skip(tsk)
self.add_more_tasks(tsk)
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
self.ready.put(None)
assert (self.count == 0 or self.stop)
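# A minimal standalone sketch (not part of waf) of the scheduling loop that
# Parallel.start() documents above: poll each task's status and either run it
# or postpone it until its dependencies have run.  ToyTask and the status
# constants are invented for this illustration; the real scheduler adds
# consumer threads, error handling and deadlock detection on top of this.
from collections import deque
TOY_RUN_ME, TOY_ASK_LATER = range(2)
class ToyTask(object):
	def __init__(self, name, deps=()):
		self.name, self.deps, self.hasrun = name, list(deps), False
	def runnable_status(self):
		# runnable only once every dependency has been executed
		return TOY_RUN_ME if all(d.hasrun for d in self.deps) else TOY_ASK_LATER
	def process(self):
		self.hasrun = True
def toy_start(tasks):
	outstanding, frozen = deque(tasks), deque()
	while outstanding or frozen:
		if not outstanding:
			# nothing runnable right now: retry the postponed tasks
			outstanding, frozen = frozen, deque()
		tsk = outstanding.popleft()
		if tsk.runnable_status() == TOY_RUN_ME:
			tsk.process()
		else:
			frozen.append(tsk)
if __name__ == '__main__':
	a = ToyTask('a'); b = ToyTask('b', deps=[a])
	toy_start([b, a])
	assert a.hasrun and b.hasrun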
|
agpl-3.0
| -2,180,526,306,143,412,700
| 25.662857
| 104
| 0.673168
| false
| 3.084959
| false
| false
| false
|
pacoqueen/bbinn
|
gajim-0.9.1/src/common/logger.py
|
1
|
16495
|
# -*- coding: utf-8 -*-
## logger.py
##
## Contributors for this file:
## - Yann Le Boulanger <asterix@lagaule.org>
## - Nikos Kouremenos <kourem@gmail.com>
##
## Copyright (C) 2003-2004 Yann Le Boulanger <asterix@lagaule.org>
## Vincent Hanquez <tab@snarc.org>
## Copyright (C) 2005 Yann Le Boulanger <asterix@lagaule.org>
## Vincent Hanquez <tab@snarc.org>
## Nikos Kouremenos <nkour@jabber.org>
## Dimitur Kirov <dkirov@gmail.com>
## Travis Shirk <travis@pobox.com>
## Norman Rasmussen <norman@rasmussen.co.za>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
# XXX Modified by queen to disable the log so that it does not need psqlite to work.
# XXX Modified so that it uses logger again.
# NOTE: clients will then need psqlite and gnupg (no way around it).
import os
import sys
import time
import datetime
import exceptions
import i18n
_ = i18n._
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
raise exceptions.PysqliteNotAvailable
# pass
if os.name == 'nt':
try:
# Documents and Settings\[User Name]\Application Data\Gajim\logs.db
LOG_DB_PATH = os.path.join(os.environ['appdata'], 'Gajim', 'logs.db')
except KeyError:
# win9x, ./logs.db
LOG_DB_PATH = 'logs.db'
else: # Unices
LOG_DB_PATH = os.path.expanduser('~/.gajim/logs.db')
try:
LOG_DB_PATH = LOG_DB_PATH.decode(sys.getfilesystemencoding())
except:
pass
class Constants:
def __init__(self):
(
self.JID_NORMAL_TYPE,
self.JID_ROOM_TYPE
) = range(2)
(
self.KIND_STATUS,
self.KIND_GCSTATUS,
self.KIND_GC_MSG,
self.KIND_SINGLE_MSG_RECV,
self.KIND_CHAT_MSG_RECV,
self.KIND_SINGLE_MSG_SENT,
self.KIND_CHAT_MSG_SENT
) = range(7)
(
self.SHOW_ONLINE,
self.SHOW_CHAT,
self.SHOW_AWAY,
self.SHOW_XA,
self.SHOW_DND,
self.SHOW_OFFLINE
) = range(6)
constants = Constants()
class Logger:
def __init__(self):
self.jids_already_in = [] # holds jids that we already have in DB
if not os.path.exists(LOG_DB_PATH):
# this can happen only the first time (the time we create the db)
# db is not created here but in src/common/checks_paths.py
return
self.init_vars()
def init_vars(self):
# if locked, wait up to 20 sec to unlock
# before raise (hopefully should be enough)
self.con = sqlite.connect(LOG_DB_PATH, timeout = 20.0,
isolation_level = 'IMMEDIATE')
self.cur = self.con.cursor()
self.get_jids_already_in_db()
def get_jids_already_in_db(self):
self.cur.execute('SELECT jid FROM jids')
		rows = self.cur.fetchall() # list of tuples: [(u'aaa@bbb',), (u'cc@dd',)]
for row in rows:
# row[0] is first item of row (the only result here, the jid)
self.jids_already_in.append(row[0])
def jid_is_from_pm(self, jid):
		'''if jid is gajim@conf/nkour it is most likely a pm: how do we know that
		gajim@conf is not a normal contact and nkour is not just his resource?
		we ask whether gajim@conf is already in the jids table (with type room jid).
		this fails only if the user disables logging for the room and enables it just
		for pm (highly unlikely), and even then nothing breaks badly
		(the user sees the first pm as if it were a message in the room's public chat)
		and after that everything is okay'''
possible_room_jid, possible_nick = jid.split('/', 1)
self.cur.execute('SELECT jid_id FROM jids WHERE jid="%s" AND type=%d' %\
(possible_room_jid, constants.JID_ROOM_TYPE))
row = self.cur.fetchone()
if row is not None:
return True
else:
return False
def get_jid_id(self, jid, typestr = None):
'''jids table has jid and jid_id
logs table has log_id, jid_id, contact_name, time, kind, show, message
		so to query logs we need the jid_id that matches our jid in the jids table
		this method takes a jid and returns its jid_id for later sql-ing on logs
'''
if jid.find('/') != -1: # if it has a /
jid_is_from_pm = self.jid_is_from_pm(jid)
if not jid_is_from_pm: # it's normal jid with resource
jid = jid.split('/', 1)[0] # remove the resource
if jid in self.jids_already_in: # we already have jids in DB
self.cur.execute('SELECT jid_id FROM jids WHERE jid="%s"' % jid)
jid_id = self.cur.fetchone()[0]
else: # oh! a new jid :), we add it now
if typestr == 'ROOM':
typ = constants.JID_ROOM_TYPE
else:
typ = constants.JID_NORMAL_TYPE
self.cur.execute('INSERT INTO jids (jid, type) VALUES (?, ?)', (jid, typ))
try:
self.con.commit()
except sqlite.OperationalError, e:
print >> sys.stderr, str(e)
jid_id = self.cur.lastrowid
self.jids_already_in.append(jid)
return jid_id
def convert_human_values_to_db_api_values(self, kind, show):
		'''converts from string style to constant ints for db'''
if kind == 'status':
kind_col = constants.KIND_STATUS
elif kind == 'gcstatus':
kind_col = constants.KIND_GCSTATUS
elif kind == 'gc_msg':
kind_col = constants.KIND_GC_MSG
elif kind == 'single_msg_recv':
kind_col = constants.KIND_SINGLE_MSG_RECV
elif kind == 'single_msg_sent':
kind_col = constants.KIND_SINGLE_MSG_SENT
elif kind == 'chat_msg_recv':
kind_col = constants.KIND_CHAT_MSG_RECV
elif kind == 'chat_msg_sent':
kind_col = constants.KIND_CHAT_MSG_SENT
if show == 'online':
show_col = constants.SHOW_ONLINE
elif show == 'chat':
show_col = constants.SHOW_CHAT
elif show == 'away':
show_col = constants.SHOW_AWAY
elif show == 'xa':
show_col = constants.SHOW_XA
elif show == 'dnd':
show_col = constants.SHOW_DND
elif show == 'offline':
show_col = constants.SHOW_OFFLINE
elif show is None:
show_col = None
else: # invisible in GC when someone goes invisible
			# it's an RFC violation ... but we should not crash
show_col = 'UNKNOWN'
return kind_col, show_col
def commit_to_db(self, values):
#print 'saving', values
sql = 'INSERT INTO logs (jid_id, contact_name, time, kind, show, message, subject) VALUES (?, ?, ?, ?, ?, ?, ?)'
self.cur.execute(sql, values)
try:
self.con.commit()
except sqlite.OperationalError, e:
print >> sys.stderr, str(e)
def write(self, kind, jid, message = None, show = None, tim = None, subject = None):
'''write a row (status, gcstatus, message etc) to logs database
kind can be status, gcstatus, gc_msg, (we only recv for those 3),
single_msg_recv, chat_msg_recv, chat_msg_sent, single_msg_sent
we cannot know if it is pm or normal chat message, we try to guess
see jid_is_from_pm() which is called by get_jid_id()
we analyze jid and store it as follows:
jids.jid text column will hold JID if TC-related, room_jid if GC-related,
ROOM_JID/nick if pm-related.'''
if self.jids_already_in == []: # only happens if we just created the db
self.con = sqlite.connect(LOG_DB_PATH, timeout = 20.0,
isolation_level = 'IMMEDIATE')
self.cur = self.con.cursor()
jid = jid.lower()
contact_name_col = None # holds nickname for kinds gcstatus, gc_msg
# message holds the message unless kind is status or gcstatus,
# then it holds status message
message_col = message
subject_col = subject
if tim:
time_col = int(float(time.mktime(tim)))
else:
time_col = int(float(time.time()))
kind_col, show_col = self.convert_human_values_to_db_api_values(kind,
show)
# now we may have need to do extra care for some values in columns
if kind == 'status': # we store (not None) time, jid, show, msg
# status for roster items
jid_id = self.get_jid_id(jid)
if show is None: # show is None (xmpp), but we say that 'online'
show_col = constants.SHOW_ONLINE
elif kind == 'gcstatus':
# status in ROOM (for pm status see status)
if show is None: # show is None (xmpp), but we say that 'online'
show_col = constants.SHOW_ONLINE
jid, nick = jid.split('/', 1)
jid_id = self.get_jid_id(jid, 'ROOM') # re-get jid_id for the new jid
contact_name_col = nick
elif kind == 'gc_msg':
if jid.find('/') != -1: # if it has a /
jid, nick = jid.split('/', 1)
else:
# it's server message f.e. error message
# when user tries to ban someone but he's not allowed to
nick = None
jid_id = self.get_jid_id(jid, 'ROOM') # re-get jid_id for the new jid
contact_name_col = nick
else:
jid_id = self.get_jid_id(jid)
if show_col == 'UNKNOWN': # unknown show, do not log
return
values = (jid_id, contact_name_col, time_col, kind_col, show_col,
message_col, subject_col)
self.commit_to_db(values)
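	# Hedged usage sketch of write() (JIDs and texts are placeholders):
	#   logger.write('chat_msg_recv', 'friend@example.org', message='hello')
	#   logger.write('gc_msg', 'room@conference.example.org/nick', message='hi all')
	#   logger.write('status', 'friend@example.org', show='away', message='brb')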
def get_last_conversation_lines(self, jid, restore_how_many_rows,
pending_how_many, timeout):
'''accepts how many rows to restore and when to time them out (in minutes)
(mark them as too old) and number of messages that are in queue
and are already logged but pending to be viewed,
		returns a list of tuples containing time, kind, message,
		or a list with an empty tuple if nothing is found to meet our demands'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
now = int(float(time.time()))
timed_out = now - (timeout * 60) # before that they are too old
# so if we ask last 5 lines and we have 2 pending we get
# 3 - 8 (we avoid the last 2 lines but we still return 5 asked)
self.cur.execute('''
SELECT time, kind, message FROM logs
WHERE jid_id = %d AND kind IN (%d, %d, %d, %d) AND time > %d
ORDER BY time DESC LIMIT %d OFFSET %d
''' % (jid_id, constants.KIND_SINGLE_MSG_RECV, constants.KIND_CHAT_MSG_RECV,
constants.KIND_SINGLE_MSG_SENT, constants.KIND_CHAT_MSG_SENT,
timed_out, restore_how_many_rows, pending_how_many)
)
results = self.cur.fetchall()
results.reverse()
return results
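	# Hedged example call (values are placeholders): restore the last 10 lines,
	# skipping the 2 most recent ones that are still pending in the queue and
	# ignoring anything older than 120 minutes:
	#   rows = logger.get_last_conversation_lines('friend@example.org', 10, 2, 120)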
def get_unix_time_from_date(self, year, month, day):
# year (fe 2005), month (fe 11), day (fe 25)
# returns time in seconds for the second that starts that date since epoch
# gimme unixtime from year month day:
d = datetime.date(year, month, day)
local_time = d.timetuple() # time tupple (compat with time.localtime())
start_of_day = int(time.mktime(local_time)) # we have time since epoch baby :)
return start_of_day
def get_conversation_for_date(self, jid, year, month, day):
'''returns contact_name, time, kind, show, message
		for each row in a list of tuples,
		returns a list with an empty tuple if we found nothing to meet our demands'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
start_of_day = self.get_unix_time_from_date(year, month, day)
seconds_in_a_day = 86400 # 60 * 60 * 24
last_second_of_day = start_of_day + seconds_in_a_day - 1
self.cur.execute('''
SELECT contact_name, time, kind, show, message FROM logs
WHERE jid_id = %d
AND time BETWEEN %d AND %d
ORDER BY time
''' % (jid_id, start_of_day, last_second_of_day))
results = self.cur.fetchall()
return results
def get_search_results_for_query(self, jid, query):
'''returns contact_name, time, kind, show, message
		for each row in a list of tuples,
		returns a list with an empty tuple if we found nothing to meet our demands'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
if False: #query.startswith('SELECT '): # it's SQL query
try:
self.cur.execute(query)
except sqlite.OperationalError, e:
results = [('', '', '', '', str(e))]
return results
else: # user just typed something, we search in message column
like_sql = '%' + query + '%'
self.cur.execute('''
SELECT contact_name, time, kind, show, message, subject FROM logs
WHERE jid_id = ? AND message LIKE ?
ORDER BY time
''', (jid_id, like_sql))
results = self.cur.fetchall()
return results
def get_days_with_logs(self, jid, year, month, max_day):
'''returns the list of days that have logs (not status messages)'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
list = []
		# First select all dates of the month with logs we want
start_of_month = self.get_unix_time_from_date(year, month, 1)
seconds_in_a_day = 86400 # 60 * 60 * 24
last_second_of_month = start_of_month + (seconds_in_a_day * max_day) - 1
self.cur.execute('''
SELECT time FROM logs
WHERE jid_id = %d
AND time BETWEEN %d AND %d
AND kind NOT IN (%d, %d)
ORDER BY time
''' % (jid_id, start_of_month, last_second_of_month,
constants.KIND_STATUS, constants.KIND_GCSTATUS))
result = self.cur.fetchall()
		# Copy all interesting times into a temporary table
		self.cur.execute('CREATE TEMPORARY TABLE blabla(time INTEGER)')
for line in result:
self.cur.execute('''
INSERT INTO blabla (time) VALUES (%d)
''' % (line[0]))
		# then search this small temp table for each day
		for day in xrange(1, max_day + 1):
start_of_day = self.get_unix_time_from_date(year, month, day)
last_second_of_day = start_of_day + seconds_in_a_day - 1
# just ask one row to see if we have sth for this date
self.cur.execute('''
SELECT time FROM blabla
WHERE time BETWEEN %d AND %d
LIMIT 1
''' % (start_of_day, last_second_of_day))
result = self.cur.fetchone()
if result:
list[0:0]=[day]
#Delete temporary table
self.cur.execute('DROP TABLE blabla')
result = self.cur.fetchone()
return list
def get_last_date_that_has_logs(self, jid):
'''returns last time (in seconds since EPOCH) for which
we had logs (excluding statuses)'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
self.cur.execute('''
SELECT time FROM logs
WHERE jid_id = ?
AND kind NOT IN (?, ?)
ORDER BY time DESC LIMIT 1
''', (jid_id, constants.KIND_STATUS, constants.KIND_GCSTATUS))
results = self.cur.fetchone()
if results is not None:
result = results[0]
else:
result = None
return result
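# Hedged sketch (not part of gajim): an approximation of the schema implied by the
# docstrings of get_jid_id() and commit_to_db() above, enough to exercise this
# module's queries against a throwaway database.  Column types and constraints are
# assumptions; the real tables are created in src/common/checks_paths.py.
TOY_SCHEMA = '''
CREATE TABLE jids (jid_id INTEGER PRIMARY KEY AUTOINCREMENT, jid TEXT UNIQUE, type INTEGER);
CREATE TABLE logs (log_id INTEGER PRIMARY KEY AUTOINCREMENT, jid_id INTEGER,
	contact_name TEXT, time INTEGER, kind INTEGER, show INTEGER,
	message TEXT, subject TEXT);
'''
if __name__ == '__main__':
	# quick smoke test against an in-memory database ('someone@example.org' is a placeholder)
	con = sqlite.connect(':memory:')
	cur = con.cursor()
	cur.executescript(TOY_SCHEMA)
	cur.execute('INSERT INTO jids (jid, type) VALUES (?, ?)',
		('someone@example.org', constants.JID_NORMAL_TYPE))
	cur.execute('SELECT jid_id, jid, type FROM jids')
	print cur.fetchall()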
|
gpl-2.0
| 279,528,875,289,906,300
| 38.266667
| 120
| 0.563182
| false
| 3.714414
| false
| false
| false
|
joshwatson/binaryninja-api
|
python/mediumlevelil.py
|
1
|
51471
|
# Copyright (c) 2018-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
import struct
# Binary Ninja components
import binaryninja
from binaryninja import _binaryninjacore as core
from binaryninja.enums import MediumLevelILOperation, InstructionTextTokenType, ILBranchDependence, DataFlowQueryOption
from binaryninja import basicblock #required for MediumLevelILBasicBlock argument
from binaryninja import function
from binaryninja import types
from binaryninja import lowlevelil
# 2-3 compatibility
from binaryninja import range
class SSAVariable(object):
def __init__(self, var, version):
self._var = var
self._version = version
def __repr__(self):
return "<ssa %s version %d>" % (repr(self._var), self._version)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self._var, self._version) == (other.var, other.version)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash((self._var, self._version))
@property
def var(self):
""" """
return self._var
@var.setter
def var(self, value):
self._var = value
@property
def version(self):
""" """
return self._version
@version.setter
def version(self, value):
self._version = value
class MediumLevelILLabel(object):
def __init__(self, handle = None):
if handle is None:
self.handle = (core.BNMediumLevelILLabel * 1)()
core.BNMediumLevelILInitLabel(self.handle)
else:
self.handle = handle
class MediumLevelILOperationAndSize(object):
def __init__(self, operation, size):
self._operation = operation
self._size = size
def __repr__(self):
if self._size == 0:
return "<%s>" % self._operation.name
return "<%s %d>" % (self._operation.name, self._size)
def __eq__(self, other):
if isinstance(other, MediumLevelILOperation):
return other == self._operation
if isinstance(other, self.__class__):
return (other.size, other.operation) == (self._size, self._operation)
return NotImplemented
def __ne__(self, other):
if isinstance(other, MediumLevelILOperation) or isinstance(other, self.__class__):
return not (self == other)
return NotImplemented
def __hash__(self):
return hash((self._operation, self._size))
@property
def operation(self):
""" """
return self._operation
@property
def size(self):
""" """
return self._size
class MediumLevelILInstruction(object):
"""
``class MediumLevelILInstruction`` Medium Level Intermediate Language Instructions are infinite length tree-based
instructions. Tree-based instructions use infix notation with the left hand operand being the destination operand.
Infix notation is thus more natural to read than other notations (e.g. x86 ``mov eax, 0`` vs. MLIL ``eax = 0``).
"""
ILOperations = {
MediumLevelILOperation.MLIL_NOP: [],
MediumLevelILOperation.MLIL_SET_VAR: [("dest", "var"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_FIELD: [("dest", "var"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SPLIT: [("high", "var"), ("low", "var"), ("src", "expr")],
MediumLevelILOperation.MLIL_LOAD: [("src", "expr")],
MediumLevelILOperation.MLIL_LOAD_STRUCT: [("src", "expr"), ("offset", "int")],
MediumLevelILOperation.MLIL_STORE: [("dest", "expr"), ("src", "expr")],
MediumLevelILOperation.MLIL_STORE_STRUCT: [("dest", "expr"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR: [("src", "var")],
MediumLevelILOperation.MLIL_VAR_FIELD: [("src", "var"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_SPLIT: [("high", "var"), ("low", "var")],
MediumLevelILOperation.MLIL_ADDRESS_OF: [("src", "var")],
MediumLevelILOperation.MLIL_ADDRESS_OF_FIELD: [("src", "var"), ("offset", "int")],
MediumLevelILOperation.MLIL_CONST: [("constant", "int")],
MediumLevelILOperation.MLIL_CONST_PTR: [("constant", "int")],
MediumLevelILOperation.MLIL_EXTERN_PTR: [("constant", "int"), ("offset", "int")],
MediumLevelILOperation.MLIL_FLOAT_CONST: [("constant", "float")],
MediumLevelILOperation.MLIL_IMPORT: [("constant", "int")],
MediumLevelILOperation.MLIL_ADD: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ADC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_SUB: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SBB: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_AND: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_OR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_XOR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_LSL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_LSR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ASR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ROL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_RLC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_ROR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_RRC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_MUL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MULU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MULS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVU: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVS: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODU: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODS: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_NEG: [("src", "expr")],
MediumLevelILOperation.MLIL_NOT: [("src", "expr")],
MediumLevelILOperation.MLIL_SX: [("src", "expr")],
MediumLevelILOperation.MLIL_ZX: [("src", "expr")],
MediumLevelILOperation.MLIL_LOW_PART: [("src", "expr")],
MediumLevelILOperation.MLIL_JUMP: [("dest", "expr")],
MediumLevelILOperation.MLIL_JUMP_TO: [("dest", "expr"), ("targets", "target_map")],
MediumLevelILOperation.MLIL_RET_HINT: [("dest", "expr")],
MediumLevelILOperation.MLIL_CALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_CALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_CALL_OUTPUT: [("dest", "var_list")],
MediumLevelILOperation.MLIL_CALL_PARAM: [("src", "var_list")],
MediumLevelILOperation.MLIL_RET: [("src", "expr_list")],
MediumLevelILOperation.MLIL_NORET: [],
MediumLevelILOperation.MLIL_IF: [("condition", "expr"), ("true", "int"), ("false", "int")],
MediumLevelILOperation.MLIL_GOTO: [("dest", "int")],
MediumLevelILOperation.MLIL_CMP_E: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_NE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SLT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_ULT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SLE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_ULE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SGE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_UGE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SGT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_UGT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_TEST_BIT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_BOOL_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_ADD_OVERFLOW: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SYSCALL: [("output", "var_list"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_SYSCALL_UNTYPED: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_TAILCALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_TAILCALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_BP: [],
MediumLevelILOperation.MLIL_TRAP: [("vector", "int")],
MediumLevelILOperation.MLIL_INTRINSIC: [("output", "var_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_INTRINSIC_SSA: [("output", "var_ssa_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_FREE_VAR_SLOT: [("dest", "var")],
MediumLevelILOperation.MLIL_FREE_VAR_SLOT_SSA: [("prev", "var_ssa_dest_and_src")],
MediumLevelILOperation.MLIL_UNDEF: [],
MediumLevelILOperation.MLIL_UNIMPL: [],
MediumLevelILOperation.MLIL_UNIMPL_MEM: [("src", "expr")],
MediumLevelILOperation.MLIL_FADD: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FSUB: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FMUL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FDIV: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FSQRT: [("src", "expr")],
MediumLevelILOperation.MLIL_FNEG: [("src", "expr")],
MediumLevelILOperation.MLIL_FABS: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOAT_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_INT_TO_FLOAT: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOAT_CONV: [("src", "expr")],
MediumLevelILOperation.MLIL_ROUND_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOOR: [("src", "expr")],
MediumLevelILOperation.MLIL_CEIL: [("src", "expr")],
MediumLevelILOperation.MLIL_FTRUNC: [("src", "expr")],
MediumLevelILOperation.MLIL_FCMP_E: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_NE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_LT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_LE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_GE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_GT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_O: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_UO: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SSA: [("dest", "var_ssa"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_ALIASED: [("prev", "var_ssa_dest_and_src"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR_SSA: [("src", "var_ssa")],
MediumLevelILOperation.MLIL_VAR_SSA_FIELD: [("src", "var_ssa"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_ALIASED: [("src", "var_ssa")],
MediumLevelILOperation.MLIL_VAR_ALIASED_FIELD: [("src", "var_ssa"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa")],
MediumLevelILOperation.MLIL_CALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_SYSCALL_SSA: [("output", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_TAILCALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA: [("dest_memory", "int"), ("dest", "var_ssa_list")],
MediumLevelILOperation.MLIL_CALL_PARAM_SSA: [("src_memory", "int"), ("src", "var_ssa_list")],
MediumLevelILOperation.MLIL_LOAD_SSA: [("src", "expr"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_LOAD_STRUCT_SSA: [("src", "expr"), ("offset", "int"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_STORE_SSA: [("dest", "expr"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_STORE_STRUCT_SSA: [("dest", "expr"), ("offset", "int"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR_PHI: [("dest", "var_ssa"), ("src", "var_ssa_list")],
MediumLevelILOperation.MLIL_MEM_PHI: [("dest_memory", "int"), ("src_memory", "int_list")]
}
def __init__(self, func, expr_index, instr_index=None):
instr = core.BNGetMediumLevelILByIndex(func.handle, expr_index)
self._function = func
self._expr_index = expr_index
if instr_index is None:
self._instr_index = core.BNGetMediumLevelILInstructionForExpr(func.handle, expr_index)
else:
self._instr_index = instr_index
self._operation = MediumLevelILOperation(instr.operation)
self._size = instr.size
self._address = instr.address
self._source_operand = instr.sourceOperand
operands = MediumLevelILInstruction.ILOperations[instr.operation]
self._operands = []
i = 0
for operand in operands:
name, operand_type = operand
if operand_type == "int":
value = instr.operands[i]
value = (value & ((1 << 63) - 1)) - (value & (1 << 63))
elif operand_type == "float":
if instr.size == 4:
value = struct.unpack("f", struct.pack("I", instr.operands[i] & 0xffffffff))[0]
elif instr.size == 8:
value = struct.unpack("d", struct.pack("Q", instr.operands[i]))[0]
else:
value = instr.operands[i]
elif operand_type == "expr":
value = MediumLevelILInstruction(func, instr.operands[i])
elif operand_type == "intrinsic":
value = lowlevelil.ILIntrinsic(func.arch, instr.operands[i])
elif operand_type == "var":
value = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
elif operand_type == "var_ssa":
var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
version = instr.operands[i + 1]
i += 1
value = SSAVariable(var, version)
elif operand_type == "var_ssa_dest_and_src":
var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
dest_version = instr.operands[i + 1]
src_version = instr.operands[i + 2]
i += 2
self._operands.append(SSAVariable(var, dest_version))
#TODO: documentation for dest
self.dest = SSAVariable(var, dest_version)
value = SSAVariable(var, src_version)
elif operand_type == "int_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
value = []
for j in range(count.value):
value.append(operand_list[j])
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "var_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value):
value.append(function.Variable.from_identifier(self._function.source_function, operand_list[j]))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "var_ssa_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value // 2):
var_id = operand_list[j * 2]
var_version = operand_list[(j * 2) + 1]
value.append(SSAVariable(function.Variable.from_identifier(self._function.source_function,
var_id), var_version))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "expr_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value):
value.append(MediumLevelILInstruction(func, operand_list[j]))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "target_map":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = {}
for j in range(count.value // 2):
key = operand_list[j * 2]
target = operand_list[(j * 2) + 1]
value[key] = target
core.BNMediumLevelILFreeOperandList(operand_list)
self._operands.append(value)
self.__dict__[name] = value
i += 1
def __str__(self):
tokens = self.tokens
if tokens is None:
return "invalid"
result = ""
for token in tokens:
result += token.text
return result
def __repr__(self):
return "<il: %s>" % str(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self._expr_index == other.expr_index
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index < other.expr_index
def __le__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index <= other.expr_index
def __gt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index > other.expr_index
def __ge__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index >= other.expr_index
def __hash__(self):
return hash((self._instr_index, self._function))
@property
def tokens(self):
"""MLIL tokens (read-only)"""
count = ctypes.c_ulonglong()
tokens = ctypes.POINTER(core.BNInstructionTextToken)()
if ((self._instr_index is not None) and (self._function.source_function is not None) and
(self._expr_index == core.BNGetMediumLevelILIndexForInstruction(self._function.handle, self._instr_index))):
if not core.BNGetMediumLevelILInstructionText(self._function.handle, self._function.source_function.handle,
self._function.arch.handle, self._instr_index, tokens, count):
return None
else:
if not core.BNGetMediumLevelILExprText(self._function.handle, self._function.arch.handle,
self._expr_index, tokens, count):
return None
result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
core.BNFreeInstructionText(tokens, count.value)
return result
@property
def il_basic_block(self):
"""IL basic block object containing this expression (read-only) (only available on finalized functions)"""
return MediumLevelILBasicBlock(self._function.source_function.view, core.BNGetMediumLevelILBasicBlockForInstruction(self._function.handle, self._instr_index), self._function)
@property
def ssa_form(self):
"""SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.ssa_form,
core.BNGetMediumLevelILSSAExprIndex(self._function.handle, self._expr_index))
@property
def non_ssa_form(self):
"""Non-SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.non_ssa_form,
core.BNGetMediumLevelILNonSSAExprIndex(self._function.handle, self._expr_index))
@property
def value(self):
"""Value of expression if constant or a known value (read-only)"""
value = core.BNGetMediumLevelILExprValue(self._function.handle, self._expr_index)
result = function.RegisterValue(self._function.arch, value)
return result
@property
def possible_values(self):
"""Possible values of expression using path-sensitive static data flow analysis (read-only)"""
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, None, 0)
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
@property
def branch_dependence(self):
"""Set of branching instructions that must take the true or false path to reach this instruction"""
count = ctypes.c_ulonglong()
deps = core.BNGetAllMediumLevelILBranchDependence(self._function.handle, self._instr_index, count)
result = {}
for i in range(0, count.value):
result[deps[i].branch] = ILBranchDependence(deps[i].dependence)
core.BNFreeILBranchDependenceList(deps)
return result
@property
def low_level_il(self):
"""Low level IL form of this expression"""
expr = self._function.get_low_level_il_expr_index(self._expr_index)
if expr is None:
return None
return lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def llils(self):
exprs = self._function.get_low_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr))
return result
@property
def high_level_il(self):
"""High level IL form of this expression"""
expr = self._function.get_high_level_il_expr_index(self._expr_index)
if expr is None:
return None
return binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr)
@property
def hlil(self):
"""Alias for high_level_il"""
return self.high_level_il
@property
def hlils(self):
exprs = self._function.get_high_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr))
return result
@property
def ssa_memory_version(self):
"""Version of active memory contents in SSA form for this instruction"""
return core.BNGetMediumLevelILSSAMemoryVersionAtILInstruction(self._function.handle, self._instr_index)
@property
def prefix_operands(self):
"""All operands in the expression tree in prefix order"""
result = [MediumLevelILOperationAndSize(self._operation, self._size)]
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.prefix_operands
else:
result.append(operand)
return result
@property
def postfix_operands(self):
"""All operands in the expression tree in postfix order"""
result = []
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.postfix_operands
else:
result.append(operand)
result.append(MediumLevelILOperationAndSize(self._operation, self._size))
return result
@property
def vars_written(self):
"""List of variables written by instruction"""
if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_SSA, MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_ALIASED, MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD,
MediumLevelILOperation.MLIL_VAR_PHI]:
return [self.dest]
elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA]:
return [self.high, self.low]
elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL]:
return self.output
elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA,
MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA,
MediumLevelILOperation.MLIL_TAILCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
return self.output.vars_written
elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
return self.dest
return []
@property
def vars_read(self):
"""List of variables read by instruction"""
if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SSA,
MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA, MediumLevelILOperation.MLIL_SET_VAR_ALIASED]:
return self.src.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD]:
return [self.prev] + self.src.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL,
MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_SSA]:
result = []
for param in self.params:
result += param.vars_read
return result
elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
return self.params.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_CALL_PARAM, MediumLevelILOperation.MLIL_CALL_PARAM_SSA,
MediumLevelILOperation.MLIL_VAR_PHI]:
return self.src
elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
return []
result = []
for operand in self._operands:
if (isinstance(operand, function.Variable)) or (isinstance(operand, SSAVariable)):
result.append(operand)
elif isinstance(operand, MediumLevelILInstruction):
result += operand.vars_read
return result
@property
def expr_type(self):
"""Type of expression"""
result = core.BNGetMediumLevelILExprType(self._function.handle, self._expr_index)
if result.type:
platform = None
if self._function.source_function:
platform = self._function.source_function.platform
return types.Type(result.type, platform = platform, confidence = result.confidence)
return None
def get_possible_values(self, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_possible_values(self, ssa_var, options = []):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleSSAVarValues(self._function.handle, var_data, ssa_var.version,
self._instr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_version(self, var):
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
return core.BNGetMediumLevelILSSAVarVersionAtILInstruction(self._function.handle, var_data, self._instr_index)
def get_var_for_reg(self, reg):
reg = self._function.arch.get_reg_index(reg)
result = core.BNGetMediumLevelILVariableForRegisterAtInstruction(self._function.handle, reg, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_var_for_flag(self, flag):
flag = self._function.arch.get_flag_index(flag)
result = core.BNGetMediumLevelILVariableForFlagAtInstruction(self._function.handle, flag, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_var_for_stack_location(self, offset):
result = core.BNGetMediumLevelILVariableForStackLocationAtInstruction(self._function.handle, offset, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_reg_value(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAtInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_reg_value_after(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAfterInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_reg_values(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAtInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_reg_values_after(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAfterInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_flag_value(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAtInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_flag_value_after(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAfterInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_flag_values(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAtInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_flag_values_after(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAfterInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_stack_contents(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_stack_contents_after(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_stack_contents(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_stack_contents_after(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_branch_dependence(self, branch_instr):
return ILBranchDependence(core.BNGetMediumLevelILBranchDependence(self._function.handle, self._instr_index, branch_instr))
@property
def function(self):
""" """
return self._function
@property
def expr_index(self):
""" """
return self._expr_index
@property
def instr_index(self):
""" """
return self._instr_index
@property
def operation(self):
""" """
return self._operation
@property
def size(self):
""" """
return self._size
@property
def address(self):
""" """
return self._address
@property
def source_operand(self):
""" """
return self._source_operand
@property
def operands(self):
""" """
return self._operands
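# Hedged usage sketch (not an official example): dump each MLIL instruction of a
# named function together with the variables it reads and writes, using only
# properties defined on MediumLevelILInstruction above.  Assumes an open
# BinaryView ``bv``; the function name "main" is a placeholder.
def _dump_mlil_vars(bv, name="main"):
	for func in bv.functions:
		if func.name != name:
			continue
		for block in func.mlil:
			for insn in block:
				print("%#x  %s  reads=%r  writes=%r" % (
					insn.address, insn, insn.vars_read, insn.vars_written))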
class MediumLevelILExpr(object):
"""
	``class MediumLevelILExpr`` holds the index of IL Expressions.
.. note:: This class shouldn't be instantiated directly. Rather the helper members of MediumLevelILFunction should be \
used instead.
"""
def __init__(self, index):
self._index = index
@property
def index(self):
""" """
return self._index
@index.setter
def index(self, value):
self._index = value
class MediumLevelILFunction(object):
"""
``class MediumLevelILFunction`` contains the list of MediumLevelILExpr objects that make up a binaryninja.function. MediumLevelILExpr
objects can be added to the MediumLevelILFunction by calling :func:`append` and passing the result of the various class
methods which return MediumLevelILExpr objects.
"""
def __init__(self, arch = None, handle = None, source_func = None):
self._arch = arch
self._source_function = source_func
if handle is not None:
self.handle = core.handle_of_type(handle, core.BNMediumLevelILFunction)
if self._source_function is None:
self._source_function = binaryninja.function.Function(handle = core.BNGetMediumLevelILOwnerFunction(self.handle))
if self._arch is None:
self._arch = self._source_function.arch
else:
if self._source_function is None:
self.handle = None
raise ValueError("IL functions must be created with an associated function")
if self._arch is None:
self._arch = self._source_function.arch
func_handle = self._source_function.handle
self.handle = core.BNCreateMediumLevelILFunction(arch.handle, func_handle)
def __del__(self):
if self.handle is not None:
core.BNFreeMediumLevelILFunction(self.handle)
def __repr__(self):
arch = self.source_function.arch
if arch:
return "<mlil func: %s@%#x>" % (arch.name, self.source_function.start)
else:
return "<mlil func: %#x>" % self.source_function.start
def __len__(self):
return int(core.BNGetMediumLevelILInstructionCount(self.handle))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash(('MLIL', self._source_function))
def __getitem__(self, i):
if isinstance(i, slice) or isinstance(i, tuple):
raise IndexError("expected integer instruction index")
if isinstance(i, MediumLevelILExpr):
return MediumLevelILInstruction(self, i.index)
# for backwards compatibility
if isinstance(i, MediumLevelILInstruction):
return i
if i < -len(self) or i >= len(self):
raise IndexError("index out of range")
if i < 0:
i = len(self) + i
return MediumLevelILInstruction(self, core.BNGetMediumLevelILIndexForInstruction(self.handle, i), i)
def __setitem__(self, i, j):
raise IndexError("instruction modification not implemented")
def __iter__(self):
count = ctypes.c_ulonglong()
blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
view = None
if self._source_function is not None:
view = self._source_function.view
try:
for i in range(0, count.value):
yield MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self)
finally:
core.BNFreeBasicBlockList(blocks, count.value)
@property
def current_address(self):
"""Current IL Address (read/write)"""
return core.BNMediumLevelILGetCurrentAddress(self.handle)
@current_address.setter
def current_address(self, value):
core.BNMediumLevelILSetCurrentAddress(self.handle, self._arch.handle, value)
def set_current_address(self, value, arch = None):
if arch is None:
arch = self._arch
core.BNMediumLevelILSetCurrentAddress(self.handle, arch.handle, value)
@property
def basic_blocks(self):
"""list of MediumLevelILBasicBlock objects (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
result = []
view = None
if self._source_function is not None:
view = self._source_function.view
for i in range(0, count.value):
result.append(MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def instructions(self):
"""A generator of mlil instructions of the current function"""
for block in self.basic_blocks:
for i in block:
yield i
@property
def ssa_form(self):
"""Medium level IL in SSA form (read-only)"""
result = core.BNGetMediumLevelILSSAForm(self.handle)
if not result:
return None
return MediumLevelILFunction(self._arch, result, self._source_function)
@property
def non_ssa_form(self):
"""Medium level IL in non-SSA (default) form (read-only)"""
result = core.BNGetMediumLevelILNonSSAForm(self.handle)
if not result:
return None
return MediumLevelILFunction(self._arch, result, self._source_function)
@property
def low_level_il(self):
"""Low level IL for this function"""
result = core.BNGetLowLevelILForMediumLevelIL(self.handle)
if not result:
return None
return lowlevelil.LowLevelILFunction(self._arch, result, self._source_function)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def high_level_il(self):
"""High level IL for this medium level IL."""
result = core.BNGetHighLevelILForMediumLevelIL(self.handle)
if not result:
return None
return binaryninja.highlevelil.HighLevelILFunction(self._arch, result, self._source_function)
@property
def hlil(self):
return self.high_level_il
def get_instruction_start(self, addr, arch = None):
if arch is None:
arch = self._arch
result = core.BNMediumLevelILGetInstructionStart(self.handle, arch.handle, addr)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return result
def expr(self, operation, a = 0, b = 0, c = 0, d = 0, e = 0, size = 0):
if isinstance(operation, str):
operation = MediumLevelILOperation[operation]
elif isinstance(operation, MediumLevelILOperation):
operation = operation.value
return MediumLevelILExpr(core.BNMediumLevelILAddExpr(self.handle, operation, size, a, b, c, d, e))
def append(self, expr):
"""
``append`` adds the MediumLevelILExpr ``expr`` to the current MediumLevelILFunction.
:param MediumLevelILExpr expr: the MediumLevelILExpr to add to the current MediumLevelILFunction
:return: number of MediumLevelILExpr in the current function
:rtype: int
"""
return core.BNMediumLevelILAddInstruction(self.handle, expr.index)
def goto(self, label):
"""
``goto`` returns a goto expression which jumps to the provided MediumLevelILLabel.
:param MediumLevelILLabel label: Label to jump to
:return: the MediumLevelILExpr that jumps to the provided label
:rtype: MediumLevelILExpr
"""
return MediumLevelILExpr(core.BNMediumLevelILGoto(self.handle, label.handle))
def if_expr(self, operand, t, f):
"""
		``if_expr`` returns an ``if`` expression which jumps to the MediumLevelILLabel
		``t`` when the condition expression ``operand`` is non-zero and to ``f`` when it is zero.
:param MediumLevelILExpr operand: comparison expression to evaluate.
:param MediumLevelILLabel t: Label for the true branch
:param MediumLevelILLabel f: Label for the false branch
:return: the MediumLevelILExpr for the if expression
:rtype: MediumLevelILExpr
"""
return MediumLevelILExpr(core.BNMediumLevelILIf(self.handle, operand.index, t.handle, f.handle))
def mark_label(self, label):
"""
``mark_label`` assigns a MediumLevelILLabel to the current IL address.
:param MediumLevelILLabel label:
:rtype: None
"""
core.BNMediumLevelILMarkLabel(self.handle, label.handle)
def add_label_list(self, labels):
"""
``add_label_list`` returns a label list expression for the given list of MediumLevelILLabel objects.
:param labels: the list of MediumLevelILLabel to get a label list expression from
:type labels: list(MediumLevelILLabel)
:return: the label list expression
:rtype: MediumLevelILExpr
"""
label_list = (ctypes.POINTER(core.BNMediumLevelILLabel) * len(labels))()
for i in range(len(labels)):
label_list[i] = labels[i].handle
return MediumLevelILExpr(core.BNMediumLevelILAddLabelList(self.handle, label_list, len(labels)))
def add_operand_list(self, operands):
"""
``add_operand_list`` returns an operand list expression for the given list of integer operands.
:param operands: list of operand numbers
:type operands: list(int)
:return: an operand list expression
:rtype: MediumLevelILExpr
"""
operand_list = (ctypes.c_ulonglong * len(operands))()
for i in range(len(operands)):
operand_list[i] = operands[i]
return MediumLevelILExpr(core.BNMediumLevelILAddOperandList(self.handle, operand_list, len(operands)))
def finalize(self):
"""
``finalize`` ends the function and computes the list of basic blocks.
:rtype: None
"""
core.BNFinalizeMediumLevelILFunction(self.handle)
def get_ssa_instruction_index(self, instr):
return core.BNGetMediumLevelILSSAInstructionIndex(self.handle, instr)
def get_non_ssa_instruction_index(self, instr):
return core.BNGetMediumLevelILNonSSAInstructionIndex(self.handle, instr)
def get_ssa_var_definition(self, ssa_var):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
result = core.BNGetMediumLevelILSSAVarDefinition(self.handle, var_data, ssa_var.version)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return self[result]
def get_ssa_memory_definition(self, version):
result = core.BNGetMediumLevelILSSAMemoryDefinition(self.handle, version)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return self[result]
def get_ssa_var_uses(self, ssa_var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
instrs = core.BNGetMediumLevelILSSAVarUses(self.handle, var_data, ssa_var.version, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_ssa_memory_uses(self, version):
count = ctypes.c_ulonglong()
instrs = core.BNGetMediumLevelILSSAMemoryUses(self.handle, version, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def is_ssa_var_live(self, ssa_var):
"""
``is_ssa_var_live`` determines if ``ssa_var`` is live at any point in the function
:param SSAVariable ssa_var: the SSA variable to query
:return: whether the variable is live at any point in the function
:rtype: bool
"""
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
return core.BNIsMediumLevelILSSAVarLive(self.handle, var_data, ssa_var.version)
def get_var_definitions(self, var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
instrs = core.BNGetMediumLevelILVariableDefinitions(self.handle, var_data, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_var_uses(self, var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
instrs = core.BNGetMediumLevelILVariableUses(self.handle, var_data, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_ssa_var_value(self, ssa_var):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
value = core.BNGetMediumLevelILSSAVarValue(self.handle, var_data, ssa_var.version)
result = function.RegisterValue(self._arch, value)
return result
def get_low_level_il_instruction_index(self, instr):
low_il = self.low_level_il
if low_il is None:
return None
low_il = low_il.ssa_form
if low_il is None:
return None
result = core.BNGetLowLevelILInstructionIndex(self.handle, instr)
if result >= core.BNGetLowLevelILInstructionCount(low_il.handle):
return None
return result
def get_low_level_il_expr_index(self, expr):
low_il = self.low_level_il
if low_il is None:
return None
low_il = low_il.ssa_form
if low_il is None:
return None
result = core.BNGetLowLevelILExprIndex(self.handle, expr)
if result >= core.BNGetLowLevelILExprCount(low_il.handle):
return None
return result
def get_low_level_il_expr_indexes(self, expr):
count = ctypes.c_ulonglong()
exprs = core.BNGetLowLevelILExprIndexes(self.handle, expr, count)
result = []
for i in range(0, count.value):
result.append(exprs[i])
core.BNFreeILInstructionList(exprs)
return result
def get_high_level_il_instruction_index(self, instr):
high_il = self.high_level_il
if high_il is None:
return None
result = core.BNGetHighLevelILInstructionIndex(self.handle, instr)
if result >= core.BNGetHighLevelILInstructionCount(high_il.handle):
return None
return result
def get_high_level_il_expr_index(self, expr):
high_il = self.high_level_il
if high_il is None:
return None
result = core.BNGetHighLevelILExprIndex(self.handle, expr)
if result >= core.BNGetHighLevelILExprCount(high_il.handle):
return None
return result
def get_high_level_il_expr_indexes(self, expr):
count = ctypes.c_ulonglong()
exprs = core.BNGetHighLevelILExprIndexes(self.handle, expr, count)
result = []
for i in range(0, count.value):
result.append(exprs[i])
core.BNFreeILInstructionList(exprs)
return result
def create_graph(self, settings = None):
if settings is not None:
settings_obj = settings.handle
else:
settings_obj = None
return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateMediumLevelILFunctionGraph(self.handle, settings_obj))
@property
def arch(self):
""" """
return self._arch
@arch.setter
def arch(self, value):
self._arch = value
@property
def source_function(self):
""" """
return self._source_function
@source_function.setter
def source_function(self, value):
self._source_function = value
class MediumLevelILBasicBlock(basicblock.BasicBlock):
def __init__(self, view, handle, owner):
super(MediumLevelILBasicBlock, self).__init__(handle, view)
self.il_function = owner
def __repr__(self):
arch = self.arch
if arch:
return "<mlil block: %s@%d-%d>" % (arch.name, self.start, self.end)
else:
return "<mlil block: %d-%d>" % (self.start, self.end)
def __iter__(self):
for idx in range(self.start, self.end):
yield self.il_function[idx]
def __getitem__(self, idx):
size = self.end - self.start
		if idx >= size or idx < -size:
raise IndexError("list index is out of range")
if idx >= 0:
return self.il_function[idx + self.start]
else:
return self.il_function[self.end + idx]
def __hash__(self):
return hash((self.start, self.end, self.il_function))
def __contains__(self, instruction):
if type(instruction) != MediumLevelILInstruction or instruction.il_basic_block != self:
return False
		# self.end is exclusive, matching the iteration range used in __iter__
		return self.start <= instruction.instr_index < self.end
def _create_instance(self, handle, view):
"""Internal method by super to instantiate child instances"""
return MediumLevelILBasicBlock(view, handle, self.il_function)
@property
def il_function(self):
""" """
return self._il_function
@il_function.setter
def il_function(self, value):
self._il_function = value
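# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how the classes above are typically consumed from a
# user script. ``func`` is assumed to be a binaryninja.function.Function obtained
# elsewhere (e.g. from an open BinaryView); nothing below is used by this module.
def _example_walk_mlil(func):
	"""Illustrative only: walk MLIL blocks and report variables written per instruction."""
	mlil = func.medium_level_il  # MediumLevelILFunction defined above
	for block in mlil:  # MediumLevelILBasicBlock
		for instr in block:  # MediumLevelILInstruction
			for var in instr.vars_written:
				print(var, instr.instr_index)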
|
mit
| -3,140,209,989,579,271,000
| 38.9
| 176
| 0.71345
| false
| 3.141541
| false
| false
| false
|
aimalz/chippr
|
chippr/log_z_dens.py
|
1
|
22517
|
import numpy as np
import scipy as sp
import os
import scipy.optimize as op
import cPickle as cpkl
import emcee
import matplotlib as mpl
mpl.use('PS')
import matplotlib.pyplot as plt
import chippr
from chippr import defaults as d
from chippr import plot_utils as pu
from chippr import utils as u
from chippr import stat_utils as s
from chippr import log_z_dens_plots as plots
class log_z_dens(object):
def __init__(self, catalog, hyperprior, truth=None, loc='.', prepend='', vb=True):
"""
An object representing the redshift density function (normalized
redshift distribution function)
Parameters
----------
catalog: chippr.catalog object
dict containing bin endpoints, interim prior bin values, and
interim posterior PDF bin values
hyperprior: chippr.mvn object
multivariate Gaussian distribution for hyperprior distribution
truth: chippr.gmix object, optional
true redshift density function expressed as univariate Gaussian
mixture
loc: string, optional
directory into which to save results and plots made along the way
prepend: str, optional
prepend string to file names
vb: boolean, optional
True to print progress messages to stdout, False to suppress
"""
self.info = {}
self.add_text = prepend + '_'
self.bin_ends = np.array(catalog['bin_ends'])
self.bin_range = self.bin_ends[:-1]-self.bin_ends[0]
self.bin_mids = (self.bin_ends[1:]+self.bin_ends[:-1])/2.
self.bin_difs = self.bin_ends[1:]-self.bin_ends[:-1]
self.log_bin_difs = u.safe_log(self.bin_difs)
self.n_bins = len(self.bin_mids)
self.info['bin_ends'] = self.bin_ends
self.log_int_pr = np.array(catalog['log_interim_prior'])
self.int_pr = np.exp(self.log_int_pr)
self.info['log_interim_prior'] = self.log_int_pr
self.log_pdfs = np.array(catalog['log_interim_posteriors'])
self.pdfs = np.exp(self.log_pdfs)
self.n_pdfs = len(self.log_pdfs)
self.info['log_interim_posteriors'] = self.log_pdfs
if vb:
print(str(self.n_bins) + ' bins, ' + str(len(self.log_pdfs)) + ' interim posterior PDFs')
self.hyper_prior = hyperprior
self.truth = truth
self.info['truth'] = None
if self.truth is not None:
self.info['truth'] = {}
self.tru_nz = np.zeros(self.n_bins)
self.fine_zs = []
self.fine_nz = []
for b in range(self.n_bins):
fine_z = np.linspace(self.bin_ends[b], self.bin_ends[b+1], self.n_bins)
self.fine_zs.extend(fine_z)
fine_dz = (self.bin_ends[b+1] - self.bin_ends[b]) / self.n_bins
fine_n = self.truth.evaluate(fine_z)
self.fine_nz.extend(fine_n)
coarse_nz = np.sum(fine_n) * fine_dz
self.tru_nz[b] += coarse_nz
self.tru_nz /= np.dot(self.tru_nz, self.bin_difs)
self.log_tru_nz = u.safe_log(self.tru_nz)
self.info['log_tru_nz'] = self.log_tru_nz
self.info['truth']['z_grid'] = np.array(self.fine_zs)
self.info['truth']['nz_grid'] = np.array(self.fine_nz)
self.info['estimators'] = {}
self.info['stats'] = {}
self.dir = loc
self.data_dir = os.path.join(loc, 'data')
self.plot_dir = os.path.join(loc, 'plots')
if not os.path.exists(self.plot_dir):
os.makedirs(self.plot_dir)
self.res_dir = os.path.join(loc, 'results')
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
return
#
# def precompute(self):
# """
# Function to precompute values that show up in posterior that are independent of n(z) params
#
# Returns
# -------
# precomputed: float
# log-probability component independent of test params
# """
# integrated_int_pr = np.log(np.dot(self.int_pr, self.bin_difs))
# integrated_int_posts = np.log(np.dot(self.pdfs, axis=0)
# precomputed = integrated_int_posts - integrated_int_pr
# return precomputed
def evaluate_log_hyper_likelihood(self, log_nz):
"""
Function to evaluate log hyperlikelihood
Parameters
----------
log_nz: numpy.ndarray, float
vector of logged redshift density bin values at which to evaluate
the hyperlikelihood
Returns
-------
log_hyper_likelihood: float
log likelihood probability associated with parameters in log_nz
"""
nz = np.exp(log_nz)
norm_nz = nz / np.dot(nz, self.bin_difs)
# testing whether the norm step is still necessary
hyper_lfs = np.sum(norm_nz[None,:] * self.pdfs / self.int_pr[None,:] * self.bin_difs, axis=1)
log_hyper_likelihood = np.sum(u.safe_log(hyper_lfs)) - u.safe_log(np.dot(norm_nz, self.bin_difs))
# this used to work...
# log_hyper_likelihood = np.dot(np.exp(log_nz + self.precomputed), self.bin_difs)
return log_hyper_likelihood
def evaluate_log_hyper_prior(self, log_nz):
"""
Function to evaluate log hyperprior
Parameters
----------
log_nz: numpy.ndarray, float
vector of logged redshift density bin values at which to evaluate
the hyperprior
Returns
-------
log_hyper_prior: float
log prior probability associated with parameters in log_nz
"""
log_hyper_prior = u.safe_log(self.hyper_prior.evaluate_one(log_nz))
return log_hyper_prior
def evaluate_log_hyper_posterior(self, log_nz):
"""
Function to evaluate log hyperposterior
Parameters
----------
log_nz: numpy.ndarray, float
vector of logged redshift density bin values at which to evaluate
the full posterior
Returns
-------
log_hyper_posterior: float
log hyperposterior probability associated with parameters in log_nz
"""
log_hyper_likelihood = self.evaluate_log_hyper_likelihood(log_nz)
log_hyper_prior = self.evaluate_log_hyper_prior(log_nz)
log_hyper_posterior = log_hyper_likelihood + log_hyper_prior
return log_hyper_posterior
def optimize(self, start, no_data, no_prior, vb=True):
"""
Maximizes the hyperposterior of the redshift density
Parameters
----------
start: numpy.ndarray, float
array of log redshift density function bin values at which to begin
optimization
no_data: boolean
True to exclude data contribution to hyperposterior
no_prior: boolean
True to exclude prior contribution to hyperposterior
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
res.x: numpy.ndarray, float
array of logged redshift density function bin values maximizing
hyperposterior
"""
if no_data:
if vb: print('only optimizing prior')
def _objective(log_nz):
return -2. * self.evaluate_log_hyper_prior(log_nz)
elif no_prior:
if vb: print('only optimizing likelihood')
def _objective(log_nz):
return -2. * self.evaluate_log_hyper_likelihood(log_nz)
else:
if vb: print('optimizing posterior')
def _objective(log_nz):
return -2. * self.evaluate_log_hyper_posterior(log_nz)
if vb:
print(self.dir + ' starting at ', start, _objective(start))
res = op.minimize(_objective, start, method="Nelder-Mead", options={"maxfev": 1e5, "maxiter":1e5})
if vb:
print(self.dir + ': ' + str(res))
return res.x
def calculate_mmle(self, start, vb=True, no_data=0, no_prior=0):
"""
Calculates the marginalized maximum likelihood estimator of the
redshift density function
Parameters
----------
start: numpy.ndarray, float
array of log redshift density function bin values at which to begin
optimization
vb: boolean, optional
True to print progress messages to stdout, False to suppress
no_data: boolean, optional
True to exclude data contribution to hyperposterior
no_prior: boolean, optional
True to exclude prior contribution to hyperposterior
Returns
-------
log_mle_nz: numpy.ndarray, float
array of logged redshift density function bin values maximizing
hyperposterior
"""
# self.precomputed = self.precompute()
if 'log_mmle_nz' not in self.info['estimators']:
log_mle = self.optimize(start, no_data=no_data, no_prior=no_prior, vb=vb)
mle_nz = np.exp(log_mle)
self.mle_nz = mle_nz / np.dot(mle_nz, self.bin_difs)
self.log_mle_nz = u.safe_log(self.mle_nz)
self.info['estimators']['log_mmle_nz'] = self.log_mle_nz
else:
self.log_mle_nz = self.info['estimators']['log_mmle_nz']
self.mle_nz = np.exp(self.log_mle_nz)
return self.log_mle_nz
def calculate_stacked(self, vb=True):
"""
Calculates the stacked estimator of the redshift density function
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
log_stk_nz: ndarray, float
array of logged redshift density function bin values
"""
if 'log_stacked_nz' not in self.info['estimators']:
self.stk_nz = np.sum(self.pdfs, axis=0)
self.stk_nz /= np.dot(self.stk_nz, self.bin_difs)
self.log_stk_nz = u.safe_log(self.stk_nz)
self.info['estimators']['log_stacked_nz'] = self.log_stk_nz
else:
self.log_stk_nz = self.info['estimators']['log_stacked_nz']
self.stk_nz = np.exp(self.log_stk_nz)
return self.log_stk_nz
def calculate_mmap(self, vb=True):
"""
Calculates the marginalized maximum a posteriori estimator of the
redshift density function
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
log_map_nz: ndarray, float
array of logged redshift density function bin values
"""
if 'log_mmap_nz' not in self.info['estimators']:
self.map_nz = np.zeros(self.n_bins)
mappreps = [np.argmax(l) for l in self.log_pdfs]
for m in mappreps:
self.map_nz[m] += 1.
            self.map_nz /= self.bin_difs * self.n_pdfs  # per-bin widths, as in calculate_mexp
self.log_map_nz = u.safe_log(self.map_nz)
self.info['estimators']['log_mmap_nz'] = self.log_map_nz
else:
self.log_map_nz = self.info['estimators']['log_mmap_nz']
self.map_nz = np.exp(self.log_map_nz)
return self.log_map_nz
def calculate_mexp(self, vb=True):
"""
Calculates the marginalized expected value estimator of the redshift
density function
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
log_exp_nz: ndarray, float
array of logged redshift density function bin values
"""
if 'log_mexp_nz' not in self.info['estimators']:
expprep = [sum(z) for z in self.bin_mids * self.pdfs * self.bin_difs]
self.exp_nz = np.zeros(self.n_bins)
for z in expprep:
for k in range(self.n_bins):
if z > self.bin_ends[k] and z < self.bin_ends[k+1]:
self.exp_nz[k] += 1.
self.exp_nz /= self.bin_difs * self.n_pdfs
self.log_exp_nz = u.safe_log(self.exp_nz)
self.info['estimators']['log_mexp_nz'] = self.log_exp_nz
else:
self.log_exp_nz = self.info['estimators']['log_mexp_nz']
self.exp_nz = np.exp(self.log_exp_nz)
return self.log_exp_nz
def sample(self, ivals, n_samps, vb=True):
"""
Samples the redshift density hyperposterior
Parameters
----------
ivals: numpy.ndarray, float
initial values of the walkers
n_samps: int
number of samples to accept before stopping
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
mcmc_outputs: dict
dictionary containing array of sampled redshift density function
bin values as well as posterior probabilities, acceptance
fractions, and autocorrelation times
"""
self.sampler.reset()
pos, prob, state = self.sampler.run_mcmc(ivals, n_samps)
chains = self.sampler.chain
probs = self.sampler.lnprobability
fracs = self.sampler.acceptance_fraction
acors = s.acors(chains)
mcmc_outputs = {}
mcmc_outputs['chains'] = chains
mcmc_outputs['probs'] = probs
mcmc_outputs['fracs'] = fracs
mcmc_outputs['acors'] = acors
return mcmc_outputs
def calculate_samples(self, ivals, n_accepted=d.n_accepted, n_burned=d.n_burned, vb=True, n_procs=1, no_data=0, no_prior=0, gr_threshold=d.gr_threshold):
"""
Calculates samples estimating the redshift density function
Parameters
----------
ivals: numpy.ndarray, float
initial values of log n(z) for each walker
n_accepted: int, optional
log10 number of samples to accept per walker
n_burned: int, optional
log10 number of samples between tests of burn-in condition
n_procs: int, optional
number of processors to use, defaults to single-thread
vb: boolean, optional
True to print progress messages to stdout, False to suppress
no_data: boolean, optional
True to exclude data contribution to hyperposterior
no_prior: boolean, optional
True to exclude prior contribution to hyperposterior
Returns
-------
log_samples_nz: ndarray, float
array of sampled log redshift density function bin values
"""
# self.precomputed = self.precompute()
if 'log_mean_sampled_nz' not in self.info['estimators']:
self.n_walkers = len(ivals)
if no_data:
def distribution(log_nz):
return self.evaluate_log_hyper_prior(log_nz)
elif no_prior:
def distribution(log_nz):
return self.evaluate_log_hyper_likelihood(log_nz)
else:
def distribution(log_nz):
return self.evaluate_log_hyper_posterior(log_nz)
self.sampler = emcee.EnsembleSampler(self.n_walkers, self.n_bins, distribution, threads=n_procs)
self.burn_ins = 0
if n_burned == 0:
self.burning_in = False
else:
self.burning_in = True
vals = ivals
vals -= u.safe_log(np.sum(np.exp(ivals) * self.bin_difs[np.newaxis, :], axis=1))[:, np.newaxis]
if vb:
plots.plot_ivals(vals, self.info, self.plot_dir, prepend=self.add_text)
canvas = plots.set_up_burn_in_plots(self.n_bins, self.n_walkers)
full_chain = np.array([[vals[w]] for w in range(self.n_walkers)])
while self.burning_in:
if vb:
print('beginning sampling '+str(self.burn_ins))
burn_in_mcmc_outputs = self.sample(vals, 10**n_burned)
chain = burn_in_mcmc_outputs['chains']
burn_in_mcmc_outputs['chains'] -= u.safe_log(np.sum(np.exp(chain) * self.bin_difs[np.newaxis, np.newaxis, :], axis=2))[:, :, np.newaxis]
with open(os.path.join(self.res_dir, 'mcmc'+str(self.burn_ins)+'.p'), 'wb') as file_location:
cpkl.dump(burn_in_mcmc_outputs, file_location)
full_chain = np.concatenate((full_chain, burn_in_mcmc_outputs['chains']), axis=1)
if vb:
canvas = plots.plot_sampler_progress(canvas, burn_in_mcmc_outputs, full_chain, self.burn_ins, self.plot_dir, prepend=self.add_text)
self.burning_in = s.gr_test(full_chain, gr_threshold)
vals = np.array([item[-1] for item in burn_in_mcmc_outputs['chains']])
self.burn_ins += 1
mcmc_outputs = self.sample(vals, 10**n_accepted)
chain = mcmc_outputs['chains']
mcmc_outputs['chains'] -= u.safe_log(np.sum(np.exp(chain) * self.bin_difs[np.newaxis, np.newaxis, :], axis=2))[:, :, np.newaxis]
full_chain = np.concatenate((full_chain, mcmc_outputs['chains']), axis=1)
with open(os.path.join(self.res_dir, 'full_chain.p'), 'wb') as file_location:
cpkl.dump(full_chain, file_location)
self.log_smp_nz = mcmc_outputs['chains']
self.smp_nz = np.exp(self.log_smp_nz)
self.info['log_sampled_nz_meta_data'] = mcmc_outputs
self.log_bfe_nz = s.norm_fit(self.log_smp_nz)[0]
self.bfe_nz = np.exp(self.log_bfe_nz)
self.info['estimators']['log_mean_sampled_nz'] = self.log_bfe_nz
else:
self.log_smp_nz = self.info['log_sampled_nz_meta_data']
self.smp_nz = np.exp(self.log_smp_nz)
self.log_bfe_nz = self.info['estimators']['log_mean_sampled_nz']
            self.bfe_nz = np.exp(self.log_bfe_nz)
# if vb:
# plots.plot_samples(self.info, self.plot_dir)
return self.log_smp_nz
def compare(self, vb=True):
"""
Calculates all available goodness of fit measures
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
out_info: dict
dictionary of all available statistics
"""
self.info['stats']['kld'], self.info['stats']['log_kld'] = {}, {}
self.info['stats']['rms'], self.info['stats']['log_rms'] = {}, {}
if self.truth is not None:
for key in self.info['estimators']:
self.info['stats']['kld'][key] = s.calculate_kld(np.exp(self.info['log_tru_nz']), np.exp(self.info['estimators'][key]))
# self.info['stats']['log_kld'][key] = s.calculate_kld(self.log_tru_nz, self.info['estimators'][key])
self.info['stats']['rms']['true_nz' + '__' + key[4:]] = s.calculate_rms(np.exp(self.info['log_tru_nz']), np.exp(self.info['estimators'][key]))
self.info['stats']['log_rms']['log_true_nz'+ '__' + key] = s.calculate_rms(self.info['log_tru_nz'], self.info['estimators'][key])
for i in range(len(self.info['estimators'].keys())):
key_1 = self.info['estimators'].keys()[i]
for j in range(len(self.info['estimators'].keys()[:i])):
key_2 = self.info['estimators'].keys()[j]
# print(((i,j), (key_1, key_2)))
self.info['stats']['log_rms'][key_1 + '__' + key_2] = s.calculate_rms(self.info['estimators'][key_1], self.info['estimators'][key_2])
self.info['stats']['rms'][key_1[4:] + '__' + key_2[4:]] = s.calculate_rms(np.exp(self.info['estimators'][key_1]), np.exp(self.info['estimators'][key_2]))
out_info = self.info['stats']
if vb:
print(out_info)
return out_info
def plot_estimators(self, log=True, mini=True):
"""
Plots all available estimators of the redshift density function.
"""
if mini:
also = 'mini'
else:
also = ''
if log:
plots.plot_estimators(self.info, self.plot_dir, prepend=self.add_text+also+'log_', mini=mini)
else:
plots.plot_estimators(self.info, self.plot_dir, log=False, prepend=self.add_text+also+'lin_', mini=mini)
return
def read(self, read_loc, style='pickle', vb=True):
"""
Function to load inferred quantities from files.
Parameters
----------
read_loc: string
filepath where inferred redshift density function is stored
style: string, optional
keyword for file format, currently only 'pickle' supported
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
self.info: dict
returns the log_z_dens information dictionary object
"""
with open(os.path.join(self.res_dir, read_loc), 'rb') as file_location:
self.info = cpkl.load(file_location)
if vb:
print('The following quantities were read from '+read_loc+' in the '+style+' format:')
for key in self.info:
print(key)
if 'estimators' in self.info:
print(self.info['estimators'].keys())
return self.info
def write(self, write_loc, style='pickle', vb=True):
"""
Function to write results of inference to files.
Parameters
----------
write_loc: string
filepath where results of inference should be saved.
style: string, optional
keyword for file format, currently only 'pickle' supported
vb: boolean, optional
True to print progress messages to stdout, False to suppress
"""
with open(os.path.join(self.res_dir, write_loc), 'wb') as file_location:
cpkl.dump(self.info, file_location)
if vb:
print('The following quantities were written to '+write_loc+' in the '+style+' format:')
for key in self.info:
print(key)
return
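# --- Hedged usage sketch (not part of the original module) ---
# One way the estimators above might be driven end to end. The catalog dict and
# hyperprior object are placeholders built elsewhere (e.g. with chippr.catalog
# and chippr.mvn); the walker initialization below is only illustrative.
def _example_infer_nz(catalog, hyperprior, n_walkers=100):
    """Illustrative only: compute point estimates and samples of log n(z)."""
    nz = log_z_dens(catalog, hyperprior, vb=False)
    log_stacked = nz.calculate_stacked()
    log_mmap = nz.calculate_mmap()
    log_mmle = nz.calculate_mmle(log_stacked)
    ivals = log_stacked[np.newaxis, :] + 0.01 * np.random.randn(n_walkers, nz.n_bins)
    log_samples = nz.calculate_samples(ivals)
    stats = nz.compare()
    nz.write('nz_results.p')
    return log_mmle, log_samples, stats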
|
mit
| 7,646,769,710,592,497,000
| 38.782686
| 169
| 0.569481
| false
| 3.749084
| false
| false
| false
|
Wikidata/StrepHit
|
strephit/side_projects/wlm.py
|
1
|
3270
|
# -*- encoding: utf-8 -*-
import click
import csv
import logging
from strephit.commons import wikidata, cache
from collections import defaultdict
logger = logging.getLogger(__name__)
COLUMN_TO_PROPERTY = {
'località': 'P131',
'Prov': 'P131',
'indirizzo': 'P969',
'proprieta': 'P127',
'WLMID': 'P2186',
}
@cache.cached
@wikidata.resolver('P127', 'P131')
def place_resolver(property, value, language, **kwargs):
types = [
3146899, # diocese of the Catholic Church
747074, # comune of Italy
515, # city
15089, # province of Italy
]
value = value.lower()
if 'com.' in value or 'comune' in value:
value = value.replace('com.', '').replace('comune', '').strip()
types = [747074]
elif 'prov.' in value or 'provincia' in value:
value = value.replace('prov.', '').replace('provincia', '').strip()
types = [15089]
results = wikidata.search(value, language, type_=types)
if results:
res = results[0]['id']
logger.debug('resolved "%s" to %s', value, res.encode('utf8'))
return res
else:
logger.debug('could not resolve "%s"', value)
return ''
@wikidata.resolver('P2186')
def wlmid_resolver(property, value, language, **kwargs):
return value
@cache.cached
@wikidata.resolver('P969')
def indirizzo_resolver(property, value, language, **kwargs):
return '%s@"%s"' % (language, value)
def process_row(data):
subject = data['emergenza']
resolved = defaultdict(lambda: [])
for k, v in data.iteritems():
if COLUMN_TO_PROPERTY.get(k):
v = wikidata.resolve(COLUMN_TO_PROPERTY[k], v.decode('utf8'), 'it')
if v:
resolved[COLUMN_TO_PROPERTY[k]].append(v)
info = {k: v for k, v in resolved.iteritems()}
subject = wikidata.resolver_with_hints('ddd', subject, 'it', **info)
if subject:
statements = []
for property, value in resolved.iteritems():
stmt = wikidata.finalize_statement(subject, property, value,
'it', resolve_property=False,
resolve_value=False)
if stmt is not None:
statements.append(stmt)
else:
logger.warn('could not find the wikidata id of "%s"' % data['emergenza'])
statements = None
return statements
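# Hedged illustration (not part of the original script): the shape of a CSV row
# that process_row() expects, keyed by the columns in COLUMN_TO_PROPERTY plus
# the monument name column 'emergenza'. Values are made-up placeholders.
_EXAMPLE_ROW = {
    'emergenza': 'Castello Sforzesco',
    'località': 'Milano',
    'Prov': 'MI',
    'indirizzo': 'Piazza Castello',
    'proprieta': 'Comune di Milano',
    'WLMID': '123456',
}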
@click.command()
@click.argument('input', type=click.File('r'))
@click.argument('output', type=click.File('w'))
@click.option('--skipped', type=click.File('w'), help='save the ids of un-resolved monuments')
def main(input, output, skipped):
rows = count = skipped_count = 0
for row in csv.DictReader(input):
rows += 1
statements = process_row(row)
if statements is None:
skipped_count += 1
if skipped:
skipped.write(row['WLMID'])
skipped.write('\n')
else:
for each in statements:
count += 1
output.write(each.encode('utf8'))
output.write('\n')
logger.info('Processed %d items (skipped %d), produced %d statements',
rows, skipped_count, count)
|
gpl-3.0
| 5,434,997,803,650,359,000
| 29.849057
| 94
| 0.57156
| false
| 3.637375
| false
| false
| false
|
paristote/zipf
|
app/zipf.py
|
1
|
3862
|
import subprocess
import os
import re
import collections
import sys
import shutil
import urllib
import urlparse
from flask import Flask, redirect, render_template, url_for, request
app=Flask(__name__)
class WordCounter(dict):
def __missing__(self, key):
return 0
# Debug mode True or False
DEBUG=(len(sys.argv) >= 2 and "debug"==sys.argv[1])
# Where all analyzed repos are cloned
TEMP_FOLDER="../clones"
# Pattern to identify a valid word: only letters, 2 or more times
VALID_WORD_PATTERN=re.compile('^[a-zA-Z]{2,}$')
# Pattern to separate words in a line: any of : _ = ' " ( ) and whitespace chars
WORD_SEPARATOR=re.compile('[:_=\'"\s\(\)]')
# Error codes
ERR_EMPTY_ORGA_REPO=10
ERR_ORGA_REPO_DONT_EXIST=11
# Routes
@app.route("/")
def home():
return render_template("index.html")
@app.route("/zipf", methods=['POST'])
def zipf():
# Prepare variables
orga=request.form['orga']
repo=request.form['repo']
logd("GitHub organization: "+orga)
logd("GitHub repository : "+repo)
if len(orga) == 0 or len(repo) == 0 :
return redirect(url_for("error", err=ERR_EMPTY_ORGA_REPO))
gitUrl="https://github.com/{0}/{1}.git".format(orga, repo)
cloneFolder="{0}/{1}/{2}".format(TEMP_FOLDER, orga, repo)
# Clone the repository
gitCloneSuccess=gitExport(gitUrl, cloneFolder)
if not gitCloneSuccess:
return redirect(url_for("error", err=ERR_ORGA_REPO_DONT_EXIST))
# Get the list of all files in the repository
files=walkProject(cloneFolder)
# Count each word occurrences
wordCount=WordCounter()
for f in files:
countWordsInFile(f, wordCount)
# Keep only the top 50 words and order them desc
mostCommon=collections.Counter(wordCount).most_common(50)
# Transform the words dic into key=value url query string
words=urllib.urlencode(mostCommon)
return redirect(url_for("result", orga=orga, repo=repo, words=words))
@app.route("/result/<orga>/<repo>/<words>")
def result(orga, repo, words):
# Transform the query string back into a dictionary of tuples (word, occurrences)
result=urlparse.parse_qsl(words)
return render_template("result.html", orga=orga, repo=repo, result=result)
@app.route("/error/<err>")
def error(err):
return render_template("index.html", error=err)
# ZIPF
# Clone the GitHub repo and delete all .git folders
# Return False if the git command failed
# Return True otherwise, eg if the destination folder already exists
def gitExport(gitUrl, cloneFolder):
if os.path.exists(cloneFolder):
return True
try:
subprocess.check_call(["git", "clone", "-q", "--depth=1", gitUrl, cloneFolder])
except subprocess.CalledProcessError as cpe:
logd("Command failed "+str(cpe.cmd))
return False
try:
subprocess.check_call(["rm", "-rf", cloneFolder+"/.git"])
except subprocess.CalledProcessError as cpe:
logd("Command failed "+str(cpe.cmd))
return True
# True if the given string is a valid word
def isValid(word):
return re.match(VALID_WORD_PATTERN, word)
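# Hedged illustration (not used by the app): tokenize a line the same way
# countWordsInFile() does below, keeping only valid, lowercased words.
# e.g. _demoTokenize('def main(argv): x = "hello world"')
# -> ['def', 'main', 'argv', 'hello', 'world']
def _demoTokenize(line):
    return [w.lower() for w in re.split(WORD_SEPARATOR, line) if isValid(w)]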
# Walk through all folders and return a list of file paths
def walkProject(cloneFolder):
res=set()
for root, dirs, files in os.walk(cloneFolder):
for fname in files:
if fname[0] != ".":
res.add(os.path.join(root,fname))
return res
# Open the given file, then parse each line to count each word's number of occurrences
# The result is added to the given WordCounter
def countWordsInFile(fileName, wc):
if os.path.exists(fileName):
with open(fileName, 'r') as f:
for line in f:
words=re.split(WORD_SEPARATOR, line)
for word in words:
if isValid(word):
wc[word.lower()]+=1
# Print a debug log message
def logd(message):
if DEBUG:
print "[DEBUG] " + str(message)
# Delete the folder with the given full path
# Unused
def deleteFolder(folder):
if os.path.exists(folder):
shutil.rmtree(folder)
# Server
if DEBUG:
print " * Running in DEBUG mode"
if __name__ == "__main__":
app.run(debug=DEBUG)
|
gpl-2.0
| -1,967,538,582,482,414,600
| 26.784173
| 83
| 0.711289
| false
| 3.137287
| false
| false
| false
|
pughlab/cbioportal
|
core/src/main/scripts/migrate_db.py
|
1
|
14251
|
#!/usr/bin/env python3
import os
import sys
import contextlib
import argparse
from collections import OrderedDict
import MySQLdb
# globals
ERROR_FILE = sys.stderr
OUTPUT_FILE = sys.stdout
DATABASE_HOST = 'db.host'
DATABASE_NAME = 'db.portal_db_name'
DATABASE_USER = 'db.user'
DATABASE_PW = 'db.password'
VERSION_TABLE = 'info'
VERSION_FIELD = 'DB_SCHEMA_VERSION'
REQUIRED_PROPERTIES = [DATABASE_HOST, DATABASE_NAME, DATABASE_USER, DATABASE_PW]
ALLOWABLE_GENOME_REFERENCES = ['37', 'hg19', 'GRCh37', '38', 'hg38', 'GRCh38', 'mm10', 'GRCm38']
DEFAULT_GENOME_REFERENCE = 'hg19'
MULTI_REFERENCE_GENOME_SUPPORT_MIGRATION_STEP = (2,11,0)
class PortalProperties(object):
""" Properties object class, just has fields for db conn """
def __init__(self, database_host, database_name, database_user, database_pw):
# default port:
self.database_port = 3306
# if there is a port added to the host name, split and use this one:
if ':' in database_host:
host_and_port = database_host.split(':')
self.database_host = host_and_port[0]
if self.database_host.strip() == 'localhost':
print(
"Invalid host config '" + database_host + "' in properties file. If you want to specify a port on local host use '127.0.0.1' instead of 'localhost'",
file=ERROR_FILE)
sys.exit(1)
self.database_port = int(host_and_port[1])
else:
self.database_host = database_host
self.database_name = database_name
self.database_user = database_user
self.database_pw = database_pw
def get_db_cursor(portal_properties):
""" Establishes a MySQL connection """
try:
connection = MySQLdb.connect(host=portal_properties.database_host,
port = portal_properties.database_port,
user = portal_properties.database_user,
passwd = portal_properties.database_pw,
db = portal_properties.database_name)
except MySQLdb.Error as exception:
print(exception, file=ERROR_FILE)
port_info = ''
if portal_properties.database_host.strip() != 'localhost':
# only add port info if host is != localhost (since with localhost apparently sockets are used and not the given port) TODO - perhaps this applies for all names vs ips?
port_info = " on port " + str(portal_properties.database_port)
message = (
"--> Error connecting to server "
+ portal_properties.database_host
+ port_info)
print(message, file=ERROR_FILE)
raise ConnectionError(message) from exception
if connection is not None:
return connection, connection.cursor()
def get_portal_properties(properties_filename):
""" Returns a properties object """
properties = {}
with open(properties_filename, 'r') as properties_file:
for line in properties_file:
line = line.strip()
# skip line if its blank or a comment
if len(line) == 0 or line.startswith('#'):
continue
try:
name, value = line.split('=', maxsplit=1)
except ValueError:
print(
'Skipping invalid entry in property file: %s' % (line),
file=ERROR_FILE)
continue
properties[name] = value.strip()
missing_properties = []
for required_property in REQUIRED_PROPERTIES:
if required_property not in properties or len(properties[required_property]) == 0:
missing_properties.append(required_property)
if missing_properties:
print(
'Missing required properties : (%s)' % (', '.join(missing_properties)),
file=ERROR_FILE)
return None
# return an instance of PortalProperties
return PortalProperties(properties[DATABASE_HOST],
properties[DATABASE_NAME],
properties[DATABASE_USER],
properties[DATABASE_PW])
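# Hedged illustration (not part of the original script): a minimal properties
# file satisfying REQUIRED_PROPERTIES, with placeholder values. A port may be
# appended to db.host as shown (see PortalProperties above).
#
#   db.host=127.0.0.1:3306
#   db.portal_db_name=cbioportal
#   db.user=cbio_user
#   db.password=somepassword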
def get_db_version(cursor):
""" gets the version number of the database """
# First, see if the version table exists
version_table_exists = False
try:
cursor.execute('select table_name from information_schema.tables')
for row in cursor.fetchall():
if VERSION_TABLE == row[0].lower().strip():
version_table_exists = True
except MySQLdb.Error as msg:
print(msg, file=ERROR_FILE)
return None
if not version_table_exists:
return (0, 0, 0)
# Now query the table for the version number
try:
cursor.execute('select ' + VERSION_FIELD + ' from ' + VERSION_TABLE)
for row in cursor.fetchall():
version = tuple(map(int, row[0].strip().split('.')))
except MySQLdb.Error as msg:
print(msg, file=ERROR_FILE)
return None
return version
def is_version_larger(version1, version2):
""" Checks if version 1 is larger than version 2 """
if version1[0] > version2[0]:
return True
if version2[0] > version1[0]:
return False
if version1[1] > version2[1]:
return True
if version2[1] > version1[1]:
return False
if version1[2] > version2[2]:
return True
return False
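# Hedged illustration (not part of the original script): is_version_larger()
# compares (major, minor, patch) tuples element by element, e.g.
#   is_version_larger((2, 11, 0), (2, 10, 3))  -> True
#   is_version_larger((2, 10, 3), (2, 11, 0))  -> False
#   is_version_larger((1, 0, 0), (1, 0, 0))    -> False  (equal versions)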
def print_all_check_reference_genome_warnings(warnings, force_migration):
""" Format warnings for output according to mode, and print to ERROR_FILE """
space = ' '
indent = 28 * space
allowable_reference_genome_string = ','.join(ALLOWABLE_GENOME_REFERENCES)
clean_up_string = ' Please clean up the mutation_event table and ensure it only contains references to one of the valid reference genomes (%s).' % (allowable_reference_genome_string)
use_default_string = 'the default reference genome (%s) will be used in place of invalid reference genomes and the first encountered reference genome will be used.' % (DEFAULT_GENOME_REFERENCE)
use_force_string = 'OR use the "--force" option to override this warning, then %s' % (use_default_string)
forcing_string = '--force option in effect : %s' % (use_default_string)
for warning in warnings:
if force_migration:
print('%s%s\n%s%s\n' % (indent, warning, indent, forcing_string), file=ERROR_FILE)
else:
print('%s%s%s\n%s%s\n' % (indent, warning, clean_up_string, indent, use_force_string), file=ERROR_FILE)
def validate_reference_genome_values_for_study(warnings, ncbi_to_count, study):
""" check if there are unrecognized or varied ncbi_build values for the study, add to warnings if problems are found """
if len(ncbi_to_count) == 1:
for retrieved_ncbi_build in ncbi_to_count: # single iteration
if retrieved_ncbi_build.upper() not in [x.upper() for x in ALLOWABLE_GENOME_REFERENCES]:
msg = 'WARNING: Study %s contains mutation_event records with unsupported NCBI_BUILD value %s.'%(study, retrieved_ncbi_build)
warnings.append(msg)
elif len(ncbi_to_count) > 1:
msg = 'WARNING: Study %s contains mutation_event records with %s NCBI_BUILD values {ncbi_build:record_count,...} %s.'%(study, len(ncbi_to_count), ncbi_to_count)
warnings.append(msg)
def check_reference_genome(portal_properties, cursor, force_migration):
""" query database for ncbi_build values, aggregate per study, then validate and report problems """
print('Checking database contents for reference genome information', file=OUTPUT_FILE)
""" Retrieve reference genomes from database """
warnings = []
try:
sql_statement = """
select NCBI_BUILD, count(NCBI_BUILD), CANCER_STUDY_IDENTIFIER
from mutation_event
join mutation on mutation.MUTATION_EVENT_ID = mutation_event.MUTATION_EVENT_ID
join genetic_profile on genetic_profile.GENETIC_PROFILE_ID = mutation.GENETIC_PROFILE_ID
join cancer_study on cancer_study.CANCER_STUDY_ID = genetic_profile.CANCER_STUDY_ID
group by CANCER_STUDY_IDENTIFIER, NCBI_BUILD
"""
cursor.execute(sql_statement)
study_to_ncbi_to_count = {} # {cancer_study_identifier : {ncbi_build : record_count}}
for row in cursor.fetchall():
retrieved_ncbi_build, ref_count, study = row
if study in study_to_ncbi_to_count:
study_to_ncbi_to_count[study][retrieved_ncbi_build] = ref_count
else:
study_to_ncbi_to_count[study] = {retrieved_ncbi_build : ref_count}
for study in study_to_ncbi_to_count:
validate_reference_genome_values_for_study(warnings, study_to_ncbi_to_count[study], study)
except MySQLdb.Error as msg:
print(msg, file=ERROR_FILE)
sys.exit(1)
if warnings:
print_all_check_reference_genome_warnings(warnings, force_migration)
if not force_migration:
sys.exit(1)
def run_migration(db_version, sql_filename, connection, cursor):
"""
Goes through the sql and runs lines based on the version numbers. SQL version should be stated as follows:
##version: 1.0.0
INSERT INTO ...
##version: 1.1.0
CREATE TABLE ...
"""
sql_file = open(sql_filename, 'r')
sql_version = (0, 0, 0)
run_line = False
statements = OrderedDict()
statement = ''
for line in sql_file:
if line.startswith('##'):
sql_version = tuple(map(int, line.split(':')[1].strip().split('.')))
run_line = is_version_larger(sql_version, db_version)
continue
# skip blank lines
if len(line.strip()) < 1:
continue
# skip comments
if line.startswith('#'):
continue
# skip sql comments
if line.startswith('--') and len(line) > 2 and line[2].isspace():
continue
# only execute sql line if the last version seen in the file is greater than the db_version
if run_line:
line = line.strip()
statement = statement + ' ' + line
if line.endswith(';'):
if sql_version not in statements:
statements[sql_version] = [statement]
else:
statements[sql_version].append(statement)
statement = ''
if len(statements) > 0:
run_statements(statements, connection, cursor)
else:
print('Everything up to date, nothing to migrate.', file=OUTPUT_FILE)
def run_statements(statements, connection, cursor):
try:
cursor.execute('SET autocommit=0;')
except MySQLdb.Error as msg:
print(msg, file=ERROR_FILE)
sys.exit(1)
for version, statement_list in statements.items():
print(
'Running statements for version: %s' % ('.'.join(map(str, version))),
file=OUTPUT_FILE)
for statement in statement_list:
print(
'\tExecuting statement: %s' % (statement.strip()),
file=OUTPUT_FILE)
try:
cursor.execute(statement.strip())
except MySQLdb.Error as msg:
print(msg, file=ERROR_FILE)
sys.exit(1)
connection.commit()
def warn_user():
"""Warn the user to back up their database before the script runs."""
response = input(
'WARNING: This script will alter your database! Be sure to back up your data before running.\nContinue running DB migration? (y/n) '
).strip()
while response.lower() != 'y' and response.lower() != 'n':
response = input(
'Did not recognize response.\nContinue running DB migration? (y/n) '
).strip()
if response.lower() == 'n':
sys.exit()
def usage():
print(
'migrate_db.py --properties-file [portal properties file] --sql [sql migration file]',
file=OUTPUT_FILE)
def main():
""" main function to run mysql migration """
parser = argparse.ArgumentParser(description='cBioPortal DB migration script')
parser.add_argument('-y', '--suppress_confirmation', default=False, action='store_true')
parser.add_argument('-p', '--properties-file', type=str, required=True,
help='Path to portal.properties file')
parser.add_argument('-s', '--sql', type=str, required=True,
help='Path to official migration.sql script.')
parser.add_argument('-f', '--force', default=False, action='store_true', help='Force to run database migration')
parser = parser.parse_args()
properties_filename = parser.properties_file
sql_filename = parser.sql
# check existence of properties file and sql file
if not os.path.exists(properties_filename):
print('properties file %s cannot be found' % (properties_filename), file=ERROR_FILE)
usage()
sys.exit(2)
if not os.path.exists(sql_filename):
print('sql file %s cannot be found' % (sql_filename), file=ERROR_FILE)
usage()
sys.exit(2)
# parse properties file
portal_properties = get_portal_properties(properties_filename)
if portal_properties is None:
print('failure reading properties file (%s)' % (properties_filename), file=ERROR_FILE)
sys.exit(1)
# warn user
if not parser.suppress_confirmation:
warn_user()
# set up - get db cursor
connection, cursor = get_db_cursor(portal_properties)
if cursor is None:
print('failure connecting to sql database', file=ERROR_FILE)
sys.exit(1)
# execute - get the database version and run the migration
with contextlib.closing(connection):
db_version = get_db_version(cursor)
if is_version_larger(MULTI_REFERENCE_GENOME_SUPPORT_MIGRATION_STEP, db_version):
#retrieve reference genomes from database
check_reference_genome(portal_properties, cursor, parser.force)
run_migration(db_version, sql_filename, connection, cursor)
print('Finished.', file=OUTPUT_FILE)
# do main
if __name__ == '__main__':
main()
|
agpl-3.0
| -2,279,951,055,318,271,200
| 42.184848
| 197
| 0.617711
| false
| 4.00309
| false
| false
| false
|
tensorflow/deepmath
|
deepmath/deephol/deephol_loop/checkpoint_monitor.py
|
1
|
4882
|
"""Monitor the latest model checkpoint and compute embedding stores.
This library is a helper method for the loop to monitor checkpoints
when they get available. Once a new checkpoint appears, it gets copied over
to a temporary directory, then the embeddings are computed for the theorem
database. Finally, the checkpoint file is updated. Old checkpoints can be
removed in the meantime.
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import os
from tensorflow import gfile
from tensorflow import logging
def get_latest_checkpoint(dirname: str):
"""Get the latest checkpoint in the directory.
Args:
dirname: Name of the directory.
Returns:
Checkpoint prefix string.
"""
chkpt_file = os.path.join(dirname, 'checkpoint')
if not gfile.Exists(chkpt_file):
logging.info('File %s does not exist', chkpt_file)
return None
chkpt_export_folder = os.path.join(dirname, 'export')
if not gfile.Exists(chkpt_export_folder):
logging.info('Eval export folder %s does not exist', chkpt_export_folder)
return None
num_lines = 0
with gfile.Open(chkpt_file) as f:
for l in f:
num_lines += 1
if l.startswith(b'model_checkpoint_path:'):
return os.path.basename(l.strip().split()[1][1:-1])
return None
def set_latest_checkpoint(dirname: str, chkpt: str):
"""Set the latest checkpoint in the checkpoint file.
Args:
dirname: Directory in which the checkpoint is located.
chkpt: Checkpoint prefix.
"""
chkpt_file = os.path.join(dirname, 'checkpoint')
lines = []
if gfile.Exists(chkpt_file):
logging.info('Loading preexisting checkpoint file "%s"', chkpt_file)
with gfile.Open(chkpt_file) as f:
lines = [
l.strip()
for l in f.readlines()
if l.startswith(b'all_model_checkpoint_paths:')
]
else:
logging.info('No preexisting checkpoint file "%s"', chkpt_file)
with gfile.Open(chkpt_file, 'w') as f:
lines = [
'%s\n' % l.strip() for l in ([
'model_checkpoint_path: "%s"' % chkpt,
'all_model_checkpoint_paths: "%s"' % chkpt
] + lines)
]
f.writelines(lines)
def verbose_copy(src, tgt, overwrite=True):
logging.info('Copying "%s" -> "%s"', src, tgt)
gfile.Copy(src, tgt, overwrite=overwrite)
class CheckpointMonitor(object):
"""Class for syncing checkpoints between two directories."""
def __init__(self, model_directory, target_directory, checkpoints_to_keep=2):
self.model_directory = model_directory
self.target_directory = target_directory
self.checkpoints_to_keep = checkpoints_to_keep
def new_checkpoint(self):
logging.info('Looking for checkpoint in "%s"', self.model_directory)
chkpt = get_latest_checkpoint(self.model_directory)
logging.info('Checkpoint: %s', chkpt)
if chkpt != get_latest_checkpoint(self.target_directory):
logging.info('latest checkpoint: %s',
get_latest_checkpoint(self.target_directory))
return chkpt
else:
return None
def copy_latest_checkpoint(self):
"""Copy over the latest checkpoints to the target directory."""
chkpt = get_latest_checkpoint(self.model_directory)
logging.info('Got latest checkpoint: %s', chkpt)
if chkpt is None:
return None
# Check if the evaluation meta graph has been copied.
if self.has_checkpoint() is None:
# Don't copy temp export folders, e.g. 'temp-01234567/saved_model.pb'
export_file = gfile.Glob(
os.path.join(self.model_directory,
'export/best_exporter/[0-9]*/saved_model.pb'))[0]
      logging.info('Copying eval export file: %s', export_file)
target_export_dir = os.path.join(
self.target_directory, 'export/best_exporter',
os.path.basename(os.path.dirname(export_file)))
gfile.MakeDirs(target_export_dir)
verbose_copy(
export_file,
os.path.join(target_export_dir, os.path.basename(export_file)))
files = gfile.Glob(os.path.join(self.model_directory, chkpt) + b'.*')
logging.info('Copying files: %s', ', '.join(files))
for fname in files:
verbose_copy(fname,
os.path.join(self.target_directory, os.path.basename(fname)))
return chkpt
def update_latest_checkpoint(self, chkpt):
old_chkpt = get_latest_checkpoint(self.target_directory)
if old_chkpt != chkpt:
set_latest_checkpoint(self.target_directory, chkpt)
def has_checkpoint(self):
return get_latest_checkpoint(self.target_directory)
def get_checkpoint(self):
logging.info('Getting checkpoint for %s', self.target_directory)
chkpt = get_latest_checkpoint(self.target_directory)
if chkpt is None:
return None
else:
return os.path.join(self.target_directory, chkpt)
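# --- Hedged usage sketch (not part of the original library) ---
# One way the loop described in the module docstring might drive this class.
# ``compute_embeddings`` is a placeholder for the embedding-store computation,
# which is not defined in this file.
def _example_sync_once(model_dir, target_dir, compute_embeddings):
  """Illustrative only: copy a new checkpoint, embed, then publish it."""
  monitor = CheckpointMonitor(model_dir, target_dir)
  chkpt = monitor.new_checkpoint()
  if chkpt is None:
    return
  copied = monitor.copy_latest_checkpoint()
  if copied is not None:
    compute_embeddings(os.path.join(target_dir, copied))
    monitor.update_latest_checkpoint(copied)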
|
apache-2.0
| -8,842,323,261,161,711,000
| 33.871429
| 80
| 0.673085
| false
| 3.654192
| true
| false
| false
|
brocade-apj/anzsdnhackathon2016
|
www/www/__init__.py
|
1
|
1269
|
#! ../env/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Darin Sikanic'
__email__ = 'dsikanic@brocade.com'
__version__ = '1.0'
from flask import Flask
from webassets.loaders import PythonLoader as PythonAssetsLoader
from www.controllers.main import main
from www import assets
from www.extensions import (
cache,
assets_env,
mongo
)
from www.controllers.api import api
def create_app(object_name):
"""
An flask application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
Arguments:
object_name: the python path of the config object,
e.g. www.settings.ProdConfig
env: The name of the current environment, e.g. prod or dev
"""
app = Flask(__name__)
app.config.from_object(object_name)
# initialize the cache
cache.init_app(app)
    # connect to MongoDB
mongo.init_app(app)
# initialise the api
api.init_app(app)
# Import and register the different asset bundles
assets_env.init_app(app)
assets_loader = PythonAssetsLoader(assets)
for name, bundle in assets_loader.load_bundles().items():
assets_env.register(name, bundle)
# register our blueprints
app.register_blueprint(main)
return app
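# --- Hedged usage sketch (not part of the original module) ---
# How the factory above is typically driven; the settings path below is an
# assumption and should point at a real config object in this project.
def _example_run_dev():
    """Illustrative only: build the app from a hypothetical dev config and run it."""
    app = create_app('www.settings.DevConfig')
    app.run(debug=True)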
|
apache-2.0
| -7,661,005,437,306,028,000
| 21.660714
| 66
| 0.667455
| false
| 3.594901
| false
| false
| false
|
osamak/wikiproject-med
|
accounts/forms.py
|
1
|
1406
|
# -*- coding: utf-8 -*-
from django import forms
from userena.forms import SignupForm
from accounts.models import Profile
class WikithonSignupForm(SignupForm):
name = forms.CharField(label=Profile._meta.get_field('name').verbose_name,
max_length=100)
email = forms.EmailField(label=Profile._meta.get_field('email').verbose_name)
twitter = forms.CharField(label=Profile._meta.get_field('twitter').verbose_name,
max_length=20)
    bio = forms.CharField(label=Profile._meta.get_field('bio').verbose_name, widget=forms.Textarea)  # django.forms has no TextField; CharField with a Textarea widget is the form-level equivalent
avatar = forms.ImageField(label=Profile._meta.get_field('avatar').verbose_name)
def save(self):
# Save the parent form and get the user
new_user = super(WikithonSignupForm, self).save()
Profile.objects.create(user=new_user,
name=self.cleaned_data['name'],
email=self.cleaned_data['email'],
twitter=self.cleaned_data['twitter'],
bio=self.cleaned_data['bio'],
avatar=self.cleaned_data['avatar'])
return new_user
class EditProfile(forms.ModelForm):
class Meta:
model = Profile
fields = ['name', 'email', 'twitter','bio', 'avatar']
class ResendForm(forms.Form):
email = forms.EmailField()
|
agpl-3.0
| -1,130,210,196,212,903,600
| 40.352941
| 99
| 0.59744
| false
| 4.087209
| false
| false
| false
|
miketheman/opencomparison
|
settings/base.py
|
1
|
9605
|
# -*- coding: utf-8 -*-
# Django settings
import os.path
from os import environ
from django.template.defaultfilters import slugify
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
("Daniel Greenfeld", "pydanny@gmail.com"),
]
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/media/"
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "collected_static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/static/"
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "static"),
]
# Use the default admin media prefix, which is...
#ADMIN_MEDIA_PREFIX = "/static/admin/"
# List of callables that know how to import templates from various sources.
from memcacheify import memcacheify
CACHES = memcacheify()
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"reversion.middleware.RevisionMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"pagination.middleware.PaginationMiddleware",
"django_sorting.middleware.SortingMiddleware",
)
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.static",
"package.context_processors.used_packages_list",
"grid.context_processors.grid_headers",
"core.context_processors.current_path",
"profiles.context_processors.lazy_profile",
"core.context_processors.core_values",
]
PROJECT_APPS = [
"grid",
'core',
"homepage",
"package",
"profiles",
"apiv1",
"feeds",
"searchv2",
"importer",
]
PREREQ_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.humanize",
"django.contrib.staticfiles",
# external
"uni_form",
"pagination",
"django_extensions",
"south",
"tastypie",
"reversion",
"django_sorting",
#"django_modeler",
# Celery task queue:
#'djcelery',
'social_auth',
'floppyforms',
'rest_framework',
]
INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
ABSOLUTE_URL_OVERRIDES = {
"auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}
AUTH_PROFILE_MODULE = "profiles.Profile"
LOGIN_URL = "/login/github/"
LOGIN_REDIRECT_URLNAME = "home"
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
CACHE_TIMEOUT = 60 * 60
ROOT_URLCONF = "urls"
SECRET_KEY = "CHANGEME"
URCHIN_ID = ""
DEFAULT_FROM_EMAIL = 'Django Packages <djangopackages-noreply@djangopackages.com>'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_SUBJECT_PREFIX = '[Django Packages] '
try:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']
EMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']
EMAIL_PORT = 587
SERVER_EMAIL = 'info@cartwheelweb.com'
EMAIL_USE_TLS = True
DEBUG = False
except Exception as e:
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_SUBJECT_PREFIX = '[Cartwheel Web]'
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
if DEBUG:
CACHE_BACKEND = 'dummy://'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
PACKAGINATOR_HELP_TEXT = {
"REPO_URL": "Enter your project repo hosting URL here.<br />Example: https://github.com/opencomparison/opencomparison",
"PYPI_URL": "<strong>Leave this blank if this package does not have a PyPI release.</strong><br />What PyPI uses to index your package. <br />Example: django-uni-form",
}
PACKAGINATOR_SEARCH_PREFIX = "django"
# if set to False any auth user can add/modify packages
# only django admins can delete
RESTRICT_PACKAGE_EDITORS = True
# if set to False any auth user can add/modify grids
# only django admins can delete
RESTRICT_GRID_EDITORS = True
# package extenders are dicts that can include:
# form
# model
# grid_items
# package_displays
PACKAGE_EXTENDERS = []
CELERYD_TASK_TIME_LIMIT = 300
LAUNCHPAD_ACTIVE = False
LOCAL_INSTALLED_APPS = []
SUPPORTED_REPO = []
########################## Site specific stuff
FRAMEWORK_TITLE = "Django"
SITE_TITLE = "Django Packages"
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
if LOCAL_INSTALLED_APPS:
INSTALLED_APPS.extend(LOCAL_INSTALLED_APPS)
SUPPORTED_REPO.extend(["bitbucket", "github"])
if LAUNCHPAD_ACTIVE:
SUPPORTED_REPO += ["launchpad"]
AUTHENTICATION_BACKENDS = (
'social_auth.backends.contrib.github.GithubBackend',
'django.contrib.auth.backends.ModelBackend',
)
GITHUB_API_SECRET = environ.get('GITHUB_API_SECRET')
GITHUB_APP_ID = environ.get('GITHUB_APP_ID')
GITHUB_USERNAME = environ.get('GITHUB_USERNAME')
GITHUB_PASSWORD = environ.get('GITHUB_PASSWORD')
SOCIAL_AUTH_ENABLED_BACKENDS = ('github',)
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'associate_complete'
SOCIAL_AUTH_DEFAULT_USERNAME = lambda u: slugify(u)
SOCIAL_AUTH_EXTRA_DATA = False
SOCIAL_AUTH_CHANGE_SIGNAL_ONLY = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
LOGIN_REDIRECT_URL = '/'
# associate user via email
#SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "oc", # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
},
}
WSGI_APPLICATION = 'wsgi.application'
if DEBUG:
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
ADMIN_URL_BASE = environ.get('ADMIN_URL_BASE', r"^admin/")
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logutils.colorize.ColorizingStreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
},
'loggers': {
'django': {
'handlers': ['console', ],
'propagate': True,
'level': 'ERROR',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'': {
'handlers': ['console', ],
'level': os.environ.get('DEBUG_LEVEL', 'ERROR'),
},
}
}
URL_REGEX_GITHUB = r'(?:http|https|git)://github.com/[^/]*/([^/]*)/{0,1}'
########### redis setup
import redis
from rq import Worker, Queue, Connection
########### end redis setup
|
mit
| 2,146,213,555,930,917,400
| 26.289773
| 172
| 0.664029
| false
| 3.485123
| false
| false
| false
|
lucyparsons/OpenOversight
|
OpenOversight/migrations/versions/8ce7926aa132_.py
|
1
|
1166
|
"""empty message
Revision ID: 8ce7926aa132
Revises: cfc5f3fd5efe
Create Date: 2018-06-07 18:53:47.656557
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '8ce7926aa132'
down_revision = 'cfc5f3fd5efe'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(u'notes_officer_id_fkey', 'notes', type_='foreignkey')
op.drop_constraint(u'notes_creator_id_fkey', 'notes', type_='foreignkey')
op.create_foreign_key(None, 'notes', 'officers', ['officer_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key(None, 'notes', 'users', ['creator_id'], ['id'], ondelete='SET NULL')
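# Net effect: deleting an officer now also removes that officer's notes
# (ON DELETE CASCADE), while deleting a user only clears creator_id on the
# notes they created (ON DELETE SET NULL) instead of removing them.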
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'notes', type_='foreignkey')
op.drop_constraint(None, 'notes', type_='foreignkey')
op.create_foreign_key(u'notes_creator_id_fkey', 'notes', 'users', ['creator_id'], ['id'])
op.create_foreign_key(u'notes_officer_id_fkey', 'notes', 'officers', ['officer_id'], ['id'])
# ### end Alembic commands ###
|
gpl-3.0
| 1,934,391,166,582,591,200
| 34.333333
| 96
| 0.66295
| false
| 3.068421
| false
| false
| false
|
pabloli/MinimalModelAlg
|
running.py
|
1
|
8071
|
#%%
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import textwrap
import pydot
import random, string
def drawGraph(data2Draw,current,trues,pos):
if drawActive == False:
return ""
text = str("Now we delete:"+textwrap.fill(str(current), 40)+"\n\n\n\nTrues: "+textwrap.fill(str(trues), 40))
#print (text)
#return
width = 10
height = 15
plt.figure(figsize=(width, height))
dx = nx.DiGraph()
dx = nx.complete_multipartite_graph(1, 2, 3, 4)
dx.add_nodes_from(data2Draw['members'])
dx.add_edges_from(data2Draw['edges'])
if pos == 0:
pos = nx.random_layout(dx,2,4)
nx.draw_networkx_labels(dx, pos, data2Draw['labels'], font_size=16)
nx.draw_networkx_nodes(dx, pos, nodelist=data2Draw['members'], node_color='r',node_size=700)
nx.draw_networkx_nodes(
dx, pos, nodelist=data2Draw['name'], node_color='y', node_shape='s', node_size=1300, linewidths=10)
nx.draw_networkx_edges(
dx, pos, edgelist=data2Draw['edges'], edge_color='b', arrows=True)
red_patch = mpatches.Patch(color='red', label='Members')
yellow_patch = mpatches.Patch(color='y', label='Lamdas')
plt.legend(handles=[red_patch,yellow_patch], loc=1)
plt.annotate(text,xy=(1.05,0.5),xycoords='axes fraction', fontsize=14)
plt.show()
return pos
def mergeDict(dictA, dictB):
for m in dictB.keys():
if m not in dictA.keys():
dictA[m] = dictB[m]
dictA[m] = (dictA[m])
return dictA
def mergeData(data1, data2):
allData = {}
allData['labels'] = mergeDict(data1['labels'], data2['labels'])
allData['edges'] = data1['edges'] | data2['edges']
allData['members'] = data1['members'] | data2['members']
allData['name'] = list(set().union(data1['name'],[data2['name']]))
return allData
def getData(sentence):
leftMembers = sentence['leftMembers']
name = sentence['name']
rightMembers = sentence['rightMembers']
labels = {}
edges = []
for m in leftMembers:
labels[m] = m
edges.append((m, name))
for m in rightMembers:
labels[m] = m
edges.append((name, m))
labels[name] = name
# if len (rightMembers) == 0:
# leftMembers = []
return ({'labels': (labels), 'edges': set(edges),
'members': set(leftMembers) | set(rightMembers), 'name': name})
def drawGraph2(data2Draw):
graph = pydot.Dot(graph_type='graph')
pyNodes = {}
for n in set(data2Draw['members']):
pyNodes[n] = pydot.Node(n, fillcolor='r')
for n in set(data2Draw['names']):
pyNodes[n] = pydot.Node(n, fillcolor="b", shape='box')
for edge in set(data2Draw['edges']):
graph.add_edge(pydot.Edge(edge[0], edge[1], label='a', color='r'))
graph.write_png('ojj.png')
def findIndependetMembers(data,val):
if val == 0:
independentMember = {key: 0 for key in set(data['name'])}
else:
independentMember = {key: 0 for key in set(data['members'])}
for k in data['edges']:
if k[1] in independentMember:
independentMember[k[1]] += 1
return [l for l in independentMember.keys() if independentMember[l]==0]
def updateGraph(dataToProcess, trues, pos):
dataToProcess, lamdasDeleted = removeLambda(dataToProcess, trues, pos)
dataToProcess, membersDeleted = removeMember(dataToProcess, trues, pos)
return dataToProcess, membersDeleted
def removeLambda(dataToProcess, trues, pos):
toDelete = findIndependetMembers(dataToProcess,0)
for e in [l for l in dataToProcess['edges'] if l[0] in toDelete]:
dataToProcess['edges'].remove(e)
for e in toDelete:
del dataToProcess['labels'][e]
dataToProcess['name'].remove(e)
drawGraph(dataToProcess,toDelete,trues, pos)
return dataToProcess, toDelete
def removeMember(dataToProcess,trues,pos):
toDelete = findIndependetMembers(dataToProcess,1)
for e in [l for l in dataToProcess['edges'] if l[0] in toDelete]:
dataToProcess['edges'].remove(e)
for e in toDelete:
del dataToProcess['labels'][e]
dataToProcess['members'].remove(e)
drawGraph(dataToProcess,toDelete,trues , pos)
return dataToProcess, toDelete
def updateSentencesAndGetTrues(sentences, membersToDelete):
retValue = []
for m in membersToDelete:
for s in sentences:
if [m] == s['rightMembers']:
retValue.append(m)
s['edges'] = []
continue;
if m in s['leftMembers']:
del s
continue
if m in s['rightMembers']:
s['rightMembers'].remove(m)
return set(retValue), sentences
def convertJson2Theory(sentences):
[print (l['name']+':'+str(l['leftMembers'])+'->'+str(l['rightMembers'])) for l in sentences]
def generateRandomSentences(num,lenght,members,size):
retValue = []
for i in range(num):
val = {}
lengthR = lengthL = 0
while lengthL == 0 and lengthR == 0: # re-roll until at least one side is non-empty
lengthR = int (random_num(lenght))
lengthL = int (random_num(lenght))
val = {}
val['name']= 'L' +str(i)
val['leftMembers']=[]
for j in range(lengthL):
val['leftMembers'].append(randomword(members,size))
val['rightMembers']=[]
for j in range(lengthR):
val['rightMembers'].append(randomword(members,size))
retValue.append(val)
return retValue
def random_num(length):
return random.randint(0, length)
def randomword(members,length):
return ''.join(random.choice(string.ascii_lowercase[:members]) for i in range(length))
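# A minimal sketch of the input format readFile() below expects, inferred from
# its replace/split calls ('example 1.txt' itself is not reproduced here).
# Each line reads <name>:[<left members>]->[<right members>], e.g.:
# L1:[]->[a,b]
# L2:[b]->[a]
# L4:[a]->[d,e,f]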
def readFile(filename):
f = open(filename,'r').read().replace(':[',';').replace(']->[',';').replace(']','').replace("'",'').replace(" ",'')
allS = f.split('\n')
listSentences = []
for i in allS:
ele = {}
t = i.split(';')
ele ['name']=t[0]
if t[1] == '':
ele['leftMembers'] = []
else:
ele['leftMembers'] = t[1].split(',')
if t[2] == '':
ele['rightMembers'] = []
else:
ele['rightMembers'] = t[2].split(',')
listSentences.append(ele)
return listSentences
def insert_newlines(string, every=64):
return '\n'.join(string[i:i+every] for i in range(0, len(string), every))
def main(theory):
convertJson2Theory(theory)
trues = []
deletedMembers = []
data = {'name':[],'edges':set(),'members':set(),'labels':{}}
for oS in theory:
data = mergeData(data, getData(oS))
sentences = theory
pos = ""
pos = drawGraph(data,deletedMembers,trues, 0)
#drawGraph2(data)
lastState = []
curState = [len(data['name']),len(data['edges']),len(data['members'])]
while lastState != curState:
lastState = curState
data,membersDeleted = updateGraph(data,trues, pos)
curT, sentences = updateSentencesAndGetTrues(sentences, membersDeleted)
curState = [len(data['name']),len(data['edges']),len(data['members'])]
if len(curT)>0:
trues+=curT
trues+=data['members']
drawGraph(data,[''],trues,pos)
plt.clf()
print ('Trues:')
print (trues)
print ('--------Finished--------')
#%%
theory = [
{'leftMembers': [], 'name': ('L1'), 'rightMembers': ['a', 'b']},
{'leftMembers': ['b'], 'name': ('L2'), 'rightMembers': ['a']},
{'leftMembers': [], 'name': ('L3'), 'rightMembers': ['a', 'c']},
{'leftMembers': ['a'], 'name': ('L4'), 'rightMembers': ['d', 'e', 'f']},
{'leftMembers': ['e'], 'name': ('L5'), 'rightMembers': ['f']},
{'leftMembers': ['f'], 'name': ('L6'), 'rightMembers': ['e']},
]
drawActive = True
main(theory)
#%%
drawActive = True
theory = readFile('example 1.txt')
main(theory)
#%%
drawActive = False
for i in range(100):
print ('-------------------------')
print ("Running N-", i)
main(generateRandomSentences(10,2,15,1))
|
mit
| 3,964,466,049,553,769,000
| 32.629167
| 119
| 0.590633
| false
| 3.358718
| false
| false
| false
|
datawire/quark
|
quarkc/test/ffi/expected/py/signatures/generics/ccc/__init__.py
|
1
|
2758
|
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("generics.ccc")
import quark.reflect
class TLSContextInitializer(_QObject):
def _init(self):
pass
def __init__(self): self._init()
def getValue(self):
return _cast(None, lambda: Context)
def _getClass(self):
return u"generics.ccc.TLSContextInitializer"
def _getField(self, name):
return None
def _setField(self, name, value):
pass
TLSContextInitializer.generics_ccc_TLSContextInitializer_ref = None
class Context(_QObject):
def _init(self):
self.parent = None
def __init__(self, parent):
self._init()
@staticmethod
def current():
return _cast(None, lambda: Context)
@staticmethod
def global_():
return _cast(None, lambda: Context)
def _getClass(self):
return u"generics.ccc.Context"
def _getField(self, name):
if ((name) == (u"_global")):
return Context._global
if ((name) == (u"_current")):
return Context._current
if ((name) == (u"parent")):
return (self).parent
return None
def _setField(self, name, value):
if ((name) == (u"_global")):
Context._global = _cast(value, lambda: Context)
if ((name) == (u"_current")):
Context._current = _cast(value, lambda: TLS)
if ((name) == (u"parent")):
(self).parent = _cast(value, lambda: Context)
Context._global = None
Context._current = None
Context.generics_ccc_Context_ref = None
Context.generics_ccc_TLS_generics_ccc_Context__ref = None
class TLSInitializer(object):
def getValue(self):
raise NotImplementedError('`TLSInitializer.getValue` is an abstract method')
TLSInitializer.generics_ccc_TLSInitializer_quark_Object__ref = None
class TLS(_QObject):
def _init(self):
self._value = None
def __init__(self, initializer):
self._init()
def getValue(self):
return _cast(None, lambda: T)
def _getClass(self):
return u"generics.ccc.TLS<quark.Object>"
def _getField(self, name):
if ((name) == (u"_value")):
return (self)._value
return None
def _setField(self, name, value):
if ((name) == (u"_value")):
(self)._value = _cast(value, lambda: T)
def _lazy_import_quark_ffi_signatures_md():
import quark_ffi_signatures_md
globals().update(locals())
_lazyImport("import quark_ffi_signatures_md", _lazy_import_quark_ffi_signatures_md)
_lazyImport.pump("generics.ccc")
|
apache-2.0
| -3,211,641,821,664,854,500
| 23.40708
| 84
| 0.614213
| false
| 3.73207
| false
| false
| false
|
rodo/django-perf
|
foo/offset/management/commands/keypage_run.py
|
1
|
1911
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013,2014 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import imp
from django.core.management.base import BaseCommand
from optparse import make_option
from django.core.paginator import Paginator
from foo.offset.models import Log
from foo.july.models import BigBook
import logging
from datetime import datetime
class Command(BaseCommand):
help = 'Import datas'
def handle(self, *args, **options):
"""
Use keyset pagination
"""
log = Log.objects.create(name='keypage',
start=datetime.now(),
stop=datetime.now())
nb = 0
keyid = 0
while True:
queryset = BigBook.objects.filter(serie=3)
books = queryset.filter(keyid__gt=keyid).order_by('keyid')[:250]
for book in books:
keyid = book.keyid
# do what you want here
if book.nbpages > 500:
nb = nb + 1
if len(books) < 250:
break
log.stop = datetime.now()
log.save()
print "keypage", log.stop - log.start, nb
|
gpl-3.0
| 2,428,596,492,873,267,700
| 30.833333
| 76
| 0.618848
| false
| 4.04661
| false
| false
| false
|
paulscottrobson/wallpaper-one
|
software/minol - arithmetic/generate.py
|
1
|
1446
|
#
# Generate math tests
#
import random
random.seed(412)
monitor = [ord(x) for x in open("..\monitor_rom\monitor.bin","rb").read(-1)]
mvars = { "C":10,"D":20,"Z":33 }
def rnd(maxval):
n = maxval+1
term = ""
while n >= maxval:
n = random.randrange(0,255)
term = str(n)
if random.randrange(0,3) == 0:
k = mvars.keys()
term = k[random.randrange(0,len(k))]
n = mvars[term]
if random.randrange(0,5) == 0:
n = random.randrange(32,96)
term = "'"+chr(n)+"'"
if n==34 or n == 0x27 or n == ord("\\"):
n = maxval+1
if random.randrange(0,5) == 0:
h = random.randrange(0,8)
h = [h,str(h)]
if random.randrange(0,8) == 0:
h = rnd(8)
l = rnd(256)
n = monitor[h[0]*256+l[0]]
term = "({0},{1})".format(h[1],l[1])
return [n,term]
ptr = 0x9300
while ptr < 0xFF00:
n1 = rnd(255)
result = n1[0]
expr = n1[1]
for parts in range(0,random.randrange(2,7)):
op = random.randrange(0,4)
if op < 2:
n1 = rnd(255)
result = result + (n1[0] if op == 0 else -n1[0])
result = (result + 256) & 255
expr = expr + ("+" if op == 0 else "-") + str(n1[1])
if op == 2 and result < 50 and result > 0:
n1 = rnd(int(255/result))
result = result * n1[0]
expr = expr + "*" + n1[1]
if op == 3 and result > 10:
n1 = rnd(int(result/2))
if n1[0] > 0:
result = int(result / n1[0])
expr = expr + "/" + n1[1]
print(' db "{0}",0,{1}'.format(expr,result))
ptr = ptr + len(expr)+2
|
mit
| -4,739,302,166,664,086,000
| 22.322581
| 76
| 0.539419
| false
| 2.30622
| false
| false
| false
|
gem/sidd
|
sidd/ms/node.py
|
1
|
29915
|
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module class for statistic node handling
"""
from copy import deepcopy
from utils.xml import get_node_attrib
from sidd.constants import logAPICall
from sidd.ms.exceptions import StatisticNodeError
class StatisticModifier(object):
def __init__(self, name='', level=None):
self.name = name
self.level = level
self.values = {}
@property
def is_default(self):
return len(self.values) == 1 and self.values.keys()[0] is None
def iteritems(self):
return self.values.iteritems()
def keys(self):
return self.values.keys()
def value(self, key):
if self.values.has_key(key):
return self.values[key]
else:
return ''
def calculate_weights(self, count):
for k, v in self.values.iteritems():
self.values[k] = v * 100.0 / count
def update(self, key):
if not self.values.has_key(key):
self.values[key]=1
else:
self.values[key]+= 1
def __str__(self):
outstr=[]
outstr.append('(name: "%s" ' % self.name)
for k, v in self.values.iteritems():
outstr.append("%s: %2.1f%% " % (k, v))
outstr.append(')')
# return joined string
return ''.join(outstr)
@logAPICall
def to_xml(self, pretty=False):
""" generate XML representation of current node """
outstr = []
pad = ''
line_break=''
outstr.append('%s <modifier name="%s" level="%s">%s' % (pad, self.name, self.level, line_break))
for k, v in self.values.iteritems():
outstr.append('%s <modifiervalue value="%s" weight="%s" />%s'
% (pad, k, v, line_break))
outstr.append('%s </modifier>%s' % (pad, line_break))
# return joined string
return ''.join(outstr)
@logAPICall
def from_xml(self, xmlnode):
self.name = get_node_attrib(xmlnode, 'name')
self.level = get_node_attrib(xmlnode, 'level')
for mod_node in xmlnode.findall('modifiervalue'):
if mod_node.attrib['value'] == 'None':
val = None
else:
val = get_node_attrib(mod_node, 'value')
self.values[val]=float(get_node_attrib(mod_node, 'weight'))
class StatisticNode (object):
"""
A statistic node forms part of a statistic tree.
Each node stores structural as well as tree related information
structural related information
-value: taxonomy value representing a structural type
-count: count of values included
-weight: count of current node as percentage
of count of parent
-modifiers: these are less important features on the structural type
this is used mainly to reduce the size of the statistical tree
tree related information
-level: level of node in a statistic tree
-children: collection of child StatisticNode
"""
# static members
###########################
# additional values to be attached to the node
AverageSize, UnitCost = range(2)
# constructor / destructor
###########################
def __init__(self, parent, name='', value='', level=0,
is_default=False, is_skipped=False):
""" constructor """
self.parent=parent
self.name=name
self.value=value
self.additional = {}
self.label_additional = ["avg_size", "unit_cost"]
self.is_skipped=is_skipped
self.is_default=is_default
self.count=0
self.weight=0.0
self.modifiers=[]
self.level=level
self.children=[]
def __del__(self):
""" destructor """
del self.parent
del self.name
del self.value
del self.is_default
del self.count
del self.modifiers
del self.level
for child in self.children:
del child
# property methods
###########################
@property
def is_leaf(self):
""" is leaf if does not have children """
return len(self.children) == 0
@property
def is_valid(self):
return round(sum([c.weight for c in self.children]),0) == 100
@property
def max_level(self):
""" get max level under current node """
level = self.level
for child in self.children:
if child.max_level > level:
level = child.level
return level
@property
def clone(self):
""" get a cloned copy of the node and all its children """
return deepcopy(self)
@property
def ancestor_names(self):
if self.parent is not None:
names = self.parent.ancestor_names
names.append(self.parent.name)
return names
else:
return []
@property
def descendant_names(self):
if self.is_leaf:
return []
else:
names = {}
for child in self.children:
names[child.name] = 1
for name in child.descendant_names:
names[name] = 1
return names.keys()
# serialize / deserialize
###########################
def __str__(self):
"""
get string representation of node.
it works by recursively retrieving string from its children nodes
"""
# use list to hold strings for each line and concatenate at the end of
# the function to avoid creating throw-away string objects
outstr = []
# add space to make it pretty
outstr.append(' ' * self.level)
# add current node
outstr.append('%s:[%s=%s (%s, %s, %2.1f%% - %d)]'
% (self.level, self.name, self.value, self.is_default,
self.is_skipped, self.weight, self.count))
# add modifiers for current node
for mod in self.modifiers:
outstr.append(str(mod))
# show additional data in leaf node
if self.is_leaf:
outstr.append(str(self.additional))
outstr.append('\n')
# add children
for child in self.children:
outstr.append(str(child))
# return joined string
return ''.join(outstr)
@logAPICall
def to_xml(self, pretty=False):
""" generate XML representation of current node """
outstr = []
pad = ''
line_break=''
if (pretty):
for i in range(self.level):
pad += ' '
line_break='\n'
outstr.append('%s<node attribute="%s" value="%s" level="%d" is_default="%s" is_skipped="%s" weight="%f">%s'
% (pad, self.name, self.value, self.level, self.is_default,
self.is_skipped, self.weight, line_break))
for key,value in self.additional.iteritems():
outstr.append('%s <additional %s="%s" />' % (pad, self.label_additional[key], value))
outstr.append('%s <modifiers>%s' % (pad, line_break))
for mod in self.modifiers:
outstr.append(mod.to_xml(pretty))
outstr.append('%s </modifiers>%s' % (pad, line_break))
if not self.is_leaf:
outstr.append('%s <children>%s' % (pad, line_break))
for child in self.children:
outstr.append(child.to_xml(pretty))
outstr.append('%s </children>%s' % (pad, line_break))
outstr.append('%s </node>%s' % (pad, line_break))
return ''.join(outstr)
@logAPICall
def from_xml(self, xmlnode):
""" construct node and children from XML """
self.name = get_node_attrib(xmlnode, 'attribute')
self.value = get_node_attrib(xmlnode, 'value')
self.level = int(get_node_attrib(xmlnode, 'level'))
self.weight = float(get_node_attrib(xmlnode, 'weight'))
self.count = self.weight
self.is_default = str(get_node_attrib(xmlnode, 'is_default')).upper()=='TRUE'
self.is_skipped = str(get_node_attrib(xmlnode, 'is_skipped')).upper()=='TRUE'
for add_node in xmlnode.findall('additional'):
for idx, label in enumerate(self.label_additional):
add_value = get_node_attrib(add_node, label)
if add_value != '':
self.additional[idx]=add_value
for mod_node in xmlnode.findall('modifiers/modifier'):
mod = StatisticModifier()
mod.from_xml(mod_node)
self.modifiers.append(mod)
for childnode in xmlnode.findall('children/node'):
logAPICall.log('created new child with xmlnode %s' % childnode, logAPICall.DEBUG_L2)
node = StatisticNode(self)
node.from_xml(childnode)
self.children.append(node)
# readonly methods
###########################
@logAPICall
def leaves(self, taxonomy,
with_modifier=True, order_attributes=False,
parent_nodes=None, parent_weight = 1.0):
if parent_nodes is None:
parent_nodes = []
branch_nodes = {"":[]}
branch_weights = {"":1.0}
# generate modifier branch if requested
if with_modifier:
for mod in self.modifiers:
# each modifier generates the cross product of the existing branches with its values
cur_branch_nodes = {}
cur_branch_weights = {}
for mod_val, mod_weight in mod.iteritems(): # loop through modifiers.values
mod_weight /= 100.0
for branch, value in branch_nodes.iteritems(): # loop through existing branches
branch_weight = branch_weights[branch]
# cases that can occur are
# 1. modifier value is not None
# append modifier value and update weight
# 2. modifier value is None
# No new branch is created in this case. the weight of the branch is
# updated with modifier value
if ( mod_val is not None ): # case 1
if branch != "": # case 1.1
branch_key = branch + "|"+ mod_val
cur_branch_nodes[branch_key] = []
cur_branch_nodes[branch_key].append(mod_val)
cur_branch_weights[branch_key] = branch_weight * mod_weight
else: # case 1.2
cur_branch_nodes[mod_val] = []
cur_branch_nodes[mod_val].append(mod_val)
cur_branch_weights[mod_val] = branch_weight * mod_weight
else: # case 2
cur_branch_weights[branch] = branch_weight * mod_weight
branch_nodes = cur_branch_nodes
branch_weights = cur_branch_weights
for branch_key, nodes in branch_nodes.iteritems():
# root node (level=0) does not have taxonomy value attached
# but could still have modifier attached
added = 0
if self.level > 0:
if str(self.value) != "None":
parent_nodes.append(self.value)
added +=1
# modifier values
for node in nodes:
parent_nodes.append(node)
added +=1
weight = branch_weights[branch_key]
if (self.is_leaf):
leaf_value = taxonomy.to_string(parent_nodes, order_attributes)
yield leaf_value, parent_weight * self.weight / 100.0 * weight, self
for child in self.children:
for l in child.leaves(taxonomy, with_modifier, order_attributes,
parent_nodes, parent_weight * self.weight / 100.0 * weight):
yield l
# remove nodes
for i in range(added):
parent_nodes.pop()
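# Rough sketch of what this generator yields (illustrative only): each item is
# (taxonomy_string, weight, leaf_node), where taxonomy_string is built by
# taxonomy.to_string() from the node values plus any modifier values along the
# branch, and weight is the product of the per-level weights (each divided by
# 100) multiplied by the modifier branch weight.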
# weight related methods
###########################
@logAPICall
def set_child_weights(self, weights):
"""
set weights for all children nodes
throws exception
if weights do not add up to 100
if the number of children does not equal the number of weights
"""
# assert valid condition
if sum(weights) != 100:
raise StatisticNodeError('weight must added up to 100')
if len(weights) != len(self.children):
raise StatisticNodeError('number of weights does not equal number of children')
# set weight
for child, w in map(None, self.children, weights):
child.weight = w
@logAPICall
def calculate_weights(self):
"""
convert count into percentage relative to sum of count for all siblings
"""
# calculate weight for children based on count
if self.parent is not None:
if (self.parent.count != 0):
self.weight = self.count * 100.0 / self.parent.count
else:
self.weight = 0
else:
self.weight = 100
# calculate weight for attached modifiers based on count
for mod in self.modifiers:
mod.calculate_weights(self.count)
if self.is_leaf:
# update additional values
total_size = self.count # set to default for unitCost calculation
if self.additional.has_key(self.AverageSize):
total_size = self.additional[self.AverageSize]
self.additional[self.AverageSize] = float(total_size) / self.count
if self.additional.has_key(self.UnitCost):
# total_size defaults to count,
# so should not break even if AverageSize is not set
self.additional[self.UnitCost] /= total_size
# recursively traverse down to all children
# will be skipped by leaf nodes
for child in self.children:
child.calculate_weights()
@logAPICall
def balance_weights(self):
"""
adjust the children's weights to make sure they add up to 100%
"""
sum_weights = sum([child.weight for child in self.children])
total_children = len(self.children)
adj_factor = sum_weights / 100
for child in self.children:
if adj_factor == 0:
child.weight = 100.0 / total_children
else:
child.weight = child.weight / adj_factor
# tree modifying methods
###########################
@logAPICall
def add(self, attr_vals, parse_order, level, additional_data={}):
"""
recursively update statistics at this node and its child nodes
using attr_vals and parse_order at the given level
"""
# increment count of current node
self.count+=1
# the ending condition for the recursive call
# NOTE: is_leaf is not used here, this process should work on an empty tree
if (len(parse_order) <= level):
# leaf node also aggregate additional data
self.increment_additonal(self.AverageSize, additional_data)
self.increment_additonal(self.UnitCost, additional_data)
return
logAPICall.log('processing %d %s' %(level, parse_order[level]), logAPICall.DEBUG)
# get value to add/update children
# NOTE: value for current node is already set by its parent
# all processing/parsing is to work on its children
attr_name = parse_order[level]
value = None
for val in attr_vals:
if val.attribute.name == attr_name:
value = val
break
# handle default cases
is_default = False
if value is None:
is_default = True
elif value is not None and (str(value) == value.attribute.default or str(value) == value.attribute.group.default):
value = None
is_default = True
logAPICall.log('\tnode:%s' %(value), logAPICall.DEBUG_L2)
child_found = False
# find children and add value/modifier
for child in self.children:
if (child.value is None and value is None) or str(child.value) == str(value):
logAPICall.log('found child with %s' % value, logAPICall.DEBUG_L2)
child_found = True
# recursive call to process next level
child.add(attr_vals, parse_order, level+1, additional_data)
return
# if no children found, then add new node for value and add modifier
if not child_found:
logAPICall.log('create new child with %s' % value, logAPICall.DEBUG_L2)
child = StatisticNode(self, attr_name, value, self.level+1, is_default, False)
self.children.append(child)
# recursive call to process next level
child.add(attr_vals, parse_order, level+1, additional_data)
return
@logAPICall
def eliminate_empty(self):
"""
traverse the current tree, eliminating nodes with value=None and no sibling
"""
for child in self.children:
child.eliminate_empty()
if len(self.children) == 1:
child = self.children[0]
if child.value is None:
# eliminate
self.children = []
for grandchild in child.children:
grandchild.parent = self
self.children.append(grandchild)
grandchild.set_level_recursive(self.level+1)
del child
@logAPICall
def get_modifiers(self, max_level):
"""
generator providing access to all modifiers from node and children nodes
up to given max_level
"""
if self.level <= max_level: #and not self.is_leaf:
# own modifiers
for idx, mod in enumerate(self.modifiers):
# generator return
yield self, idx, mod
# traverse through children nodes
for child in self.children:
# recursively return children's modifiers with generator
for node, idx, mod in child.get_modifiers(max_level):
yield node, idx, mod
# else
# reached leaf or modifier from max depth level defined.
# return
@logAPICall
def delete_node(self, child):
"""
delete given node from children list, distribute its weight to
sibling nodes equally
throws exception if node is only child
"""
# assert valid condition
total_children = len(self.children)
if total_children == 1:
raise StatisticNodeError('only child. cannot be deleted')
# delete, wrapped in try/except to catch mismatched children
try:
# remove child
weight = child.weight
self.children.remove(child)
total_children -= 1
# evenly distribute deleted weight to sibling
for child in self.children:
child.weight = child.weight + (weight / total_children)
except:
raise StatisticNodeError('unknown error while deleting node')
@logAPICall
def add_modifier(self, val, mod_idx=0):
""" update statistic for specified modifier """
if len(self.modifiers) <= mod_idx:
mod = StatisticModifier(self.name, self.level)
mod.update(val)
self.modifiers.append(mod)
else:
self.modifiers[mod_idx].update(val)
@logAPICall
def set_level_recursive(self, level):
"""
recursively set the level of this node and all its children
this allows a node to be attached at an arbitrary level in the tree
NOTE: the resulting level must be greater than zero
"""
if level <= 0:
raise StatisticNodeError('resulting level must be > 0')
self.level = level
# adjust children
if not self.is_leaf:
for child in self.children:
child.set_level_recursive(level + 1)
@logAPICall
def matches(self, node):
"""
test to see if node matches self or any descendant
"""
if self == node:
return True
if self.is_leaf:
return False
for child in self.children:
if (child.matches(node)):
return True
return False
@logAPICall
def update_children(self, attribute, values, weights):
"""
simply update children based on given values/weights without checking
for position of values
"""
if sum(weights) <> 100:
raise StatisticNodeError('weight does not equal to 100')
to_add = len(values) - len(self.children)
if to_add > 0:
# need to add more nodes
for i in range(to_add):
child = StatisticNode(self, attribute, '', self.level+1)
self.children.append(child)
elif to_add < 0:
# need to delete nodes
start=len(values)
for i in range(to_add):
self.children.remove(self.children[start+i])
# set value/weights
idx = 0
for val, weight in map(None, values, weights):
child = self.children[idx]
child.value = val
child.weight = weight
idx += 1
@logAPICall
def update_children_complex(self, attribute, values, weights):
"""
update children based on given values/weights
"""
# check to make sure given values/weights can be used to
# update node's children
# check for following conditions
# 1. weights do not add up to 100.
# FAIL, cannot update
# 2. values not changed, only weights were updated
# ALLOW, update children with new weights
# 3. new values are added
# ALLOW, add new value nodes, update all weights
# 4. some values are deleted
# ALLOW, delete child node(s), update all weights
sum_weight = 0 # for case 1
# sum up weights,
# check for added/deleted nodes
added = []
to_delete = []
for v, w in map(None, values, weights):
# check deleted
if w == 0:
to_delete.append(v)
continue
# update sum
sum_weight += w
# check added
child_found = False
for child in self.children:
if child.value == v:
child_found = True
if not child_found:
added.append(v)
# find existing child nodes whose values were removed from the given list
for child in self.children:
try:
values.index(child.value)
except:
if len(added) > 0:
# reuse this child node (otherwise headed for to_delete) to host one of the added values;
# this can help in case a value is changed to another one:
# the children of the node that would be deleted can still be preserved
child.value = added[0]
added.remove(child.value)
else:
# nothing to add, remove the children
to_delete.append(child.value)
# case 1
if sum_weight <> 100:
raise StatisticNodeError('weight does not equal to 100')
# case 3, new values added
for v in added:
child = StatisticNode(self, attribute, v, self.level+1)
self.children.append(child)
# case 4, some values are deleted
for v in to_delete:
for child in self.children:
if child.value == v:
self.delete_node(child)
# after the changes above, only the weights remain to be updated (case 2)
for child in self.children:
try:
weight = weights[values.index(child.value)]
except:
weight = 0
child.weight = weight
child.count = weight
def set_modifier(self, modidx, modifier):
"""
set modifier to given modidx if modidx is valid,
otherwise, add modifier as new modifier to node's list
"""
if modidx >= 0 and modidx < len(self.modifiers):
self.modifiers[modidx] = modifier
else:
self.modifiers.append(modifier)
def update_modifier(self, values, weights, modidx=-1):
"""
update node's (modidx) modifier with given values/weights list
raise exception if modidx is not valid index.
if no modidx is given as input, a new modifier is created and attached to
the node
"""
# check to make sure given values/weights can be used to
# update node's modifier
# check for following conditions
# 1. weights do not add up to 100.
# FAIL, cannot update
# 2. modidx exceed max index of node's modifier list
# FAIL, cannot update
# 3. modidx is negative,
# ALLOW, add new modifier with given values/weight
# 4. modidx is correct index for node's modifier list
# ALLOW, update new
# test case 1
if sum(weights) <> 100:
raise StatisticNodeError('weight does not equal to 100')
# test case 2
if len(self.modifiers) <= modidx:
raise StatisticNodeError('modifier with index %s does not exist' % modidx)
mod = StatisticModifier("User", self.level)
for v, w in map(None, values, weights):
mod.values[v]=w
#mod = {}
#for v, w in map(None, values, weights):
# mod[v]=w
if modidx < 0:
# case 3
self.modifiers.append(mod)
else:
# case 4
self.modifiers[modidx] = mod
def remove_modifier(self, modidx):
"""
remove node's (modidx) modifier.
raise exception if modidx is not valid index
"""
if modidx < 0 or len(self.modifiers) <= modidx:
raise StatisticNodeError('modifier with index %s does not exist' % modidx)
del self.modifiers[modidx]
def increment_additonal(self, key, values):
if values.has_key(key):
if not self.additional.has_key(key):
self.additional[key]=0
self.additional[key]+= values[key]
def set_additional(self, key, value):
if self.is_leaf:
self.additional[key]=value
else:
for child in self.children:
child.set_additional(key, value)
def get_additional(self, key):
return self.additional[key] if self.additional.has_key(key) else ''
def get_additional_float(self, key):
try:
return float(self.additional[key])
except:
return 0
|
agpl-3.0
| -2,564,562,895,248,160,300
| 36.802853
| 123
| 0.516396
| false
| 4.628655
| false
| false
| false
|
shendo/websnort
|
tests/test_config.py
|
1
|
1499
|
# Websnort - Web service for analysing pcap files with snort
# Copyright (C) 2013-2015 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from websnort.config import Config
def test_config():
conf = Config('websnort.conf.complex')
assert len(conf.modules) == 3
assert conf.modules.get('snort_community')
assert conf.modules.get('snort_vrt')
assert conf.modules.get('suricata_et')
assert conf.modules['snort_community']['name'] == 'snort_community'
assert conf.modules['snort_community']['module'] == 'snort'
assert conf.modules['snort_community']['path'] == 'snort'
assert conf.modules['snort_community']['ruleset'] == 'Community Rules'
assert conf.modules['snort_community']['config'] == '/etc/snort/snort.conf'
assert not conf.modules['snort_community']['extra_args']
|
gpl-3.0
| -6,451,567,111,327,162,000
| 41.828571
| 79
| 0.728486
| false
| 3.728856
| false
| false
| false
|
openmotics/gateway-frontend
|
tools/json-sorter.py
|
1
|
1571
|
#!/usr/bin/env python
# Copyright (C) 2016 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if __name__ == '__main__':
import os
import sys
import json
if len(sys.argv) != 2:
print('Usage: ./tools/json-sorter.py <path to json file>')
print('Example: ./tools/json-sorter.py ./src/locale/en/translation.json')
sys.exit(1)
path = sys.argv[1]
if os.path.exists(path):
with open(path, 'r+', encoding='utf8') as json_file:
try:
contents = json.load(json_file)
json_file.seek(0)
contents = json.dumps(contents, indent=4, sort_keys=True, ensure_ascii=False)
json_file.write('{0}\n'.format(contents))
json_file.truncate()
except Exception as ex:
print('Error processing file: {0}'.format(ex))
else:
print('Path "{0}" does not exist'.format(path))
sys.exit(1)
|
agpl-3.0
| -7,325,323,666,220,852,000
| 38.275
| 93
| 0.63972
| false
| 3.841076
| false
| false
| false
|
jacob-ogre/excel2text
|
excel2text.py
|
1
|
2724
|
#! /usr/bin/python
# excel2text.py
# A simple program to convert Excel files to text with user-defined delimiters.
#
# Copyright (C) 2013 copyright Jacob Malcom, jacob.malcom@utexas.edu
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import sys
import xlrd
def main():
"""
Convert Excel files to text.
USAGE:
excel2text <infile> <delimiter>
ARGS:
infile, an Excel workbook of .xls or .xlsx format
delimiter, one of 'tab', 'comma', or 'space'
RETURNS:
One text file per worksheet in the infile
COMMENTS:
Writes one output file per worksheet (tab) with user-defined field
delimiters, with file base name from the worksheet name. The file suffix
is .csv (delimiter= 'comma'), .tab (= 'tab'), or .txt (= 'space').
"""
delim, suffix = get_suffix_delim()
outbase = infile.split(".")[0] + "_files/"
if not os.path.exists(outbase):
os.mkdir(outbase)
process_file(suffix, delim, outbase)
def process_file(suffix, delim, outbase):
"""Read Excel row-by-row and write each sheet to file."""
fil = xlrd.open_workbook(infile)
for sheet in fil.sheet_names():
cur_sheet = fil.sheet_by_name(sheet)
new_fil = outbase + sheet + suffix
with open(new_fil, 'wb') as out:
for rown in range(cur_sheet.nrows):
to_write = []
for cel in range(len(cur_sheet.row(rown))):
to_write.append(cur_sheet.cell_value(rown,cel))
to_write = [str(x) for x in to_write]
out.write(delim.join(to_write) + "\n")
def get_suffix_delim():
"""Return delimiter and file suffix given argv."""
if delimiter == "tab":
return "\t", ".tab"
elif delimiter == "comma":
return ",", ".csv"
elif delimiter == "space":
return " ", ".txt"
else:
print "Please use 'tab', 'comma' or 'space' as delimiters."
sys.exit(2)
if __name__ == '__main__':
if len(sys.argv) != 3:
print main.__doc__
sys.exit()
infile = sys.argv[1]
delimiter = str(sys.argv[2])
main()
|
gpl-2.0
| 776,733,743,265,624,700
| 33.923077
| 80
| 0.626652
| false
| 3.696065
| false
| false
| false
|
bzshang/yelp-photo-classification
|
scripts/get_image_features.py
|
1
|
2060
|
"""
Extract image features from next to last layer (global_pool)
"""
__author__ = 'bshang'
import numpy as np
import h5py
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
import sys
sys.path.append('/home/ubuntu/yelp/mxnet/python')
import mxnet as mx
MODEL = 'inception-v3'
MODEL_PATH = '/data/checkpoint/{0}'.format(MODEL)
LAYER = 'global_pool_output'
NUM_EPOCH = 30
TEST = False
if TEST:
FEATURES_PATH = '/data/test_image_features_{0}_{1}_{2}.h5'.format(MODEL, LAYER, NUM_EPOCH)
REC_FILE = '/data/rec/test_imgs.rec'
LAB_FILE = '/data/rec/test_imgs.lst'
else:
FEATURES_PATH = '/data/train_image_features_{0}_{1}_{2}.h5'.format(MODEL, LAYER, NUM_EPOCH)
REC_FILE = '/data/rec/train_imgs.rec'
LAB_FILE = '/data/rec/train_imgs.lst'
f = h5py.File(FEATURES_PATH, 'w')
filenames = f.create_dataset('pids', (0,), maxshape=(None,))
feature = f.create_dataset('feature', (0, 2048), maxshape=(None, 2048)) # 2048 features in global_pool
f.close()
with open(LAB_FILE, 'r') as f:
pids = [line.split('\t')[-1].split('.')[0] for line in f]
with h5py.File(FEATURES_PATH, 'r+') as f:
f['pids'].resize((len(pids),))
f['pids'][0: len(pids)] = np.array(pids, dtype=np.int64)
model = mx.model.FeedForward.load(MODEL_PATH, NUM_EPOCH, ctx=mx.gpu())
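# get_internals() exposes every intermediate output of the symbol graph by
# name, so indexing it with 'global_pool_output' yields a symbol that stops at
# the global pooling layer instead of the final classifier output.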
fea_symbol = model.symbol.get_internals()[LAYER]
feature_extractor = mx.model.FeedForward(
ctx=mx.gpu(),
symbol=fea_symbol,
arg_params=model.arg_params,
aux_params=model.aux_params,
allow_extra_params=True)
model_iter = mx.io.ImageRecordIter(
path_imgrec = REC_FILE,
mean_r = 117,
mean_g = 117,
mean_b = 117,
data_shape = (3, 299, 299),
batch_size = 32,
rand_crop = False,
rand_mirror = False,
path_imglist= LAB_FILE,
label_width = 9
)
features = feature_extractor.predict(model_iter)
features = features[:, :, 0, 0]
with h5py.File(FEATURES_PATH, 'r+') as f:
f['feature'].resize((features.shape[0], features.shape[1]))
f['feature'][0: features.shape[0], :] = features
|
apache-2.0
| -2,569,413,815,894,601,700
| 26.837838
| 103
| 0.647573
| false
| 2.77628
| false
| false
| false
|
LukeCarrier/py3k-pexpect
|
tools/websync.py
|
1
|
1347
|
#!/usr/bin/env python
# I use this to keep the sourceforge pages up to date with the
# latest documentation and I like to keep a copy of the distribution
# on the web site so that it will be compatible with
# The Vaults of Parnasus which requires a direct URL link to a
# tar ball distribution. I don't advertise the package this way.
import pexpect, pyed
import getpass
import sys, os
X = getpass.getpass('Password: ')
pp_pattern=["(?i)password:", "(?i)enter passphrase for key '.*?':"]
p = pexpect.spawn ('scp -r doc/. noah@shell.sourceforge.net:/home/groups/p/pe/pexpect/htdocs/.')
p.logfile_read = sys.stdout
p.expect (pp_pattern)
p.sendline (X)
p.expect (pexpect.EOF)
print(p.before)
p = pexpect.spawn ('scp doc/clean.css doc/email.png noah@shell.sourceforge.net:/home/groups/p/pe/pexpect/htdocs/clean.css')
p.logfile_read = sys.stdout
p.expect (pp_pattern)
p.sendline (X)
p.expect (pexpect.EOF)
print(p.before)
#p = pexpect.spawn ('ssh noah@use-pr-shell1.sourceforge.net "cd htdocs;tar zxvf pexpect-doc.tgz"')
#p.logfile_read = sys.stdout
#p.expect ('password:')
#p.sendline (X)
#p.expect (pexpect.EOF)
#print p.before
p = pexpect.spawn ('scp dist/pexpect-*.tar.gz noah@shell.sourceforge.net:/home/groups/p/pe/pexpect/htdocs/.')
p.logfile_read = sys.stdout
p.expect (pp_pattern)
p.sendline (X)
p.expect (pexpect.EOF)
print(p.before)
|
mit
| 4,307,543,370,111,144,000
| 30.325581
| 123
| 0.729027
| false
| 2.853814
| false
| false
| false
|
akrzos/cfme_tests
|
cfme/tests/cloud/test_providers.py
|
1
|
10044
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# pylint: disable=W0621
import fauxfactory
import uuid
import pytest
import utils.error as error
from cfme import Credential
from cfme.exceptions import FlashMessageException
from cfme.cloud.provider import (discover, EC2Provider, wait_for_a_provider,
Provider, OpenStackProvider, prop_region)
from cfme.web_ui import fill, flash
from utils import testgen, version
from utils.providers import get_credentials_from_config
from utils.update import update
pytest_generate_tests = testgen.generate(testgen.cloud_providers, scope="function")
@pytest.mark.tier(3)
def test_empty_discovery_form_validation():
""" Tests that the flash message is correct when discovery form is empty."""
discover(None, d_type="Amazon")
ident = version.pick({version.LOWEST: 'User ID',
'5.4': 'Username'})
flash.assert_message_match('{} is required'.format(ident))
@pytest.mark.tier(3)
def test_discovery_cancelled_validation():
""" Tests that the flash message is correct when discovery is cancelled."""
discover(None, cancel=True, d_type="Amazon")
msg = version.pick(
{version.LOWEST: 'Amazon Cloud Providers Discovery was cancelled by the user',
'5.5': 'Cloud Providers Discovery was cancelled by the user'})
flash.assert_message_match(msg)
@pytest.mark.tier(3)
def test_add_cancelled_validation(request):
"""Tests that the flash message is correct when add is cancelled."""
prov = EC2Provider()
request.addfinalizer(prov.delete_if_exists)
prov.create(cancel=True)
flash.assert_message_match({
version.LOWEST: 'Add of new Cloud Provider was cancelled by the user',
'5.5': 'Add of Cloud Provider was cancelled by the user'})
@pytest.mark.tier(3)
def test_password_mismatch_validation():
cred = Credential(
principal=fauxfactory.gen_alphanumeric(5),
secret=fauxfactory.gen_alphanumeric(5),
verify_secret=fauxfactory.gen_alphanumeric(7))
discover(cred, d_type="Amazon")
flash.assert_message_match('Password/Verify Password do not match')
@pytest.mark.tier(3)
@pytest.mark.uncollect()
@pytest.mark.usefixtures('has_no_cloud_providers')
def test_providers_discovery_amazon():
amazon_creds = get_credentials_from_config('cloudqe_amazon')
discover(amazon_creds, d_type="Amazon")
flash.assert_message_match('Amazon Cloud Providers: Discovery successfully initiated')
wait_for_a_provider()
@pytest.mark.tier(3)
@pytest.mark.usefixtures('has_no_cloud_providers')
def test_provider_add_with_bad_credentials(provider):
""" Tests provider add with bad credentials
Metadata:
test_flag: crud
"""
provider.credentials['default'] = get_credentials_from_config('bad_credentials')
with error.expected('Login failed due to a bad username or password.'):
provider.create(validate_credentials=True)
@pytest.mark.tier(2)
@pytest.mark.usefixtures('has_no_cloud_providers')
def test_provider_crud(provider):
""" Tests provider add with good credentials
Metadata:
test_flag: crud
"""
provider.create()
provider.validate_stats(ui=True)
old_name = provider.name
with update(provider):
provider.name = str(uuid.uuid4()) # random uuid
with update(provider):
provider.name = old_name # old name
provider.delete(cancel=False)
provider.wait_for_delete()
@pytest.mark.tier(3)
def test_type_required_validation(request, soft_assert):
"""Test to validate type while adding a provider"""
prov = Provider()
request.addfinalizer(prov.delete_if_exists)
if version.current_version() < "5.5":
with error.expected('Type is required'):
prov.create()
else:
pytest.sel.force_navigate("clouds_provider_new")
fill(prov.properties_form.name_text, "foo")
soft_assert("ng-invalid-required" in prov.properties_form.type_select.classes)
soft_assert(not prov.add_provider_button.can_be_clicked)
@pytest.mark.tier(3)
def test_name_required_validation(request):
"""Tests to validate the name while adding a provider"""
prov = EC2Provider(
name=None,
region='us-east-1')
request.addfinalizer(prov.delete_if_exists)
if version.current_version() < "5.5":
with error.expected("Name can't be blank"):
prov.create()
else:
# It must raise an exception because it keeps on the form
with error.expected(FlashMessageException):
prov.create()
assert prov.properties_form.name_text.angular_help_block == "Required"
@pytest.mark.tier(3)
def test_region_required_validation(request, soft_assert):
"""Tests to validate the region while adding a provider"""
prov = EC2Provider(
name=fauxfactory.gen_alphanumeric(5),
region=None)
request.addfinalizer(prov.delete_if_exists)
if version.current_version() < "5.5":
with error.expected('Region is not included in the list'):
prov.create()
else:
with error.expected(FlashMessageException):
prov.create()
soft_assert(
"ng-invalid-required" in prov.properties_form.region_select.classes)
@pytest.mark.tier(3)
def test_host_name_required_validation(request):
"""Test to validate the hostname while adding a provider"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=None,
ip_address=fauxfactory.gen_ipaddr(prefix=[10]))
request.addfinalizer(prov.delete_if_exists)
if version.current_version() < "5.5":
with error.expected("Host Name can't be blank"):
prov.create()
else:
# It must raise an exception because it keeps on the form
with error.expected(FlashMessageException):
prov.create()
assert prov.properties_form.hostname_text.angular_help_block == "Required"
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() > '5.4')
def test_ip_address_required_validation(request):
"""Test to validate the ip address while adding a provider"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(5),
ip_address=None)
request.addfinalizer(prov.delete_if_exists)
with error.expected("IP Address can't be blank"):
prov.create()
@pytest.mark.tier(3)
def test_api_port_blank_validation(request):
"""Test to validate blank api port while adding a provider"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(5),
ip_address=fauxfactory.gen_ipaddr(prefix=[10]),
api_port='')
request.addfinalizer(prov.delete_if_exists)
if version.current_version() < "5.5":
prov.create()
else:
# It must raise an exception because it keeps on the form
with error.expected(FlashMessageException):
prov.create()
assert prov.properties_form.api_port.angular_help_block == "Required"
@pytest.mark.tier(3)
def test_user_id_max_character_validation():
cred = Credential(principal=fauxfactory.gen_alphanumeric(51))
discover(cred, d_type="Amazon")
@pytest.mark.tier(3)
def test_password_max_character_validation():
password = fauxfactory.gen_alphanumeric(51)
cred = Credential(
principal=fauxfactory.gen_alphanumeric(5),
secret=password,
verify_secret=password)
discover(cred, d_type="Amazon")
@pytest.mark.tier(3)
def test_name_max_character_validation(request):
"""Test to validate max character for name field"""
prov = EC2Provider(
name=fauxfactory.gen_alphanumeric(255),
region='us-east-1')
request.addfinalizer(prov.delete_if_exists)
prov.create()
@pytest.mark.tier(3)
def test_hostname_max_character_validation(request):
"""Test to validate max character for hostname field"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(255),
ip_address=fauxfactory.gen_ipaddr(prefix=[10]))
request.addfinalizer(prov.delete_if_exists)
prov.create()
@pytest.mark.tier(3)
def test_ip_max_valid_character_validation(request):
"""Test to validate max character for ip address field with valid ip address"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(5),
ip_address=fauxfactory.gen_ipaddr(prefix=[10]))
request.addfinalizer(prov.delete_if_exists)
prov.create()
@pytest.mark.tier(3)
def test_ip_max_invalid_character_validation(request):
"""Test to validate max character for ip address field using random string"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(5),
ip_address=fauxfactory.gen_alphanumeric(15))
request.addfinalizer(prov.delete_if_exists)
prov.create()
@pytest.mark.tier(3)
def test_api_port_max_character_validation(request):
"""Test to validate max character for api port field"""
prov = OpenStackProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(5),
ip_address=fauxfactory.gen_ipaddr(prefix=[10]),
api_port=fauxfactory.gen_alphanumeric(15))
request.addfinalizer(prov.delete_if_exists)
prov.create()
@pytest.mark.tier(3)
@pytest.mark.uncollectif(lambda: version.current_version() < "5.5")
@pytest.mark.meta(blockers=[1278036])
def test_openstack_provider_has_api_version():
"""Check whether the Keystone API version field is present for Openstack."""
prov = Provider()
pytest.sel.force_navigate("clouds_provider_new")
fill(prop_region.properties_form, {"type_select": "OpenStack"})
pytest.sel.wait_for_ajax()
assert pytest.sel.is_displayed(
prov.properties_form.api_version), "API version select is not visible"
|
gpl-2.0
| 2,334,746,530,047,233,000
| 32.704698
| 90
| 0.692055
| false
| 3.704906
| true
| false
| false
|
ldamewood/figures
|
scripts/interpolate.py
|
1
|
5456
|
import numpy
__all__ = [
"interpolate",
]
class interpolate:
# Convert two vectors into a normalized coordinate system via Gram-Schmidt (GS) orthogonalization
@staticmethod
def plane_to_cs(cs):
# Normalize vectors
cs[0] = cs[0]/numpy.linalg.norm(cs[0])
cs[1] = cs[1]/numpy.linalg.norm(cs[1])
# Orthogonalize second vector to first
cs[1] = cs[1] - cs[0] * numpy.dot(cs[0],cs[1])
# Return array with third vector orthogonal to other two
return numpy.vstack([cs,numpy.cross(cs[0],cs[1])])
    # Generate a grid on a 2d plane in 3d space
@staticmethod
def grid_intersection(plane, res, dim):
# Create grid
x, y, z = numpy.mgrid[0:res[0],0:res[1],0.:1.]
# Center grid
x -= numpy.floor(res[0]/2); y -= numpy.floor(res[1]/2)
# Scale grid
x *= 1.*dim[0]/res[0]
y *= 1.*dim[1]/res[1]
# List of points in the grid
element = numpy.array([x.flatten(),y.flatten(),z.flatten()])
# Generate coordinate system
cs = interpolate.plane_to_cs(plane)
# Rotate points to plane cs
return x,y,numpy.dot(element.T,cs)
    # Linearly interpolate 3d periodic data on a plane grid
@staticmethod
def interpolate_plane(datain, cell=[[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]],
plane = [[1.,0.,0.],[0.,0.,1.]], center = [0.5,0.5,0.5],
dim = [1.,1.], res = [100.,100.]):
# Convert to numpy
cell = numpy.array(cell)
center = numpy.array(center)
plane = numpy.array(plane)
datain = numpy.array(datain)
# Define the cell size
boxsize = max(sum(abs(cell)))
# Generate grid in Cartesian coordinates
x,y,elements = interpolate.grid_intersection(plane,res,dim)
# Scale the coordinates to the size of the box
x *= boxsize; y *= boxsize; elements *= boxsize
# Rotate points to primitive cell coordinates
rr = numpy.linalg.solve(cell.T,elements.T)
# Add the center point to all elements
rr += numpy.array([center]).T.repeat(rr.shape[1],1)
# Interpolate the density on the plane
dataout = numpy.reshape(interpolate.pinterpn(datain,rr.T),res)
# Return x,y,z data
return x[:,:,0],y[:,:,0],dataout
# Interpolate regularly spaced periodic nd data at an arbitrary point.
@staticmethod
def pinterpn(datain, rr):
# Grid dimensions. e.g. [100,100,100]
grid = datain.shape
# Number of grid dimensions. e.g. 3
dim = len(grid)
# Number of points to interpolate. e.g. rr is 100x3
nelem = rr.shape[0]
# Dimension of points should agree with grid dimension.
assert rr.shape[1] == dim
# Force rr to be between 0 and 1
rr = numpy.mod(numpy.mod(rr, 1) + 1, 1)
# allocate space for the results
data = numpy.zeros((nelem, 1),dtype=datain.dtype);
        # dimmatrix is an nelem-length list of the grid dimensions.
# Ex: [[100,100,100],
# [100,100,100],
# ...
# [100,100,100]]
dimmatrix = numpy.tile(grid, (nelem, 1))
# ir and pr are zero-indexed integers that define two opposite corners of an
# n-dimensional box with density points defined at all of the corners given
# by datain. First, scale rr points to the size of the grid and then round
# to smallest integer to define the "lower corner".
ir = numpy.fix(numpy.dot(rr, numpy.diag(grid)))
# Find the "higher corner" by adding 1 to each lower corner and then wrapping
# the edges back to zero
pr = numpy.mod(ir + 1, dimmatrix)
# Check if any upper corners are on the boundary,
idx = (pr == dimmatrix)
# and wrap the boundary points back to zero.
        pr[idx] -= dimmatrix[idx]
# xr is a vector from the lower corner of a box to the position of
# the interpolation point.
xr = numpy.dot(rr, numpy.diag(grid)) - ir
# Iterator over the 2^d corners of each box
corners = range(2 ** dim)
# Iterator over the dimensions of the space
dims = range(dim)
# Distance to nearest edge in each dimension. Initialize to zero
r = dim * [0]
# Lower and upper corners
ir = numpy.array([ir, pr])
# Distance weight factors on lower and upper corners
xr = numpy.array([1 - xr, xr])
# Loop over each position
for j in range(nelem):
# Initialize density value to zero
denval = 0
# Loop over each corner
# Add up the contribution from each corner weighted
# by the distance to that corner
for corner in corners:
x = 1
# loop over each dimension
for dim in dims:
# determine if this corner is low or high,
lohi = corner % 2
# and remove last bit.
corner /= 2
# distance weight factor
x *= xr[lohi, j, dim]
# nearest edge
r[dim] = ir[lohi, j, dim]
denval += x * datain[tuple(r)]
data[j] = denval
return data
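# Illustrative usage sketch (values are arbitrary examples; assumes the legacy
# Python 2 / NumPy behaviour this module targets):
#   density = numpy.random.rand(20, 20, 20)          # periodic 3d scalar data
#   x, y, z = interpolate.interpolate_plane(
#       density,
#       plane=[[1., 0., 0.], [0., 1., 0.]],
#       center=[0.5, 0.5, 0.5],
#       dim=[1., 1.], res=[50., 50.])
#   # x, y are the 50x50 in-plane coordinates; z holds the interpolated values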
|
mit
| -1,390,636,003,431,998,700
| 37.978571
| 86
| 0.548021
| false
| 3.92518
| false
| false
| false
|
sileht/deb-openstack-keystone
|
keystone/identity/core.py
|
1
|
21922
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
import uuid
import urllib
import urlparse
from keystone import config
from keystone import exception
from keystone import policy
from keystone import token
from keystone.common import manager
from keystone.common import wsgi
CONF = config.CONF
class Manager(manager.Manager):
"""Default pivot point for the Identity backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
def __init__(self):
super(Manager, self).__init__(CONF.identity.driver)
class Driver(object):
"""Interface description for an Identity driver."""
def authenticate(self, user_id=None, tenant_id=None, password=None):
"""Authenticate a given user, tenant and password.
Returns: (user, tenant, metadata).
"""
raise exception.NotImplemented()
def get_tenant(self, tenant_id):
"""Get a tenant by id.
Returns: tenant_ref or None.
"""
raise exception.NotImplemented()
def get_tenant_by_name(self, tenant_name):
"""Get a tenant by name.
Returns: tenant_ref or None.
"""
raise exception.NotImplemented()
def get_user(self, user_id):
"""Get a user by id.
Returns: user_ref or None.
"""
raise exception.NotImplemented()
def get_user_by_name(self, user_name):
"""Get a user by name.
Returns: user_ref or None.
"""
raise exception.NotImplemented()
def get_role(self, role_id):
"""Get a role by id.
Returns: role_ref or None.
"""
raise exception.NotImplemented()
def list_users(self):
"""List all users in the system.
NOTE(termie): I'd prefer if this listed only the users for a given
tenant.
Returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented()
def list_roles(self):
"""List all roles in the system.
Returns: a list of role_refs or an empty list.
"""
raise exception.NotImplemented()
# NOTE(termie): seven calls below should probably be exposed by the api
# more clearly when the api redesign happens
def add_user_to_tenant(self, tenant_id, user_id):
raise exception.NotImplemented()
def remove_user_from_tenant(self, tenant_id, user_id):
raise exception.NotImplemented()
def get_all_tenants(self):
raise exception.NotImplemented()
def get_tenants_for_user(self, user_id):
"""Get the tenants associated with a given user.
Returns: a list of tenant ids.
"""
raise exception.NotImplemented()
def get_roles_for_user_and_tenant(self, user_id, tenant_id):
"""Get the roles associated with a user within given tenant.
Returns: a list of role ids.
"""
raise exception.NotImplemented()
def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):
"""Add a role to a user within given tenant."""
raise exception.NotImplemented()
def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):
"""Remove a role from a user within given tenant."""
raise exception.NotImplemented()
# user crud
def create_user(self, user_id, user):
raise exception.NotImplemented()
def update_user(self, user_id, user):
raise exception.NotImplemented()
def delete_user(self, user_id):
raise exception.NotImplemented()
# tenant crud
def create_tenant(self, tenant_id, tenant):
raise exception.NotImplemented()
def update_tenant(self, tenant_id, tenant):
raise exception.NotImplemented()
def delete_tenant(self, tenant_id, tenant):
raise exception.NotImplemented()
# metadata crud
def get_metadata(self, user_id, tenant_id):
raise exception.NotImplemented()
def create_metadata(self, user_id, tenant_id, metadata):
raise exception.NotImplemented()
def update_metadata(self, user_id, tenant_id, metadata):
raise exception.NotImplemented()
def delete_metadata(self, user_id, tenant_id, metadata):
raise exception.NotImplemented()
# role crud
def create_role(self, role_id, role):
raise exception.NotImplemented()
def update_role(self, role_id, role):
raise exception.NotImplemented()
def delete_role(self, role_id):
raise exception.NotImplemented()
class PublicRouter(wsgi.ComposableRouter):
def add_routes(self, mapper):
tenant_controller = TenantController()
mapper.connect('/tenants',
controller=tenant_controller,
action='get_tenants_for_token',
conditions=dict(methods=['GET']))
class AdminRouter(wsgi.ComposableRouter):
def add_routes(self, mapper):
# Tenant Operations
tenant_controller = TenantController()
mapper.connect('/tenants',
controller=tenant_controller,
action='get_all_tenants',
conditions=dict(method=['GET']))
mapper.connect('/tenants/{tenant_id}',
controller=tenant_controller,
action='get_tenant',
conditions=dict(method=['GET']))
# User Operations
user_controller = UserController()
mapper.connect('/users/{user_id}',
controller=user_controller,
action='get_user',
conditions=dict(method=['GET']))
# Role Operations
roles_controller = RoleController()
mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
controller=roles_controller,
action='get_user_roles',
conditions=dict(method=['GET']))
mapper.connect('/users/{user_id}/roles',
controller=user_controller,
action='get_user_roles',
conditions=dict(method=['GET']))
class TenantController(wsgi.Application):
def __init__(self):
self.identity_api = Manager()
self.policy_api = policy.Manager()
self.token_api = token.Manager()
super(TenantController, self).__init__()
def get_all_tenants(self, context, **kw):
"""Gets a list of all tenants for an admin user."""
self.assert_admin(context)
tenant_refs = self.identity_api.get_tenants(context)
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_tenant_list(tenant_refs, **params)
def get_tenants_for_token(self, context, **kw):
"""Get valid tenants for token based on token used to authenticate.
Pulls the token from the context, validates it and gets the valid
tenants for the user in the token.
Doesn't care about token scopedness.
"""
try:
token_ref = self.token_api.get_token(context=context,
token_id=context['token_id'])
except exception.NotFound:
raise exception.Unauthorized()
user_ref = token_ref['user']
tenant_ids = self.identity_api.get_tenants_for_user(
context, user_ref['id'])
tenant_refs = []
for tenant_id in tenant_ids:
tenant_refs.append(self.identity_api.get_tenant(
context=context,
tenant_id=tenant_id))
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_tenant_list(tenant_refs, **params)
def get_tenant(self, context, tenant_id):
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
tenant = self.identity_api.get_tenant(context, tenant_id)
if tenant is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
return {'tenant': tenant}
# CRUD Extension
def create_tenant(self, context, tenant):
tenant_ref = self._normalize_dict(tenant)
self.assert_admin(context)
tenant_id = (tenant_ref.get('id')
and tenant_ref.get('id')
or uuid.uuid4().hex)
tenant_ref['id'] = tenant_id
tenant = self.identity_api.create_tenant(
context, tenant_id, tenant_ref)
return {'tenant': tenant}
def update_tenant(self, context, tenant_id, tenant):
self.assert_admin(context)
if self.identity_api.get_tenant(context, tenant_id) is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
tenant_ref = self.identity_api.update_tenant(
context, tenant_id, tenant)
return {'tenant': tenant_ref}
def delete_tenant(self, context, tenant_id, **kw):
self.assert_admin(context)
if self.identity_api.get_tenant(context, tenant_id) is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
self.identity_api.delete_tenant(context, tenant_id)
def get_tenant_users(self, context, tenant_id, **kw):
self.assert_admin(context)
if self.identity_api.get_tenant(context, tenant_id) is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
user_refs = self.identity_api.get_tenant_users(context, tenant_id)
return {'users': user_refs}
def _format_tenant_list(self, tenant_refs, **kwargs):
marker = kwargs.get('marker')
page_idx = 0
if marker is not None:
for (marker_idx, tenant) in enumerate(tenant_refs):
if tenant['id'] == marker:
# we start pagination after the marker
page_idx = marker_idx + 1
break
else:
msg = 'Marker could not be found'
raise exception.ValidationError(message=msg)
limit = kwargs.get('limit')
if limit is not None:
try:
limit = int(limit)
if limit < 0:
raise AssertionError()
except (ValueError, AssertionError):
msg = 'Invalid limit value'
raise exception.ValidationError(message=msg)
tenant_refs = tenant_refs[page_idx:limit]
for x in tenant_refs:
if 'enabled' not in x:
x['enabled'] = True
o = {'tenants': tenant_refs,
'tenants_links': []}
return o
class UserController(wsgi.Application):
def __init__(self):
self.identity_api = Manager()
self.policy_api = policy.Manager()
self.token_api = token.Manager()
super(UserController, self).__init__()
def get_user(self, context, user_id):
self.assert_admin(context)
user_ref = self.identity_api.get_user(context, user_id)
if not user_ref:
raise exception.UserNotFound(user_id=user_id)
return {'user': user_ref}
def get_users(self, context):
# NOTE(termie): i can't imagine that this really wants all the data
# about every single user in the system...
self.assert_admin(context)
user_refs = self.identity_api.list_users(context)
return {'users': user_refs}
# CRUD extension
def create_user(self, context, user):
user = self._normalize_dict(user)
self.assert_admin(context)
tenant_id = user.get('tenantId', None)
if (tenant_id is not None
and self.identity_api.get_tenant(context, tenant_id) is None):
raise exception.TenantNotFound(tenant_id=tenant_id)
user_id = uuid.uuid4().hex
user_ref = user.copy()
user_ref['id'] = user_id
new_user_ref = self.identity_api.create_user(
context, user_id, user_ref)
if tenant_id:
self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
return {'user': new_user_ref}
def update_user(self, context, user_id, user):
# NOTE(termie): this is really more of a patch than a put
self.assert_admin(context)
if self.identity_api.get_user(context, user_id) is None:
raise exception.UserNotFound(user_id=user_id)
user_ref = self.identity_api.update_user(context, user_id, user)
return {'user': user_ref}
def delete_user(self, context, user_id):
self.assert_admin(context)
if self.identity_api.get_user(context, user_id) is None:
raise exception.UserNotFound(user_id=user_id)
self.identity_api.delete_user(context, user_id)
def set_user_enabled(self, context, user_id, user):
return self.update_user(context, user_id, user)
def set_user_password(self, context, user_id, user):
return self.update_user(context, user_id, user)
def update_user_tenant(self, context, user_id, user):
"""Update the default tenant."""
# ensure that we're a member of that tenant
tenant_id = user.get('tenantId')
self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
return self.update_user(context, user_id, user)
class RoleController(wsgi.Application):
def __init__(self):
self.identity_api = Manager()
self.token_api = token.Manager()
self.policy_api = policy.Manager()
super(RoleController, self).__init__()
# COMPAT(essex-3)
def get_user_roles(self, context, user_id, tenant_id=None):
"""Get the roles for a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant ID required')
user = self.identity_api.get_user(context, user_id)
if user is None:
raise exception.UserNotFound(user_id=user_id)
tenant = self.identity_api.get_tenant(context, tenant_id)
if tenant is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
roles = self.identity_api.get_roles_for_user_and_tenant(
context, user_id, tenant_id)
return {'roles': [self.identity_api.get_role(context, x)
for x in roles]}
# CRUD extension
def get_role(self, context, role_id):
self.assert_admin(context)
role_ref = self.identity_api.get_role(context, role_id)
if not role_ref:
raise exception.RoleNotFound(role_id=role_id)
return {'role': role_ref}
def create_role(self, context, role):
role = self._normalize_dict(role)
self.assert_admin(context)
role_id = uuid.uuid4().hex
role['id'] = role_id
role_ref = self.identity_api.create_role(context, role_id, role)
return {'role': role_ref}
def delete_role(self, context, role_id):
self.assert_admin(context)
self.get_role(context, role_id)
self.identity_api.delete_role(context, role_id)
def get_roles(self, context):
self.assert_admin(context)
roles = self.identity_api.list_roles(context)
# TODO(termie): probably inefficient at some point
return {'roles': roles}
def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
"""Add a role to a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
if self.identity_api.get_user(context, user_id) is None:
raise exception.UserNotFound(user_id=user_id)
if self.identity_api.get_tenant(context, tenant_id) is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
if self.identity_api.get_role(context, role_id) is None:
raise exception.RoleNotFound(role_id=role_id)
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant
self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
self.identity_api.add_role_to_user_and_tenant(
context, user_id, tenant_id, role_id)
role_ref = self.identity_api.get_role(context, role_id)
return {'role': role_ref}
def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
"""Remove a role from a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
if self.identity_api.get_user(context, user_id) is None:
raise exception.UserNotFound(user_id=user_id)
if self.identity_api.get_tenant(context, tenant_id) is None:
raise exception.TenantNotFound(tenant_id=tenant_id)
if self.identity_api.get_role(context, role_id) is None:
raise exception.RoleNotFound(role_id=role_id)
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant, so we must follow up on that
self.identity_api.remove_role_from_user_and_tenant(
context, user_id, tenant_id, role_id)
roles = self.identity_api.get_roles_for_user_and_tenant(
context, user_id, tenant_id)
if not roles:
self.identity_api.remove_user_from_tenant(
context, tenant_id, user_id)
return
# COMPAT(diablo): CRUD extension
def get_role_refs(self, context, user_id):
"""Ultimate hack to get around having to make role_refs first-class.
This will basically iterate over the various roles the user has in
all tenants the user is a member of and create fake role_refs where
the id encodes the user-tenant-role information so we can look
up the appropriate data when we need to delete them.
"""
self.assert_admin(context)
user_ref = self.identity_api.get_user(context, user_id)
tenant_ids = self.identity_api.get_tenants_for_user(context, user_id)
o = []
for tenant_id in tenant_ids:
role_ids = self.identity_api.get_roles_for_user_and_tenant(
context, user_id, tenant_id)
for role_id in role_ids:
ref = {'roleId': role_id,
'tenantId': tenant_id,
'userId': user_id}
ref['id'] = urllib.urlencode(ref)
o.append(ref)
return {'roles': o}
# COMPAT(diablo): CRUD extension
def create_role_ref(self, context, user_id, role):
"""This is actually used for adding a user to a tenant.
In the legacy data model adding a user to a tenant required setting
a role.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
tenant_id = role.get('tenantId')
role_id = role.get('roleId')
self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
self.identity_api.add_role_to_user_and_tenant(
context, user_id, tenant_id, role_id)
role_ref = self.identity_api.get_role(context, role_id)
return {'role': role_ref}
# COMPAT(diablo): CRUD extension
def delete_role_ref(self, context, user_id, role_ref_id):
"""This is actually used for deleting a user from a tenant.
In the legacy data model removing a user from a tenant required
deleting a role.
To emulate this, we encode the tenant and role in the role_ref_id,
and if this happens to be the last role for the user-tenant pair,
we remove the user from the tenant.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
role_ref_ref = urlparse.parse_qs(role_ref_id)
tenant_id = role_ref_ref.get('tenantId')[0]
role_id = role_ref_ref.get('roleId')[0]
self.identity_api.remove_role_from_user_and_tenant(
context, user_id, tenant_id, role_id)
roles = self.identity_api.get_roles_for_user_and_tenant(
context, user_id, tenant_id)
if not roles:
self.identity_api.remove_user_from_tenant(
context, tenant_id, user_id)
|
apache-2.0
| 3,092,222,762,643,436,000
| 34.937705
| 79
| 0.603686
| false
| 4.052875
| false
| false
| false
|
truedays/sandbox
|
python/auto.py
|
1
|
3096
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# CTA API CLI YAH
import sys
import requests
#from bs4 import BeautifulSoup
from xmltodict import parse
import datetime
from time import gmtime, strftime
# enable debugging
import cgitb
cgitb.enable()
# get API key from file
f = open('./.cta-api.key', 'r')
APIKEY = "?key=" + f.read(25)
f.close()
URL="http://www.ctabustracker.com/bustime/api/v1/"
apicmd = "getpredictions"
showResponse = ["stpnm","stpid","vid","rt","rtdir","prdtm"]
def getPred(localurl):
r = requests.get(localurl)
out = parse(r.text)
if "error" in out['bustime-response']:
#print "+" + str(out) + "+"
#print "localurl: " + str(localurl)
print "xXxXxXx\nERROR: " + out['bustime-response']['error']['msg']
print "xXxXxXx\n"
#sys.exit(1)
return
print "___"
# Lame safety check:
if "prdtm" in out['bustime-response']['prd']:
#print "tmpstmp: " + out['bustime-response']['prd']['tmstmp']
for x in showResponse:
print x + ": " + out['bustime-response']['prd'][x]
#out['bustime-response']['prd']:
#print key
#print x
# true == multiple predictions returned
if isinstance(out['bustime-response']['prd'], list):
for x in range(0,len(out['bustime-response']['prd'])):
if out['bustime-response']['prd'][x]:
hourNow=int(out['bustime-response']['prd'][x]['tmstmp'][9:11])
minNow=int(out['bustime-response']['prd'][x]['tmstmp'][12:14])
hourPred=int(out['bustime-response']['prd'][x]['prdtm'][9:11])
minPred=int(out['bustime-response']['prd'][x]['prdtm'][12:14])
timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)
for response in showResponse:
print response + "[" + str(x) + "]" + ": " + out['bustime-response']['prd'][x][response]
print "Minutes remaining: " + str(timeRemain)
print "___"
else:
if "tmstmp" in out['bustime-response']['prd']:
# print out['bustime-response']['prd']['tmstmp'][9:11]
# print out['bustime-response']['prd']['tmstmp'][12:14]
# print out['bustime-response']['prd']['prdtm'][9:11]
# print out['bustime-response']['prd']['prdtm'][12:14]
hourNow=int(out['bustime-response']['prd']['tmstmp'][9:11])
minNow=int(out['bustime-response']['prd']['tmstmp'][12:14])
hourPred=int(out['bustime-response']['prd']['prdtm'][9:11])
minPred=int(out['bustime-response']['prd']['prdtm'][12:14])
timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)
print "Minutes remaining: " + str(timeRemain)
print "___"
if int(strftime("%H", gmtime())) > 18 or int(strftime("%H", gmtime())) < 6 :
# Heading home
heading = "home"
stops = ["&rt=78&stpid=11401", "&rt=56&stpid=14101"]
else:
# heading to work
heading = "work"
stops = ["&rt=78&stpid=11321", "&rt=56&stpid=5586"]
#print "Content-Type: text/plain;charset=utf-8"
print
print "<html><title>Bus times - Heading to " + heading + "</title><body>"
print "<pre>"
print "hour: " + str(int(strftime("%H", gmtime())))
for stop in stops:
fullurl = URL + apicmd + APIKEY + stop
getPred(fullurl)
print "</pre>"
print """
<FORM>
<INPUT TYPE="button" onClick="history.go(0)" VALUE="Refresh">
</FORM>
"""
|
gpl-3.0
| -1,918,603,070,295,321,000
| 31.25
| 93
| 0.630814
| false
| 2.766756
| false
| false
| false
|
bslatkin/8-bits
|
appengine-ndb/ndb/blobstore_test.py
|
1
|
10271
|
"""Tests for blobstore.py."""
import cgi
import cStringIO
import datetime
import pickle
import unittest
from .google_imports import namespace_manager
from .google_imports import datastore_types
from . import blobstore
from . import model
from . import tasklets
from . import test_utils
class BlobstoreTests(test_utils.NDBTest):
def setUp(self):
super(BlobstoreTests, self).setUp()
self.testbed.init_blobstore_stub()
the_module = blobstore
def testConstants(self):
# This intentionally hardcodes the values. I'd like to know when
# they change.
self.assertEqual(blobstore.BLOB_INFO_KIND, '__BlobInfo__')
self.assertEqual(blobstore.BLOB_MIGRATION_KIND, '__BlobMigration__')
self.assertEqual(blobstore.BLOB_KEY_HEADER, 'X-AppEngine-BlobKey')
self.assertEqual(blobstore.BLOB_RANGE_HEADER, 'X-AppEngine-BlobRange')
self.assertEqual(blobstore.UPLOAD_INFO_CREATION_HEADER,
'X-AppEngine-Upload-Creation')
self.assertEqual(blobstore.MAX_BLOB_FETCH_SIZE, 1015808)
def testExceptions(self):
self.assertTrue(issubclass(blobstore.Error, Exception))
self.assertTrue(issubclass(blobstore.InternalError, blobstore.Error))
self.assertTrue(issubclass(blobstore.BlobFetchSizeTooLargeError,
blobstore.Error))
self.assertTrue(issubclass(blobstore.BlobNotFoundError, blobstore.Error))
self.assertTrue(issubclass(blobstore.DataIndexOutOfRangeError,
blobstore.Error))
self.assertTrue(issubclass(blobstore.PermissionDeniedError,
blobstore.Error))
self.assertTrue(issubclass(blobstore.BlobInfoParseError, blobstore.Error))
def create_blobinfo(self, blobkey):
"""Handcraft a dummy BlobInfo."""
b = blobstore.BlobInfo(key=model.Key(blobstore.BLOB_INFO_KIND, blobkey),
content_type='text/plain',
creation=datetime.datetime(2012, 1, 24, 8, 15, 0),
filename='hello.txt',
size=42,
md5_hash='xxx')
model.Model._put_async(b).check_success()
return b
def testBlobInfo(self):
b = self.create_blobinfo('dummy')
self.assertEqual(b._get_kind(), blobstore.BLOB_INFO_KIND)
self.assertEqual(b.key(), blobstore.BlobKey('dummy'))
self.assertEqual(b.content_type, 'text/plain')
self.assertEqual(b.creation, datetime.datetime(2012, 1, 24, 8, 15, 0))
self.assertEqual(b.filename, 'hello.txt')
self.assertEqual(b.md5_hash, 'xxx')
def testBlobInfo_PutErrors(self):
b = self.create_blobinfo('dummy')
self.assertRaises(Exception, b.put)
self.assertRaises(Exception, b.put_async)
self.assertRaises(Exception, model.put_multi, [b])
self.assertRaises(Exception, model.put_multi_async, [b])
def testBlobInfo_Get(self):
b = self.create_blobinfo('dummy')
c = blobstore.BlobInfo.get(b.key())
self.assertEqual(c, b)
self.assertTrue(c is not b)
c = blobstore.BlobInfo.get('dummy')
self.assertEqual(c, b)
self.assertTrue(c is not b)
def testBlobInfo_GetAsync(self):
b = self.create_blobinfo('dummy')
cf = blobstore.BlobInfo.get_async(b.key())
self.assertTrue(isinstance(cf, tasklets.Future))
c = cf.get_result()
self.assertEqual(c, b)
self.assertTrue(c is not b)
df = blobstore.BlobInfo.get_async(str(b.key()))
self.assertTrue(isinstance(df, tasklets.Future))
d = df.get_result()
self.assertEqual(d, b)
self.assertTrue(d is not b)
def testBlobInfo_GetMulti(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
d, e = blobstore.BlobInfo.get_multi([b.key(), str(c.key())])
self.assertEqual(d, b)
self.assertEqual(e, c)
def testBlobInfo_GetMultiAsync(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
df, ef = blobstore.BlobInfo.get_multi_async([str(b.key()), c.key()])
self.assertTrue(isinstance(df, tasklets.Future))
self.assertTrue(isinstance(ef, tasklets.Future))
d, e = df.get_result(), ef.get_result()
self.assertEqual(d, b)
self.assertEqual(e, c)
def testBlobInfo_Delete(self):
b = self.create_blobinfo('dummy')
c = blobstore.get(b._key.id())
self.assertEqual(c, b)
b.delete()
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobInfo_DeleteAsync(self):
b = self.create_blobinfo('dummy')
df = b.delete_async()
self.assertTrue(isinstance(df, tasklets.Future), df)
df.get_result()
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobstore_Get(self):
b = self.create_blobinfo('dummy')
c = blobstore.get(b.key())
self.assertEqual(c, b)
self.assertTrue(c is not b)
c = blobstore.get('dummy')
self.assertEqual(c, b)
self.assertTrue(c is not b)
def testBlobstore_GetAsync(self):
b = self.create_blobinfo('dummy')
cf = blobstore.get_async(b.key())
self.assertTrue(isinstance(cf, tasklets.Future))
c = cf.get_result()
self.assertEqual(c, b)
self.assertTrue(c is not b)
cf = blobstore.get_async('dummy')
c = cf.get_result()
self.assertEqual(c, b)
self.assertTrue(c is not b)
def testBlobstore_Delete(self):
b = self.create_blobinfo('dummy')
blobstore.delete(b.key())
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobstore_DeleteAsync(self):
b = self.create_blobinfo('dummy')
df = blobstore.delete_async(b.key())
self.assertTrue(isinstance(df, tasklets.Future), df)
df.get_result()
d = blobstore.get(b.key())
self.assertEqual(d, None)
def testBlobstore_DeleteMulti(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
blobstore.delete_multi([b.key(), str(c.key())])
d, e = blobstore.get_multi([b.key(), str(c.key())])
self.assertEqual(d, None)
self.assertEqual(e, None)
def testBlobstore_DeleteMultiAsync(self):
b = self.create_blobinfo('b')
c = self.create_blobinfo('c')
f = blobstore.delete_multi_async([b.key(), str(c.key())])
self.assertTrue(isinstance(f, tasklets.Future), f)
f.get_result()
d, e = blobstore.get_multi([b.key(), str(c.key())])
self.assertEqual(d, None)
self.assertEqual(e, None)
def testBlobstore_CreateUploadUrl(self):
url = blobstore.create_upload_url('/foo')
self.assertTrue('/_ah/upload/' in url, url)
def testBlobstore_CreateUploadUrlAsync(self):
urlf = blobstore.create_upload_url_async('/foo')
self.assertTrue(isinstance(urlf, tasklets.Future), urlf)
url = urlf.get_result()
self.assertTrue('/_ah/upload/' in url, url)
def testBlobstore_ParseBlobInfo_Errors(self):
nope = blobstore.parse_blob_info(None)
self.assertEqual(nope, None)
env = {'REQUEST_METHOD': 'POST'}
hdrs = {'content-disposition': 'blah; filename=hello.txt; name=hello',
'content-type': 'text/plain; blob-key=xxx'}
fd = cStringIO.StringIO(
'Content-length: 42\n'
'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
self.assertRaises(blobstore.BlobInfoParseError,
blobstore.parse_blob_info, fs)
fd = cStringIO.StringIO(
'Content-type: image/jpeg\n'
'Content-length: hello\n'
'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
self.assertRaises(blobstore.BlobInfoParseError,
blobstore.parse_blob_info, fs)
fd = cStringIO.StringIO(
'Content-type: image/jpeg\n'
'Content-length: 42\n'
'X-AppEngine-Upload-Creation: BLAH-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
self.assertRaises(blobstore.BlobInfoParseError,
blobstore.parse_blob_info, fs)
def testBlobstore_ParseBlobInfo(self):
env = {'REQUEST_METHOD': 'POST'}
hdrs = {'content-disposition': 'blah; filename=hello.txt; name=hello',
'content-type': 'text/plain; blob-key=xxx'}
fd = cStringIO.StringIO(
'Content-type: image/jpeg\n'
'Content-length: 42\n'
'X-AppEngine-Upload-Creation: 2012-01-24 17:35:00.000000\n'
'Content-MD5: eHh4\n'
'\n'
)
fs = cgi.FieldStorage(fd, headers=hdrs, environ=env)
bi = blobstore.parse_blob_info(fs)
self.assertTrue(isinstance(bi, blobstore.BlobInfo))
self.assertEqual(
bi,
blobstore.BlobInfo(key=model.Key(blobstore.BlobInfo, 'xxx'),
content_type='image/jpeg',
creation=datetime.datetime(2012, 1, 24, 17, 35),
filename='hello.txt',
md5_hash='xxx',
size=42))
def testBlobstore_FetchData(self):
self.create_blobinfo('xxx')
stub = self.testbed.get_stub('blobstore')
storage = stub.storage
storage._blobs['xxx'] = 'abcde'
result = blobstore.fetch_data('xxx', 0, 3) # Range is inclusive!
self.assertEqual(result, 'abcd')
def testBlobstore_FetchDataAsync(self):
b = self.create_blobinfo('xxx')
stub = self.testbed.get_stub('blobstore')
storage = stub.storage
storage._blobs['xxx'] = 'abcde'
fut = blobstore.fetch_data_async(b, 0, 2)
self.assertTrue(isinstance(fut, tasklets.Future), fut)
result = fut.get_result()
self.assertEqual(result, 'abc')
def testBlobInfo_Open(self):
b = self.create_blobinfo('xxx')
stub = self.testbed.get_stub('blobstore')
storage = stub.storage
storage._blobs['xxx'] = 'abcde'
f = b.open()
self.assertEqual(f.read(3), 'abc')
self.assertEqual(f.read(3), 'de')
self.assertEqual(f.blob_info, b)
def testBlobReader(self):
b = self.create_blobinfo('xxx')
stub = self.testbed.get_stub('blobstore')
storage = stub.storage
storage._blobs['xxx'] = 'abcde'
f = blobstore.BlobReader('xxx')
self.assertEqual(f.read(), 'abcde')
self.assertEqual(f.blob_info, b)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
apache-2.0
| 4,325,438,816,037,139,000
| 33.236667
| 78
| 0.644825
| false
| 3.323948
| true
| false
| false
|
Mbrownshoes/ckanext-bcgov
|
ckanext/bcgov/logic/ofi/__init__.py
|
1
|
4636
|
# Copyright 2016, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
#
# HighwayThree Solutions Inc.
# Author: Jared Smith <jrods@github>
import logging
from functools import wraps
from pprint import pprint, pformat
import requests as reqs
from ckan.common import request
import ckan.plugins.toolkit as toolkit
import ckanext.bcgov.util.helpers as edc_h
log = logging.getLogger(u'ckanext.bcgov.logic.ofi')
def check_access(action):
"""
Decorator for call_action functions to check authorization.
Even if the call_action doesn't need any authorization checks, there should still be
a defined auth check for the call_action.
"""
@wraps(action)
def wrapper(context, data):
toolkit.check_access(action.__name__, context, data)
return action(context, data)
return wrapper
def setup_ofi_action(api_url=None):
"""
Decorator for call_action functions.
This decorator should be used last before the actual call to the
call_action function
This sets up common params and options for call_action functions.
The api_url should be used for prerequisite use only, such as getting
DWDS file formats or CRS Types, etc. It doesn't support OFI POST API calls.
:param api_url: An OFI DWDS API endpoint or NoneType
:returns: call_action function location from logic.ofi.call_action,
              these args are mandatory for call_actions:
def call_action(context, data, ofi_resp)
"""
def action_decorator(action):
@wraps(action)
def wrapper(context, data):
"""
Context and data are args for get_action calls
:returns: call_action function location from logic.ofi.call_action,
            these args are mandatory for call_actions:
def call_action(context, data, ofi_resp)
"""
if u'secure' not in data:
data[u'secure'] = False
            # these action calls don't need to use the secure url
if action.__name__ in ['file_formats', 'crs_types']:
data.update(_prepare(False))
else:
data.update(_prepare(toolkit.asbool(data[u'secure'])))
if action.__name__ == 'edit_ofi_resources':
if u'package_id' not in data:
data[u'package_id'] = data.query_params.getone('package_id')
if u'object_name' not in data:
data[u'object_name'] = data.query_params.getone('object_name')
# allows the decorator to be used for just getting query params, cookies, etc.
if api_url is not None:
url = data[u'ofi_url'] + api_url
            # expecting additional pathing if the incoming api endpoint ends with a '/'
if api_url.endswith(u'/'):
if 'object_name' in data:
url += data[u'object_name']
data[u'api_url'] = url
call_type = u'Secure' if data[u'secure'] else u'Public' # call_type is for logging purposes
ofi_resp = _make_api_call(url, call_type=call_type, cookies=data[u'cookies'])
else:
ofi_resp = {}
return action(context, data, ofi_resp)
return wrapper
return action_decorator
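# Illustrative stacking of the two decorators above (the endpoint path is a
# hypothetical example; 'file_formats' mirrors an action name already
# special-cased in setup_ofi_action):
# @check_access
# @setup_ofi_action(u'/security/productAllowedByFeatureType/')
# def file_formats(context, data, ofi_resp):
#     return ofi_resp.json()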
def _prepare(secure=False):
ofi_vars = {}
ofi_vars[u'config'] = edc_h.get_ofi_config()
ofi_vars[u'cookies'] = {
u'SMSESSION': request.cookies.get(u'SMSESSION', '')
}
try:
ofi_vars[u'query_params'] = request.params
except ValueError, inst:
log.info('Bad Action API request data: %s', inst)
return {}
ofi_vars[u'secure'] = secure
ofi_vars[u'ofi_url'] = edc_h._build_ofi_url(secure)
return ofi_vars
def _make_api_call(api_url, call_type='Public', cookies=None):
log.info(u'OFI outgoing, call type: %s, api url: %s', call_type, api_url)
resp = reqs.get(api_url, cookies=cookies)
_log_response(resp, call_type)
return resp
def _log_response(resp, call_type):
log.debug(u'OFI response, api response:\n %s', pformat({
u'url': resp.url,
u'status': resp.status_code,
u'reason': resp.reason,
u'headers': resp.headers,
u'cookies': resp.cookies,
u'elapsed': str(resp.elapsed.total_seconds()) + u's'
}))
log.debug(u'OFI api content: %s', pformat(resp.text))
class OFIServiceError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
agpl-3.0
| -3,168,087,227,463,341,000
| 30.537415
| 108
| 0.608067
| false
| 3.661927
| false
| false
| false
|
hasgeek/funnel
|
migrations/versions/c47007758ee6_add_email_address_active_at.py
|
1
|
1589
|
"""Add email_address.active_at.
Revision ID: c47007758ee6
Revises: b7fa6df99855
Create Date: 2020-08-20 21:47:43.356619
"""
from alembic import op
from sqlalchemy import column, table
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c47007758ee6'
down_revision = 'b7fa6df99855'
branch_labels = None
depends_on = None
class DELIVERY_STATE: # noqa: N801
SENT = 1
ACTIVE = 2
email_address = table(
'email_address',
column('id', sa.Integer()),
column('delivery_state', sa.Integer()),
column('delivery_state_at', sa.TIMESTAMP(timezone=True)),
column('active_at', sa.TIMESTAMP(timezone=True)),
)
def upgrade():
op.add_column(
'email_address',
sa.Column('active_at', sa.TIMESTAMP(timezone=True), nullable=True),
)
op.execute(
email_address.update()
.where(email_address.c.delivery_state == DELIVERY_STATE.ACTIVE)
.values(
{
'active_at': email_address.c.delivery_state_at,
'delivery_state': DELIVERY_STATE.SENT,
}
)
)
op.create_check_constraint(
'email_address_delivery_state_check',
'email_address',
'delivery_state IN (0, 1, 3, 4)',
)
def downgrade():
op.drop_constraint(
'email_address_delivery_state_check', 'email_address', type_='check'
)
op.execute(
email_address.update()
.where(email_address.c.active_at.isnot(None))
.values({'delivery_state': DELIVERY_STATE.ACTIVE})
)
op.drop_column('email_address', 'active_at')
|
agpl-3.0
| 8,731,557,630,074,718,000
| 23.446154
| 76
| 0.623033
| false
| 3.303534
| false
| false
| false
|
adrianomargarin/wttd-eventex
|
eventex/core/migrations/0007_course.py
|
1
|
1116
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-09 01:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20171008_2259'),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Título')),
('start', models.TimeField(blank=True, null=True, verbose_name='Início')),
('description', models.TextField(blank=True, verbose_name='Descrição')),
('slots', models.IntegerField()),
('speakers', models.ManyToManyField(blank=True, to='core.Speaker', verbose_name='Palestrantes')),
],
options={
'verbose_name': 'Palestra',
'abstract': False,
'verbose_name_plural': 'Palestras',
},
),
]
|
gpl-3.0
| 6,264,919,628,078,854,000
| 34.870968
| 114
| 0.556655
| false
| 4.103321
| false
| false
| false
|
aswarren/GOGranny
|
tests/SteinerTreeTest.py
|
1
|
2069
|
from GOGranny import *
from networkx import *
import unittest
class TestSteinerTree(unittest.TestCase):
def stree(self, edges, voi):
g = Graph()
for edge in edges:
g.add_edge(edge[0],edge[1],weight=edge[2])
return make_steiner_tree(g, voi)
def testSteinerTrees(self):
edges = [("a", "b", 1), ("a", "c", 5), ("a", "e", 2), ("a", "d", 2), ("b", "c", 2), ("c", "d", 3), ("e", "d", 6)]
st = self.stree(edges, ['c', 'e', 'a'])
self.failUnless(st.edges(data=True) == [('a', 'b',{'weight':1}), ('a', 'e', {'weight':2}), ('c', 'b', {'weight':2})])
edges = [('a', 'b', 3), ('b', 'c', 4), ('c', 'd', 5), ('a', 'e', 1), ('e', 'd', 1)]
st = self.stree(edges, ['b', 'd'])
self.failUnless(st.edges(data=True) == [('a', 'b', {'weight':3}), ('a', 'e',{'weight':1}), ('e', 'd', {'weight':1})])
edges = [('a', 'b', 4), ('a', 'c', 4), ('b', 'c', 4)]
st = self.stree(edges, ['a', 'b', 'c'])
self.failUnless(st.edges(data=True) == [('a', 'c', {'weight':4}), ('a', 'b', {'weight':4})])
# from the markowsky paper
edges = [('v1', 'v9', 1), ('v1', 'v2', 10), ('v8', 'v9', .5), ('v9', 'v5', 1), ('v8', 'v7', .5), ('v7', 'v6', 1), ('v6', 'v5', 1), ('v2', 'v6', 1),
('v2', 'v3', 8), ('v3', 'v5', 2), ('v5', 'v4', 2), ('v3', 'v4', 9)]
st = self.stree(edges, ['v1', 'v2', 'v3', 'v4'])
self.failUnless(st.edges(data=True) == [('v1', 'v9', {'weight':1}), ('v2', 'v6', {'weight':1}), ('v3', 'v5', {'weight':2}), ('v4', 'v5', {'weight':2}), ('v5', 'v9', {'weight':1}), ('v5', 'v6', {'weight':1})])
edges = [('a', 'b', 0), ('b', 'c', 0), ('a', 'd', 3), ('b', 'd', 3), ('c', 'd', 3)]
st = self.stree(edges, ['a', 'b', 'c', 'd'])
self.failUnless(st.edges(data=True) == [('a', 'b', {'weight':0}), ('a', 'd', {'weight':3}), ('c', 'b', {'weight':0})])
edges = [('a', 'b', 0), ('b', 'c', 0), ('a', 'd', 3), ('b', 'd', 3), ('c', 'd', 3), ('d', 'e', 1)]
st = self.stree(edges, ['a', 'b', 'c', 'e'])
self.failUnless(st.edges(data=True) == [('a', 'b', {'weight':0}), ('a', 'd', {'weight':3}), ('c', 'b', {'weight':0}), ('e', 'd', {'weight':1})])
|
gpl-2.0
| -5,228,691,003,820,098,000
| 52.051282
| 209
| 0.429193
| false
| 2.217578
| false
| false
| false
|
padmashrimallapur/report
|
report.py
|
1
|
3153
|
#!/usr/local/bin/python
"""
Creating the report of top 50 domains for 30 days
It is assumed that mailing table is repopulated every day.
The report will display the top 50 domain sorting with their growth percentage.
"""
import MySQLdb
import datetime
mysqlconfig = {'host': 'localhost',
'username': 'root',
'password': 'mysqlroot',
'dbName': 'indexdb'}
def __test_insert():
conn = connection()
cur = conn.cursor()
cur.execute("TRUNCATE mailing")
conn.commit()
for i in range(0, 10):
insert = "INSERT INTO mailing (addr) VALUES ('id%s@xyz.com')" % i
cur.execute(insert)
for i in range(0, 5):
insert = "INSERT INTO mailing (addr) VALUES ('id%s@abc.com')" % i
cur.execute(insert)
for i in range(0, 25):
insert = "INSERT INTO mailing (addr) VALUES ('id%s@test.com')" % i
cur.execute(insert)
conn.commit()
def connection():
conn = MySQLdb.connect(mysqlconfig['host'],
mysqlconfig['username'],
mysqlconfig['password'],
mysqlconfig['dbName'])
return conn
def daterange():
previous_month_day = datetime.datetime.now() - datetime.timedelta(days=30)
earlier = previous_month_day.strftime("%Y-%m-%d")
current_day = datetime.datetime.now().strftime("%Y-%m-%d")
return earlier, current_day
def updateDomains(today):
conn = connection()
cursor = conn.cursor()
cursor.execute("SELECT addr FROM mailing")
for email in cursor.fetchall():
domain = email[0].split("@")[1]
cursor.execute("SELECT count FROM domain_count WHERE domain = %s AND date = %s", (domain, today))
count = cursor.fetchone()
if count is not None:
count = count[0] + 1
cursor.execute("UPDATE domain_count SET count= %s WHERE domain = %s AND date = %s", (count, domain, today))
else:
count = 1
cursor.execute("INSERT INTO domain_count (domain, count, date)VALUES (%s, %s, %s)", (domain, count, today))
conn.commit()
def printReport(fromdate, todate):
conn = connection()
cursor = conn.cursor()
cursor.execute("SELECT SUM(COUNT) FROM domain_count")
total = cursor.fetchone()[0]
cursor.execute("SELECT count,domain FROM `domain_count` WHERE date> %s or date< %s ORDER BY count DESC LIMIT 50",
(fromdate, todate))
total_domains = cursor.fetchall()
if len(total_domains) > 0:
print("Top 50 domains of last 30 days")
fmt = "{0}{1}{0}{2}{0}".format("+", "-"*14, "-"*13)
print fmt
print "{0} {1:9} {0:>4} {2:>4} {0}".format("|", "Domain", "Growth in %")
print fmt
for row in total_domains:
growth = (row[0] * 100) / total
print "{0} {1:9} {0:>4} {2:>4} {0:>7}".format("|", row[1], round(growth, 2))
else:
print "No data available"
if __name__ == '__main__':
#This is only for test insert...
__test_insert()
earlier_str, today = daterange()
updateDomains(today)
printReport(earlier_str, today)
|
gpl-2.0
| -6,065,980,197,356,007,000
| 29.028571
| 119
| 0.579448
| false
| 3.683411
| false
| false
| false
|
rjdp/cement
|
cement/utils/misc.py
|
1
|
6108
|
"""Misc utilities."""
import os
import sys
import logging
import hashlib
from textwrap import TextWrapper
from random import random
def rando(salt=None):
"""
Generate a random MD5 hash for whatever purpose. Useful for testing
or any other time that something random is required.
:param salt: Optional 'salt', if None then random() is used.
:returns: Random MD5 hash (str).
"""
if salt is None:
salt = random()
return hashlib.md5(str(salt).encode()).hexdigest()
# class NullLogger(object):
# def __init__(self, namespace, debug, *args, **kw):
# pass
# def info(self, *args, **kw):
# pass
# def warn(self, *args, **kw):
# pass
# def error(self, *args, **kw):
# pass
# def fatal(self, *args, **kw):
# pass
# def debug(self, *args, **kw):
# pass
class MinimalLogger(object):
def __init__(self, namespace, debug, *args, **kw):
self.namespace = namespace
self.backend = logging.getLogger(namespace)
formatter = logging.Formatter(
"%(asctime)s (%(levelname)s) %(namespace)s : %(message)s"
)
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.INFO)
self.backend.setLevel(logging.INFO)
# FIX ME: really don't want to hard check sys.argv like this but
        # can't figure any better way to get logging started (only for debug)
# before the app logging is setup.
if '--debug' in sys.argv or debug:
console.setLevel(logging.DEBUG)
self.backend.setLevel(logging.DEBUG)
self.backend.addHandler(console)
def _get_logging_kwargs(self, namespace, **kw):
if not namespace:
namespace = self.namespace
if 'extra' in kw.keys() and 'namespace' in kw['extra'].keys():
pass
elif 'extra' in kw.keys() and 'namespace' not in kw['extra'].keys():
kw['extra']['namespace'] = namespace
else:
kw['extra'] = dict(namespace=namespace)
return kw
@property
def logging_is_enabled(self):
if 'CEMENT_FRAMEWORK_LOGGING' in os.environ.keys():
if is_true(os.environ['CEMENT_FRAMEWORK_LOGGING']):
res = True
else:
res = False
else:
res = True
return res
def info(self, msg, namespace=None, **kw):
if self.logging_is_enabled:
kwargs = self._get_logging_kwargs(namespace, **kw)
self.backend.info(msg, **kwargs)
def warn(self, msg, namespace=None, **kw):
if self.logging_is_enabled:
kwargs = self._get_logging_kwargs(namespace, **kw)
self.backend.warn(msg, **kwargs)
def error(self, msg, namespace=None, **kw):
if self.logging_is_enabled:
kwargs = self._get_logging_kwargs(namespace, **kw)
self.backend.error(msg, **kwargs)
def fatal(self, msg, namespace=None, **kw):
if self.logging_is_enabled:
kwargs = self._get_logging_kwargs(namespace, **kw)
self.backend.fatal(msg, **kwargs)
def debug(self, msg, namespace=None, **kw):
if self.logging_is_enabled:
kwargs = self._get_logging_kwargs(namespace, **kw)
self.backend.debug(msg, **kwargs)
def init_defaults(*sections):
"""
Returns a standard dictionary object to use for application defaults.
If sections are given, it will create a nested dict for each section name.
:arg sections: Section keys to create nested dictionaries for.
:returns: Dictionary of nested dictionaries (sections)
:rtype: dict
.. code-block:: python
from cement.core import foundation
from cement.utils.misc import init_defaults
defaults = init_defaults('myapp', 'section2', 'section3')
defaults['myapp']['debug'] = False
        defaults['section2']['foo'] = 'bar'
defaults['section3']['foo2'] = 'bar2'
app = foundation.CementApp('myapp', config_defaults=defaults)
"""
defaults = dict()
for section in sections:
defaults[section] = dict()
return defaults
def minimal_logger(namespace, debug=False):
"""
Setup just enough for cement to be able to do debug logging. This is the
logger used by the Cement framework, which is setup and accessed before
the application is functional (and more importantly before the
    application's log handler is usable).
:param namespace: The logging namespace. This is generally '__name__' or
anything you want.
:param debug: Toggle debug output. Default: False
:type debug: boolean
:returns: Logger object
.. code-block:: python
from cement.utils.misc import minimal_logger
LOG = minimal_logger('cement')
LOG.debug('This is a debug message')
"""
return MinimalLogger(namespace, debug)
def is_true(item):
"""
Given a value, determine if it is one of [True, 'True', 'true', 1, '1'].
:param item: The item to convert to a boolean.
:returns: True if `item` is in ``[True, 'True', 'true', 1, '1']``, False
otherwise.
:rtype: boolean
"""
if item in [True, 'True', 'true', 1, '1']:
return True
else:
return False
def wrap(text, width=77, indent='', long_words=False, hyphens=False):
"""
Wrap text for cleaner output (this is a simple wrapper around
`textwrap.TextWrapper` in the standard library).
:param text: The text to wrap
:param width: The max width of a line before breaking
:param indent: String to prefix subsequent lines after breaking
:param long_words: Break on long words
:param hyphens: Break on hyphens
:returns: str(text)
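    Usage (illustrative; values are arbitrary):
    .. code-block:: python
        from cement.utils.misc import wrap
        text = "a fairly long line of text that should be wrapped for display"
        wrapped = wrap(text, width=25, indent='> ')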
"""
if type(text) != str:
raise TypeError("`text` must be a string.")
wrapper = TextWrapper(subsequent_indent=indent, width=width,
break_long_words=long_words,
break_on_hyphens=hyphens)
return wrapper.fill(text)
|
bsd-3-clause
| -3,778,632,260,896,210,000
| 28.941176
| 78
| 0.609037
| false
| 4.007874
| false
| false
| false
|
bjuvensjo/scripts
|
vang/git/patchish_refs.py
|
1
|
3165
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from pprint import pprint
from re import match
from vang.pio.shell import run_command
from vang.pio.synchronize_dirs import synchronize_dirs
def apply_patch(patch_repo, apply_repo, ref):
print('Apply patch', ref, patch_repo, apply_repo, ref)
rc, output = run_command(f'git checkout {ref}', True, patch_repo)
print(output)
synchronize_dirs(patch_repo, apply_repo)
rc, output = run_command('git status', True, apply_repo)
print(output)
if 'nothing to commit' in output:
rc, output = run_command(f'git tag -a {ref} -m {ref}', True, apply_repo)
print(output)
else:
for cmd in [
'git add --all',
f'git commit -m {ref}',
f'git tag -a {ref} -m {ref}',
]:
rc, output = run_command(cmd, True, apply_repo)
print(output)
return ref
def get_refs(repo, ref_pattern):
return [
ref for ref in run_command('git tag', True, repo)[1].split('\n')
if match(r'{}'.format(ref_pattern), ref)
]
def get_unpatched_refs(patchs_refs, applied_refs):
return [p for p in patchs_refs if p not in applied_refs]
def is_valid(patchs_refs, applied_refs):
return all([p == a for p, a in zip(patchs_refs, applied_refs)
]) and not len(applied_refs) > len(patchs_refs)
def main(patch_repo, ref_pattern, apply_repo):
patchs_refs = get_refs(patch_repo, ref_pattern)
applied_refs = get_refs(apply_repo, ref_pattern)
if is_valid(patchs_refs, applied_refs):
unpatched_refs = get_unpatched_refs(patchs_refs, applied_refs)
applied_patches = []
for ref in unpatched_refs:
apply_patch(patch_repo, apply_repo, ref)
applied_patches.append(ref)
return applied_patches
else:
raise ValueError('Tags are not valid.')
def parse_args(args):
    parser = ArgumentParser(description='Create patches of refs and apply, ' +
                            'commit and tag them in another repo.')
parser.add_argument('ref_pattern', help='A ref pattern.')
parser.add_argument('apply_repo', help='The repo to apply patches to.')
parser.add_argument(
'-p',
'--patch_repo',
help='The repo to patch from.',
default='.',
)
parser.add_argument(
'-o',
'--output',
help='A directory to put patches in.',
default='./patch',
)
return parser.parse_args(args)
# rm -rf sign/patch; rm -rf lf-sign; md lf-sign; cd lf-sign; git init
# main(
# '/Users/ei4577/slask/slask/PCS1806/sign',
# 'release.*',
# '/Users/ei4577/slask/slask/PCS1806/lf-sign',
# )
# rm -rf lf-process.mortgage; md lf-process.mortgage; cd lf-process.mortgage; git init
the_applied_patches = main(
'/Users/ei4577/slask/slask/PCS1806/process.mortgage',
'release.*',
'/Users/ei4577/slask/slask/PCS1806/lf-process.mortgage',
)
pprint(the_applied_patches)
# if __name__ == '__main__': # pragma: no cover
# args = parse_args(argv[1:])
# main(args.patch_repo, args.ref_pattern, args.output, args.apply_repo)
|
apache-2.0
| 6,775,928,350,807,584,000
| 30.336634
| 86
| 0.617378
| false
| 3.283195
| false
| false
| false
|
amirgeva/coide
|
system.py
|
1
|
2487
|
import utils
import re
from multiprocessing import Process, Queue
import callbacks
def libraryDirs():
out,err=utils.call('.','ld','--verbose')
return re.findall('SEARCH_DIR\("=([^"]+)"\);',out)
def listAllPackages():
res=set()
try:
all,err=utils.call('.','pkg-config','--list-all')
lines=all.splitlines()
for line in lines:
name=(line.split(' '))[0]
res.add(name)
except OSError:
pass
return sorted(list(res))
def symbolScan(q,ws):
import symbolscanner
q.put(symbolscanner.getLibrarySymbols(ws))
noMP=False
scanq=Queue()
workspacePath=''
scannerProcess=None
scanStarted=False
libSyms=None
wsSyms=None
wsLibs=None
def isScannerDone():
if scanq:
return not scanq.empty()
return True
def disableSymbolScan():
global libSyms
global wsSyms
global wsLibs
global scanq
libSyms={}
wsSyms={}
wsLibs={}
scanq=None
def startSymbolScan(ws):
utils.timestamp('start scan process')
if not noMP:
global scannerProcess
global scanStarted
global workspacePath
if scanq and not scanStarted:
scanStarted=True
workspacePath=ws
scannerProcess=Process(target=symbolScan,args=(scanq,workspacePath))
scannerProcess.start()
else:
global libSyms
global wsSyms
global wsLibs
import symbolscanner
(libSyms,wsSyms,wsLibs)=symbolscanner.getLibrarySymbols(workspacePath)
def getLibrarySymbols():
global libSyms
global wsSyms
global wsLibs
global scannerProcess
global scanq
if not libSyms:
if not scanq:
libSyms={}
wsSyms={}
wsLibs={}
else:
utils.timestamp('Getting scan results from queue')
(libSyms,wsSyms,wsLibs)=scanq.get()
utils.timestamp('Done queue get')
if scannerProcess:
utils.timestamp('Joining scan process')
scannerProcess.join()
utils.timestamp('Done join')
scannerProcess=None
if scanq:
scanq.close()
scanq=None
import symbolscanner
symbolscanner.setInitialResults(workspacePath,libSyms,wsSyms,wsLibs)
return libSyms
def getWorkspaceSymbols():
getLibrarySymbols()
return wsSyms
callbacks.closeCallbacks.append(getLibrarySymbols)
if __name__=='__main__':
getLibrarySymbols()
|
gpl-2.0
| 7,818,529,345,900,477,000
| 22.471698
| 80
| 0.622437
| false
| 3.966507
| false
| false
| false
|
Workday/OpenFrame
|
tools/telemetry/telemetry/internal/platform/profiler/android_profiling_helper_unittest.py
|
1
|
7180
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pickle
import re
import shutil
import tempfile
import time
import unittest
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.platform.profiler import android_profiling_helper
from telemetry.testing import simple_mock
from telemetry.testing import tab_test_case
def _GetLibrariesMappedIntoProcesses(device, pids):
libs = set()
for pid in pids:
maps_file = '/proc/%d/maps' % pid
maps = device.ReadFile(maps_file, as_root=True).splitlines()
for map_line in maps:
lib = re.match(r'.*\s(/.*[.]so)$', map_line)
if lib:
libs.add(lib.group(1))
return libs
class TestFileMetadataMatches(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.filename_a = os.path.join(self.tempdir, 'filea')
self.filename_b = os.path.join(self.tempdir, 'fileb')
with open(self.filename_a, 'w') as f:
f.write('testing')
def tearDown(self):
shutil.rmtree(self.tempdir)
def testDoesntMatchNonExistant(self):
self.assertFalse(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
def testDoesntMatchJustExistence(self):
with open(self.filename_b, 'w') as f:
f.write('blah')
self.assertFalse(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
def testDoesntMatchCopy(self):
# This test can run so fast that the file system doesn't have enough
# accuracy to differentiate between the copy and initial file times.
# Hence we need to guarantee a delay here.
time.sleep(3)
shutil.copy(self.filename_a, self.filename_b)
self.assertFalse(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
def testMatchesAfterCopy2(self):
shutil.copy2(self.filename_a, self.filename_b)
self.assertTrue(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
def testDoesntMatchAfterCopy2ThenModify(self):
shutil.copy2(self.filename_a, self.filename_b)
filea = open(self.filename_a, 'w')
filea.write('moar testing!')
filea.close()
self.assertFalse(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
def testDoesntMatchAfterCopy2ThenModifyStats(self):
shutil.copy2(self.filename_a, self.filename_b)
os.utime(self.filename_a, (20, 20))
self.assertFalse(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
def testMatchesAfterCopyStatWithDifferentContent(self):
fileb = open(self.filename_b, 'w')
fileb.write('blahing')
fileb.close()
shutil.copystat(self.filename_a, self.filename_b)
self.assertTrue(
android_profiling_helper._FileMetadataMatches(
self.filename_a, self.filename_b))
class TestAndroidProfilingHelper(unittest.TestCase):
def testGetRequiredLibrariesForPerfProfile(self):
perf_output = os.path.join(
util.GetUnittestDataDir(), 'sample_perf_report_output.txt')
with open(perf_output) as f:
perf_output = f.read()
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn([None, perf_output])
mock_subprocess = simple_mock.MockObject()
mock_subprocess.ExpectCall(
'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
real_subprocess = android_profiling_helper.subprocess
android_profiling_helper.subprocess = mock_subprocess
try:
libs = android_profiling_helper.GetRequiredLibrariesForPerfProfile('foo')
self.assertEqual(libs, set([
'/data/app-lib/com.google.android.apps.chrome-2/libchrome.2016.0.so',
'/system/lib/libart.so',
'/system/lib/libc.so',
'/system/lib/libm.so']))
finally:
android_profiling_helper.subprocess = real_subprocess
@decorators.Enabled('android')
def testGetRequiredLibrariesForVTuneProfile(self):
vtune_db_output = os.path.join(
util.GetUnittestDataDir(), 'sample_vtune_db_output')
with open(vtune_db_output, 'rb') as f:
vtune_db_output = pickle.load(f)
mock_cursor = simple_mock.MockObject()
mock_cursor.ExpectCall(
'execute').WithArgs(simple_mock.DONT_CARE).WillReturn(vtune_db_output)
mock_conn = simple_mock.MockObject()
mock_conn.ExpectCall('cursor').WillReturn(mock_cursor)
mock_conn.ExpectCall('close')
mock_sqlite3 = simple_mock.MockObject()
mock_sqlite3.ExpectCall(
'connect').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_conn)
real_sqlite3 = android_profiling_helper.sqlite3
android_profiling_helper.sqlite3 = mock_sqlite3
try:
libs = android_profiling_helper.GetRequiredLibrariesForVTuneProfile('foo')
self.assertEqual(libs, set([
'/data/app-lib/com.google.android.apps.chrome-1/libchrome.2019.0.so',
'/system/lib/libdvm.so',
'/system/lib/libc.so',
'/system/lib/libm.so']))
finally:
android_profiling_helper.sqlite3 = real_sqlite3
class TestAndroidProfilingHelperTabTestCase(tab_test_case.TabTestCase):
def setUp(self):
super(TestAndroidProfilingHelperTabTestCase, self).setUp()
# pylint: disable=protected-access
browser_backend = self._browser._browser_backend
self._device = browser_backend.device()
@decorators.Enabled('android')
def testCreateSymFs(self):
# pylint: disable=protected-access
browser_pid = self._browser._browser_backend.pid
pids = ([browser_pid] +
self._browser._platform_backend.GetChildPids(browser_pid))
libs = _GetLibrariesMappedIntoProcesses(self._device, pids)
assert libs
symfs_dir = tempfile.mkdtemp()
try:
kallsyms = android_profiling_helper.CreateSymFs(self._device, symfs_dir,
libs)
# Check that we have kernel symbols.
assert os.path.exists(kallsyms)
is_unstripped = re.compile(r'^/data/app(-lib)?/.*\.so$')
has_unstripped = False
# Check that all requested libraries are present.
for lib in libs:
has_unstripped = has_unstripped or is_unstripped.match(lib)
assert os.path.exists(os.path.join(symfs_dir, lib[1:])), \
'%s not found in symfs' % lib
# Make sure we found at least one unstripped library.
assert has_unstripped
finally:
shutil.rmtree(symfs_dir)
# Test fails: crbug.com/437081
# @decorators.Enabled('android')
@decorators.Disabled('all')
def testGetToolchainBinaryPath(self):
with tempfile.NamedTemporaryFile() as libc:
self._device.PullFile('/system/lib/libc.so', libc.name)
path = android_profiling_helper.GetToolchainBinaryPath(libc.name,
'objdump')
assert path and os.path.exists(path)
|
bsd-3-clause
| 6,226,470,640,319,619,000
| 33.190476
| 80
| 0.683426
| false
| 3.64467
| true
| false
| false
|
jaeilepp/mne-python
|
mne/tests/test_dipole.py
|
1
|
15664
|
import os
import os.path as op
import sys
import warnings
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_allclose
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events)
from mne.dipole import get_phantom_dipoles
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import (run_tests_if_main, _TempDir, slow_test, requires_mne,
run_subprocess)
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-2-src.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert_equal(orig.name, new.name)
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert_equal(len(dip), n_dipoles)
assert_equal(dip.pos.shape, (n_dipoles, 3))
assert_equal(dip.ori.shape, (n_dipoles, 3))
assert_equal(dip.gof.shape, (n_dipoles,))
assert_equal(dip.amplitude.shape, (n_dipoles,))
@testing.requires_testing_data
def test_io_dipoles():
"""Test IO for .dip files."""
tempdir = _TempDir()
dipole = read_dipole(fname_dip)
print(dipole) # test repr
out_fname = op.join(tempdir, 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@testing.requires_testing_data
def test_dipole_fitting_ctf():
"""Test dipole fitting with CTF data."""
raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True)
events = make_fixed_length_events(raw_ctf, 1)
evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average()
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.))
# XXX Eventually we should do some better checks about accuracy, but
# for now our CTF phantom fitting tutorials will have to do
# (otherwise we need to add that to the testing dataset, which is
# a bit too big)
fit_dipole(evoked, cov, sphere)
@slow_test
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting():
"""Test dipole fitting."""
amp = 10e-9
tempdir = _TempDir()
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
factor = 1.
    # XXX weird, inexplicable difference for 3.5 build we'll assume is due to
# Anaconda bug for now...
if os.getenv('TRAVIS', 'false') == 'true' and \
sys.version[:3] in ('3.5', '2.7'):
factor = 0.8
assert_true((data_rms > factor * resi_rms).all(),
msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), factor))
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
assert_equal(fwd['src'][0]['coord_frame'], 5)
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
out = dip.crop(dip_c.times[0], dip_c.times[-1])
assert_true(dip is out)
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did at least as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
assert_true(dists[0] >= dists[1] * factor, 'dists: %s' % dists)
assert_true(corrs[0] <= corrs[1] / factor, 'corrs: %s' % corrs)
assert_true(gc_dists[0] >= gc_dists[1] * factor,
'gc-dists (ori): %s' % gc_dists)
assert_true(amp_errs[0] >= amp_errs[1] * factor,
'amplitude errors: %s' % amp_errs)
assert_true(gofs[0] <= gofs[1] / factor, 'gof: %s' % gofs)
@testing.requires_testing_data
def test_dipole_fitting_fixed():
"""Test dipole fitting with a fixed position."""
tpeak = 0.073
sphere = make_sphere_model(head_radius=0.1)
evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
evoked.pick_types(meg=True)
t_idx = np.argmin(np.abs(tpeak - evoked.times))
evoked_crop = evoked.copy().crop(tpeak, tpeak)
assert_equal(len(evoked_crop.times), 1)
cov = read_cov(fname_cov)
dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
assert_true(isinstance(dip_seq, Dipole))
assert_equal(len(dip_seq.times), 1)
pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
amp = dip_seq.amplitude[0]
# Fix position, allow orientation to change
dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
assert_true(isinstance(dip_free, Dipole))
assert_allclose(dip_free.times, evoked.times)
assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
dip_free.pos)
assert_allclose(ori, dip_free.ori[t_idx]) # should find same ori
    assert_true(np.dot(dip_free.ori, ori).mean() < 0.9)  # but few are the same
assert_allclose(gof, dip_free.gof[t_idx]) # ... same gof
assert_allclose(amp, dip_free.amplitude[t_idx]) # and same amp
assert_allclose(resid, resid_free[:, [t_idx]])
# Fix position and orientation
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
assert_true(isinstance(dip_fixed, DipoleFixed))
assert_allclose(dip_fixed.times, evoked.times)
assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
assert_allclose(dip_fixed.data[1, t_idx], gof)
assert_allclose(resid, resid_fixed[:, [t_idx]])
_check_roundtrip_fixed(dip_fixed)
# Degenerate conditions
evoked_nan = evoked.copy().crop(0, 0)
evoked_nan.data[0, 0] = None
assert_raises(ValueError, fit_dipole, evoked_nan, cov, sphere)
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
ori=[2, 0, 0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects."""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull."""
subject = 'sample'
raw = read_raw_fif(fname_raw, preload=True)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
    # Simulated scalp map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
bem = read_bem_solution(fname_bem)
dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
min_dist=min_dist)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
# Constraints are not exact, so bump the minimum slightly
assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth."""
trans = _get_trans(fname_trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
"""Test dipole fitting to sub-mm accuracy."""
evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
evoked.pick_types(meg=True, eeg=False)
evoked.pick_channels([c for c in evoked.ch_names[::4]])
for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
bem = make_sphere_model('auto', rad, evoked.info,
relative_radii=(0.999, 0.998, 0.997, 0.995))
src = read_source_spaces(fname_src)
fwd = make_forward_solution(evoked.info, None, src, bem)
fwd = convert_forward_solution(fwd, force_fixed=True)
vertices = [src[0]['vertno'], src[1]['vertno']]
n_vertices = sum(len(v) for v in vertices)
amp = 10e-9
data = np.eye(n_vertices + 1)[:n_vertices]
data[-1, -1] = 1.
data *= amp
stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
sim = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
cov = make_ad_hoc_cov(evoked.info)
dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
ds = []
for vi in range(n_vertices):
if vi < len(vertices[0]):
hi = 0
vertno = vi
else:
hi = 1
vertno = vi - len(vertices[0])
vertno = src[hi]['vertno'][vertno]
rr = src[hi]['rr'][vertno]
d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
ds.append(d)
# make sure that our median is sub-mm and the large majority are very
# close (we expect some to be off by a bit e.g. because they are
# radial)
assert_true((np.percentile(ds, [50, 90]) < [0.0005, perc_90]).all())
@testing.requires_testing_data
def test_dipole_fixed():
"""Test reading a fixed-position dipole (from Xfit)."""
dip = read_dipole(fname_xfit_dip)
    # print the representation of the object DipoleFixed
print(dip)
_check_roundtrip_fixed(dip)
with warnings.catch_warnings(record=True) as w: # unused fields
dip_txt = read_dipole(fname_xfit_dip_txt)
assert_true(any('extra fields' in str(ww.message) for ww in w))
assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
assert_allclose(dip_txt.amplitude[0], 12.1e-9)
with warnings.catch_warnings(record=True): # unused fields
dip_txt_seq = read_dipole(fname_xfit_seq_txt)
assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip):
"""Helper to test roundtrip IO for fixed dipoles."""
tempdir = _TempDir()
dip.save(op.join(tempdir, 'test-dip.fif.gz'))
dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
    assert_allclose(dip_read.data, dip.data)
assert_allclose(dip_read.times, dip.times)
assert_equal(dip_read.info['xplotter_layout'], dip.info['xplotter_layout'])
assert_equal(dip_read.ch_names, dip.ch_names)
for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
assert_equal(ch_1['ch_name'], ch_2['ch_name'])
for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
'cal', 'coil_type', 'scanno', 'logno'):
assert_allclose(ch_1[key], ch_2[key], err_msg=key)
def test_get_phantom_dipoles():
"""Test getting phantom dipole locations."""
assert_raises(ValueError, get_phantom_dipoles, 0)
assert_raises(ValueError, get_phantom_dipoles, 'foo')
for kind in ('vectorview', 'otaniemi'):
pos, ori = get_phantom_dipoles(kind)
assert_equal(pos.shape, (32, 3))
assert_equal(ori.shape, (32, 3))
run_tests_if_main(False)
|
bsd-3-clause
| 2,016,151,737,440,554,000
| 40.112861
| 79
| 0.617658
| false
| 2.947131
| true
| false
| false
|
mikeboers/aque
|
aque/commands/submit.py
|
1
|
2219
|
"""aque submit - Schedule a shell command.
Schedules the given command to run on the queue. The environment will have an
extra $AQUE_TID variable containing the ID of the running task.
E.g.:
$ aque submit --shell 'echo $AQUE_TID says: "$@"' one two three
"""
import argparse
import os
import sys
from aque.commands.main import main, command, argument
from aque import utils
@command(
argument('--cwd', help='where to run the task (default: current directory)'),
#argument('--stdin', help='path to read stdin from; "-" means this stdin (which is fully read before the task is submitted)'),
#argument('--stdout', help='path to write stdout to'),
#argument('--stderr', help='path to write stderr to'),
argument('-n', '--name', help='the task\'s name (for `aque status`)'),
argument('-p', '--priority', type=int, help='higher ones go first'),
argument('-c', '--cpus', type=int, help='how many CPUs to use per task'),
argument('--host', help='the host(s) to run on'),
argument('--platform', help='the platform to run on'),
argument('-s', '--shell', action='store_true', help='''the first argument is
executed as a shell script, with the rest provided to it as arguments'''),
argument('-w', '--watch', action='store_true', help='watch the stdout/stderr of the task as it executes'),
argument('command', nargs=argparse.REMAINDER, metavar='COMMAND', help='the command to run'),
help='schedule a shell command',
description=__doc__,
aliases=['s', 'sub'],
)
def submit(args):
cmd = list(args.command)
if args.shell:
cmd.insert(0, os.environ.get('SHELL', '/bin/bash'))
cmd.insert(1, '-c')
cmd.insert(3, 'aque-submit')
options = {'environ': os.environ}
for k in ('cpus', 'cwd', 'host', 'platform', 'priority'):
v = getattr(args, k, None)
if v is not None:
options[k] = getattr(args, k)
options.setdefault('io_paths', utils.paths_from_args(cmd))
name = args.name or ' '.join(cmd)
future = args.queue.submit_ex(pattern='shell', args=cmd, name=name, **options)
if args.watch:
return main(['output', '--watch', str(future.id)])
print future.id
|
bsd-3-clause
| -4,217,119,700,614,682,000
| 32.621212
| 130
| 0.632267
| false
| 3.686047
| false
| false
| false
|
iofun/colony
|
colony/system/kmeans.py
|
1
|
19928
|
# -*- coding: utf-8 -*-
'''
Colony k-means clustering system logic.
'''
# This file is part of colony.
__author__ = 'Team Machine'
__doc__ = '''
#k-means algorithm
k-means clustering is a method of vector quantization,
popular for cluster analysis in data mining.
k-means clustering aims to partition n observations
into k clusters in which each observation belongs to
the cluster with the nearest mean, serving as a prototype
of the cluster.
This results in a partitioning of the data space into Voronoi cells.
'''
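# The sketch below is an editor's illustration, not part of the original
# colony module: a minimal, hedged example of the k-means partitioning the
# docstring above describes, using scikit-learn's MiniBatchKMeans on a tiny
# synthetic dataset. The helper name and parameter values are assumptions
# chosen only for the demo; nothing in the module calls it.
def _kmeans_docstring_example():
    import numpy as np
    from sklearn.cluster import MiniBatchKMeans
    rng = np.random.RandomState(0)
    # two well-separated blobs around (0, 0) and (5, 5)
    points = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5.0])
    km = MiniBatchKMeans(n_clusters=2, init='k-means++', n_init=3,
                         random_state=0)
    labels = km.fit_predict(points)
    # each observation ends up in the cluster with the nearest centroid
    return labels, km.cluster_centers_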
# data wrangler
from sklearn.feature_extraction import DictVectorizer
# dimensionality reduction
from sklearn.decomposition import TruncatedSVD
# text data wrangling stuff
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# scikit-learn parallel tools
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer
# scikit-learn k-means clusters
from sklearn.cluster import KMeans, MiniBatchKMeans
# scikit-learn metrics
from sklearn import metrics
# seconds since the epoch
from time import time
import numpy as np
# standard python tools
import arrow
import motor
import msgpack
import uuid
# this colony code base!
from colony.messages import kmeans
from colony.messages import Feature, Forecast, Metric
from colony.messages import Resource, ClusterResource
from colony.tools import clean_structure
from colony.tools import clean_result
# tornado old gen
from tornado import gen
class Prediction(object):
'''
KMeans cluster prediction
'''
@gen.engine
def new_prediction(self, dataset, callback):
'''
# new prediction dataset
'''
try:
message = Forecast(dataset)
message.validate()
except Exception, e:
callback(None, e)
return
message = clean_structure(message)
message = yield gen.Task(self.db.predictions.insert, message)
message, error = message.args
if error:
callback(None, error)
return
#message = {
# 'uuid': message.get('uuid'),
# 'forecast': np.asarray(message.get('doing!'))
#}
callback(message.get('uuid'), None)
@gen.engine
def get_prediction(self, model_uuid, prediction_uuid, callback):
'''
        Get a specific k-means prediction.
        '''
        model_type = 'k-means'
try:
message = yield motor.Op(
self.db.predictions.find_one, {
'uuid':prediction_uuid,
'model_type': model_type
},{'_id':0}
)
if message:
message = kmeans.Cluster(message)
message.validate()
except Exception, e:
callback(None, e)
return
callback(message, None)
@gen.engine
def get_predictions(self, model_uuid, page_num, callback):
'''
Get k-means models.
'''
model_type = 'k-means'
page_num = int(page_num)
page_size = self.settings['page_size']
result = []
query = self.db.predictions.find({'active':True},{'_id':0})
query = query.sort([
('uuid', -1)
]).skip(page_num * page_size).limit(page_size)
try:
for record in (yield motor.Op(query.to_list)):
result.append(kmeans.Cluster(record))
struct = {'result': result}
message = kmeans.ClusterResult(struct)
message.validate()
except Exception, e:
callback(None, e)
return
message = clean_result(message)
callback(message, None)
class Trainer(object):
'''
Cluster trainer
'''
@gen.engine
def get_feature(self, feature_uuid, callback):
'''
'''
pass
@gen.engine
def gen_features(self, feature_uuid, callback):
'''
'''
pass
@gen.engine
def new_feature_set(self, dataset, callback):
'''
# new feature dataset
'''
try:
features = Feature(dataset)
features.validate()
except Exception, e:
callback(None, e)
return
features = clean_structure(features)
message = yield gen.Task(self.db.features.insert, features)
message, error = message.args
if error:
callback(None, error)
return
message = {
'uuid': features.get('uuid'),
'features': np.asarray(features.get('dimensions'))
}
callback(message, None)
@gen.engine
def train_new_cluster(self, features, centroids, callback):
'''
Do the actual clustering.
'''
number_seeds = (centroids if centroids else self.settings['number_seeds'])
minibatch = self.settings['minibatch']
max_iter = self.settings['max_iter']
number_init = self.settings['number_init']
max_no_improvement = self.settings['max_no_improvement']
batch_size = self.settings['batch_size']
verbose = self.settings['verbose']
try:
if minibatch:
km = MiniBatchKMeans(
n_clusters=number_seeds,
init='k-means++',
max_iter=max_iter,
max_no_improvement=max_no_improvement,
n_init=number_init,
batch_size=batch_size,
verbose=verbose
)
else:
km = KMeans(
n_clusters=number_seeds,
init='k-means++',
max_iter=max_iter,
n_init=number_init,
verbose=verbose
)
start = time()
km.fit(features)
cluster_time = time() - start
message = {
'cluster':km,
'cluster_time': cluster_time
}
except Exception, e:
callback(None, e)
return
callback(message, None)
class Wrangler(object):
'''
Data Wrangler
'''
@gen.engine
def dict_feature_extraction(self, dataset, callback):
'''
# dictionary feature extraction.
This transformer turns lists of mappings (dict-like objects)
of feature names to feature values into Numpy arrays or
scipy.sparse matrices for use with scikit-learn estimators.
'''
use_sparse = self.settings['use_sparse']
start = time()
if use_sparse:
print("Extracting features from the training dataset using sparse vectorizer")
            vectorizer = DictVectorizer(sparse=use_sparse)
else:
print("Extracting features from the training dataset")
vectorizer = DictVectorizer(sparse=False)
features = vectorizer.fit_transform(dataset)
print("done in %fs" % (time() - start))
print("n_samples: %d, n_features: %d" % features.shape)
return features
@gen.engine
def text_feature_extraction(self, callback):
'''
# text feature extraction.
'''
use_hashing = self.settings['use_hashing']
use_idf = self.settings['use_idf']
n_features = self.settings['n_features']
print('''Extracting features from the training \
dataset using a sparse vectorizer''')
        # we're not using it, why?
start = time()
if use_hashing:
if use_idf:
# Perform an IDF normalization
# on the output of HashingVectorizer
hasher = HashingVectorizer(
n_features=n_features,
stop_words='english',
non_negative=True,
norm=None,
binary=False
)
vectorizer = Pipeline((
('hasher', hasher),
('tf_idf', TfidfTransformer())
))
else:
vectorizer = HashingVectorizer(
n_features=n_features,
stop_words='english',
non_negative=False,
norm='l2',
binary=False
)
else:
vectorizer = TfidfVectorizer(
max_df=0.5,
max_features=n_features,
stop_words='english',
use_idf=use_idf
)
return vectorizer
@gen.engine
def dimensionality_reduction(self, feature, callback):
'''
Performing dimensionality reduction using LSA
'''
n_components = self.settings['n_components']
if n_components:
print("Performing dimensionality reduction using LSA")
start = time()
# Vectorizer results are normalized,
# which makes KMeans behave as spherical k-means for better results.
lsa = TruncatedSVD(n_components)
feature = lsa.fit_transform(feature)
# Since LSA/SVD results are not normalized,
# we have to redo the normalization.
feature = Normalizer(copy=False).fit_transform(feature)
print("done in %fs" % (time() - start))
return feature
class Cluster(object):
'''
k-means cluster logic
'''
@property
def n_clusters(self):
'''
The number of clusters to form as well
as the number of centroids to generate.
'''
return self.n_clusters
@property
def max_iter(self):
'''
Maximum number of iterations
of the k-means algorithm for a single run.
'''
return self.max_iter
@property
def n_init(self):
'''
Number of time the k-means algorithm will be run
with different centroid seeds. The final results
will be the best output of n_init consecutive runs
in terms of inertia.
'''
return self.n_init
@property
def init(self):
'''
Method for initialization, defaults to
'k-means++', 'random' or an ndarray.
'''
return self.init
@property
def precompute_distances(self):
'''
Precompute distances (faster but takes more memory).
'''
return self.precompute_distances
@property
def tol(self):
'''
Relative tolerance w.r.t. inertia to declare convergence
float, optional default: 1e-4
'''
return self.tol
@property
def n_jobs(self):
'''
The number of jobs to use for the computation.
This works by breaking down the pairwise matrix
into n_jobs even slices and computing them in parallel.
'''
return self.n_jobs
@property
def random_state(self):
'''
The generator used to initialize the centers.
If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
'''
return self.random_state
@gen.engine
def new_model(self, struct, callback):
'''
Create a new cluster model.
'''
try:
cluster = kmeans.Cluster(struct)
cluster.validate()
except Exception, e:
callback(None, e)
return
cluster = clean_structure(cluster)
message = yield gen.Task(self.db.models.insert, cluster)
message, error = message.args
if error:
callback(None, error)
return
# push message to the right channel
callback(cluster.get('uuid'), None)
@gen.engine
def delete_model(self, model_uuid, callback):
'''
Delete k-means model
'''
try:
result = yield motor.Op( self.db.models.remove,
{'uuid':model_uuid} )
except Exception, e:
callback(None, e)
return
callback(result, None)
@gen.engine
def replace_model(self, struct, model_uuid, callback):
'''
Replace k-means model
'''
try:
cluster = kmeans.Cluster(struct)
cluster.validate()
except Exception, e:
callback(None, e)
return
cluster = clean_structure(cluster)
message = yield gen.Task( self.db.models.update,
{'uuid': model_uuid},
cluster )
message, error = message.args
# missing crash_and_die
if error:
callback(None, error)
return
if not message.get('updatedExisting'):
error = {'uuid': model_uuid, 'replaced': False}
callback(None, error)
return
# push message to the right channel
callback(model_uuid, None)
@gen.engine
def get_model(self, model_type, model_uuid, callback):
'''
Get specific k-means model.
'''
if not model_type:
model_type = 'k-means'
try:
message = yield motor.Op(
self.db.models.find_one, {
'uuid':model_uuid,
'model_type': model_type
},{'_id':0}
)
if message:
message = kmeans.Cluster(message)
message.validate()
except Exception, e:
callback(None, e)
return
callback(message, None)
@gen.engine
def get_models(self, model_type, page_num, callback):
'''
Get k-means models.
'''
model_type = 'k-means'
page_num = int(page_num)
page_size = self.settings['page_size']
result = []
query = self.db.models.find({'active':True},{'_id':0})
query = query.sort([
('uuid', -1)
]).skip(page_num * page_size).limit(page_size)
try:
for record in (yield motor.Op(query.to_list)):
result.append(kmeans.Cluster(record))
struct = {'result': result}
message = kmeans.ClusterResult(struct)
message.validate()
except Exception, e:
callback(None, e)
return
message = clean_result(message)
callback(message, None)
@gen.engine
def new_resource(self, struct, callback):
'''
Create a new cluster resource
'''
try:
message = ClusterResource(struct)
message.validate()
message = message.to_primitive()
except Exception, e:
callback(None, e)
return
resource = ''.join(('resources.', message['resource']))
try:
message = yield motor.Op(
self.db.models.update,
{'uuid': message['model_uuid']},
{
'$addToSet': {
''.join((resource, '.contains')): message['uuid']
},
'$inc': {
'resources.total': 1,
''.join((resource, '.total')): 1
}
}
)
except Exception, e:
callback(None, e)
return
callback(message, None)
@gen.engine
def check_exist(self, model_uuid, callback):
'''
Check if cluster exist
'''
try:
exist = yield motor.Op(self.db.models.find_one,
{'uuid': model_uuid},
{'uuid':1, '_id':0})
exist = (True if exist else False)
except Exception, e:
callback(None, e)
callback(exist, None)
@gen.engine
def check_type(self, model_uuid, model_type, callback):
'''
Check cluster type
'''
try:
check_type = yield motor.Op(self.db.models.find_one,
{'uuid': model_uuid,
'model_type': model_type},
{'model_type':1,'_id':0})
check_type = (True if check_type else False)
except Exception, e:
callback(None, e)
callback(check_type, None)
@gen.engine
def get_centroids(self, model_uuid, callback):
'''
Get cluster centroid seeds
'''
try:
centroids = yield motor.Op(self.db.models.find_one,
{'uuid': model_uuid},
{'centroids':1,'_id':0})
centroids = (centroids if centroids else False)
except Exception, e:
callback(None, e)
callback(centroids, None)
@gen.engine
def new_kmeans_unit(self, cluster_labels, cluster_unique_labels, cluster_centers, callback):
'''
New kmeans cluster
The labels over the training data can be found in the labels attributes.
'''
try:
struct = {
'labels': cluster_labels,
'unique_labels': cluster_unique_labels,
'centers': cluster_centers
}
unit = Unit(struct)
unit.validate()
except Exception, e:
callback(None, e)
return
unit = clean_structure(unit)
message = yield ge.Task(self.db.units.insert, unit)
message, error = message.args
if error:
callback(None, error)
return
callback({'uuid': unit.get('uuid')}, None)
@gen.engine
def new_metrics(self, feature_uuid, feature_data, feature_labels, cluster_labels, callback):
'''
Create new metrics
'''
try:
message = {
'homogeneity': metrics.homogeneity_score(feature_labels, cluster_labels),
'completeness': metrics.completeness_score(feature_labels, cluster_labels),
'v_measure': metrics.v_measure_score(feature_labels, cluster_labels),
'adjusted_rand': metrics.adjusted_rand_score(feature_labels, cluster_labels),
'silhouette': metrics.silhouette_score(feature_data, feature_labels, sample_size=1000),
'feature_uuid': feature_uuid,
}
metric = Metric(message)
metric.validate()
except Exception, e:
callback(None, e)
return
metric = clean_structure(metric)
message = yield gen.Task(self.db.metrics.insert, metric)
message, error = message.args
if error:
callback(None, error)
return
callback({'uuid': metric.get('uuid')}, None)
@gen.engine
def set_cluster_time(self, model_uuid, cluster_time, callback):
'''
Set cluster time
'''
try:
message = yield motor.Op(self.db.models.update,
{'uuid': model_uuid},
{'$set': {'cluster_time': cluster_time}})
except Exception, e:
callback(None, e)
return
callback(message, None)
|
agpl-3.0
| -1,745,465,895,318,321,200
| 25.607477
| 103
| 0.518115
| false
| 4.622593
| false
| false
| false
|