| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
stanta/darfchain | darfchain/__manifest__.py | Python | gpl-3.0 | 668 | 0.004491 |
{
'name': "Blockchain Waves Synchro",
'version': '1.0',
'depends': ['base',
'sale',
|
'sales_team',
'delivery',
'barcodes',
'mail',
'report',
|
'portal_sale',
'website_portal',
'website_payment',],
'author': "Sergey Stepanets",
'category': 'Application',
'description': """
Module for blockchain synchro
""",
'data': [
'views/setting.xml',
'data/cron.xml',
'views/clients.xml',
'views/sale_order.xml',
'views/journal_signature.xml',
# 'views/report.xml',
],
}
|
Kriegspiel/ks-python-api | kriegspiel_api_server/kriegspiel/migrations/0004_move_created_at.py | Python | mit | 507 | 0 |
# -*- coding: utf-8 -*-
# Generat
|
ed by Django 1.10.4 on 2017-01-13 20:45
fro
|
m __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('kriegspiel', '0003_auto_20170113_2035'),
]
operations = [
migrations.AddField(
model_name='move',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
facebookexperimental/eden-hg | eden/hg/eden/hgext3rd_init.py | Python | gpl-2.0 | 497 | 0.002012 |
# Copyright (c) 2017-present, Facebook, Inc.
# All Rights Reserved.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import, division, print_func
|
tion, unicode_literals
import pkgutil
# Indicate that hgext3rd is a namespace package, and other python path
# directories may still be
|
searched for hgext3rd extensions.
__path__ = pkgutil.extend_path(__path__, __name__) # type: ignore # noqa: F821
|
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py | Python | apache-2.0 | 24,371 | 0.006729 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
distributions_py = distributions
def _swap_first_last_axes(array):
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
"""Use monkey-patching to capture the output of an MVNDiag _sample_n."""
data_container = []
true_mvndiag_sample_n = distributions_py.MultivariateNormalDiag._sample_n
def _capturing_mvndiag_sample_n(self, n, seed=None):
samples = true_mvndiag_sample_n(self, n=n, seed=seed)
data_container.append(samples)
return samples
distributions_py.MultivariateNormalDiag._sample_n = (
_capturing_mvndiag_sample_n)
yield data_container
distributions_py.MultivariateNormalDiag._sample_n = true_mvndiag_sample_n
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
"""Use monkey-patching to capture the output of an Normal _sample_n."""
data_container = []
true_normal_sample_n = distributions_py.Normal._sample_n
def _capturing_normal_sample_n(self, n, seed=None):
samples = true_normal_sample_n(self, n=n, seed=seed)
data_container.append(samples)
return samples
distributions_py.Normal._sample_n = _capturing_normal_sample_n
yield data_container
distributions_py.Normal._sample_n = true_normal_sample_n
def make_univariate_mixture(batch_shape, num_components):
logits = random_ops.random_uniform(
list(batch_shape) + [num_components], -1, 1, dtype=dtypes.float32) - 50.
components = [
distributions_py.Normal(
loc=np.float32(np.random.randn(*list(batch_shape))),
scale=np.float32(10 * np.random.rand(*list(batch_shape))))
for _ in range(num_components)
]
cat = distributions_py.Categorical(logits, dtype=dtypes.int32)
return distributions_py.Mixture(cat, components)
def make_multivariate_mixture(batch_shape, num_components, event_shape):
logits = random_ops.random_uniform(
list(batch_shape) + [num_components], -1, 1, dtype=dtypes.float32) - 50.
components = [
distributions_py.MultivariateNormalDiag(
loc=np.float32(np.random.randn(*list(batch_shape + event_shape))),
scale_diag=np.float32(10 * np.random.rand(
*list(batch_shape + event_shape)))) for _ in range(num_components)
]
cat = distributions_py.Categorical(logit
|
s, dtype=dtypes.int32)
return distributions_py.Mixture(cat, components)
class MixtureTest(test.TestCase):
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_univariate_mixture(batch_shape, num_components=10)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event
|
_shape_tensor().eval())
for event_shape in ([1], [2]):
dist = make_multivariate_mixture(
batch_shape, num_components=10, event_shape=event_shape)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual(event_shape, dist.event_shape)
self.assertAllEqual(event_shape, dist.event_shape_tensor().eval())
def testBrokenShapesStatic(self):
with self.assertRaisesWithPredicateMatch(ValueError,
r"cat.num_classes != len"):
distributions_py.Mixture(
distributions_py.Categorical([0.1, 0.5]), # 2 classes
[distributions_py.Normal(loc=1.0, scale=2.0)])
with self.assertRaisesWithPredicateMatch(
ValueError, r"\(\) and \(2,\) are not compatible"):
# The value error is raised because the batch shapes of the
# Normals are not equal. One is a scalar, the other is a
# vector of size (2,).
distributions_py.Mixture(
distributions_py.Categorical([-0.5, 0.5]), # scalar batch
[
distributions_py.Normal(
loc=1.0, scale=2.0), # scalar dist
distributions_py.Normal(
loc=[1.0, 1.0], scale=[2.0, 2.0])
])
with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
cat_logits = array_ops.placeholder(shape=[1, None], dtype=dtypes.float32)
distributions_py.Mixture(
distributions_py.Categorical(cat_logits),
[distributions_py.Normal(
loc=[1.0], scale=[2.0])])
def testBrokenShapesDynamic(self):
with self.test_session():
d0_param = array_ops.placeholder(dtype=dtypes.float32)
d1_param = array_ops.placeholder(dtype=dtypes.float32)
d = distributions_py.Mixture(
distributions_py.Categorical([0.1, 0.2]), [
distributions_py.Normal(
loc=d0_param, scale=d0_param), distributions_py.Normal(
loc=d1_param, scale=d1_param)
],
validate_args=True)
with self.assertRaisesOpError(r"batch shape must match"):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
with self.assertRaisesOpError(r"batch shape must match"):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})
def testBrokenTypes(self):
with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
distributions_py.Mixture(None, [])
cat = distributions_py.Categorical([0.3, 0.2])
# components must be a list of distributions
with self.assertRaisesWithPredicateMatch(
TypeError, "all .* must be Distribution instances"):
distributions_py.Mixture(cat, [None])
with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
distributions_py.Mixture(
cat, [
distributions_py.Normal(loc=[1.0], scale=[2.0]),
distributions_py.Normal(loc=[np.float16(1.0)],
scale=[np.float16(2.0)]),
])
with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
distributions_py.Mixture(distributions_py.Categorical([0.3, 0.2]), None)
with self.assertRaisesWithPredicateMatch(TypeError,
"either be continuous or not"):
distributions_py.Mixture(
cat, [
distributions_py.Normal(loc=[1.0], scale=[2.0]),
distributions_py.Bernoulli(dtype=dtypes.float32, logits=[1.0]),
])
def testMeanUnivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, n
|
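The mixture_test.py sample above captures sampler outputs by monkey-patching `_sample_n` inside a context manager and restoring the original afterwards. A minimal, self-contained sketch of that capture pattern, with a made-up `Sampler` class standing in for the TensorFlow distribution classes:

```python
import contextlib

class Sampler:
    """Hypothetical stand-in for the distribution class being patched."""
    def sample(self, n):
        return list(range(n))

@contextlib.contextmanager
def capture_sample_outputs(cls):
    # Swap in a wrapper that records every return value, then restore the original.
    captured = []
    original = cls.sample

    def recording_sample(self, n):
        result = original(self, n)
        captured.append(result)
        return result

    cls.sample = recording_sample
    try:
        yield captured
    finally:
        cls.sample = original

with capture_sample_outputs(Sampler) as outputs:
    Sampler().sample(3)
print(outputs)  # [[0, 1, 2]]
```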
bitmovin/bitcodin-python | bitcodin/test/statistics/testcase_get_statistics_current.py | Python | unlicense | 614 | 0.001629 |
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
import unittest
import bitcodin
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
from bitcodin.rest import RestClient
cl
|
ass GetStatisticsCurrentMonthTestCase(BitcodinTestCase):
def setUp(self):
super(GetStatisticsCurrentMonthTestCase, self).setUp()
def runTest(self):
response = RestClient.get(url=bitcodin.get_api_base()+'/statistics', headers=bitcodin.create_headers())
de
|
f tearDown(self):
super(GetStatisticsCurrentMonthTestCase, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
lduarte1991/edx-platform | common/djangoapps/third_party_auth/tests/specs/base.py | Python | agpl-3.0 | 49,331 | 0.003324 |
"""Base integration test for provider implementations."""
import unittest
import json
import mock
from contextlib import contextmanager
from django import test
from django.contrib import auth
from django.contrib.auth import models as auth_models
from django.contrib.messages.storage import fallback
from django.contrib.sessions.backends import cache
from django.core.urlresolvers import reverse
from django.test import utils as django_utils
from django.conf import settings as django_settings
from social_core import actions, exceptions
from social_django import utils as social_utils
from social_django import views as social_views
from lms.djangoapps.commerce.tests import TEST_API_URL
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from student import models as student_models
from student import views as student_views
from student.tests.factories import UserFactory
from student_account.views import account_settings_context
from third_party_auth import middleware, pipeline
from third_party_auth.tests import testutil
class IntegrationTestMixin(object):
"""
Mixin base class for third_party_auth integration tests.
This class is newer and simpler than the 'IntegrationTest' alternative below, but it is
currently less comprehensive. Some providers are tested with this, others with
IntegrationTest.
"""
# Provider information:
PROVIDER_NAME = "override"
PROVIDER_BACKEND = "override"
PROVIDER_ID = "override"
# Information about the user expected from the provider:
USER_EMAIL = "override"
USER_NAME = "override"
USER_USERNAME = "override"
def setUp(self):
super(IntegrationTestMixin, self).setUp()
self.login_page_url = reverse('signin_user')
self.register_page_url = reverse('register_user')
patcher = testutil.patch_mako_templates()
patcher.start()
self.addCleanup(patcher.stop)
# Override this method in a subclass and enable at least one provider.
def test_register(self, **extra_defaults):
# The user goes to the register page, and sees a button to register with the provider:
provider_register_url = self._check_register_page()
# The user clicks on the Dummy button:
try_login_response = self.client.get(provider_register_url)
# The user should be redirected to the provider's login page:
self.assertEqual(try_login_response.status_code, 302)
provider_response = self.do_provider_login(try_login_response['Location'])
# We should be redirected to the register screen since this account is not linked to an edX account:
self.assertEqual(provider_response.status_code, 302)
self.assertEqual(provider_response['Location'], self.url_prefix + self.register_page_url)
register_response = self.client.get(self.register_page_url)
tpa_context = register_response.context["data"]["third_party_auth"]
self.assertEqual(tpa_context["errorMessage"], None)
# Check that the "You've successfully signed into [PROVIDER_NAME]" message is shown.
self.assertEqual(tpa_context["currentProvider"], self.PROVIDER_NAME)
# Check that the data (e.g. email) from the provider is displayed in the form:
form_data = register_response.context['data']['registration_form_desc']
form_fields = {field['name']: field for field in form_data['fields']}
self.assertEqual(form_fields['email']['defaultValue'], self.USER_EMAIL)
self.assertEqual(form_fields['name']['defaultValue'], self.USER_NAME)
self.assertEqual(form_fields['username']['defaultValue'], self.USER_USERNAME)
for field_name, value in extra_defaults.items():
self.assertEqual(form_fields[field_name]['defaultValue'], value)
registration_values = {
'email': 'email-edited@tpa-test.none',
'name': 'My Customized Name',
'username': 'new_username',
'honor_code': True,
}
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
registration_values
)
self.assertEqual(ajax_register_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(tpa_context["finishAuthUrl"])
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again, whether or not we have yet verified the account:
self.client.logout()
self._test_return_login(user_is_activated=False)
self.client.logout()
self.verify_user_email('email-edited@tpa-test.none')
self._test_return_login(user_is_activated=True)
def test_login(self):
self.user = UserFactory.create() # pylint: disable=attribute-defined-outside-init
# The user goes to the login page, and sees a button to login with this provider:
provider_login_url = self._check_login_page()
# The user clicks on the provider's button:
try_login_response = self.client.get(provider_login_url)
# The user should be redirected to the provider's login page:
self.assertEqual(try_login_response.status_code, 302)
complete_response = self.do_provider_login(try_login_response['Location'])
# We should be redirected to the login screen since this account is not linked to an edX account:
self.assertEqual(complete_response.status_code, 302)
self.assertEqual(complete_response['Location'], self.url_prefix + self.login_page_url)
login_response = self.client.get(self.login_page_url)
tpa_context = login_response
|
.context["data"]["third_party_auth"]
self.assertEqual(tpa_context["errorMessage"], None)
# Check that the "You've successfully signed into [PROVIDER_NAME]" message is shown.
self.assertEqual(tpa_context["currentProvider"], self.PROVIDER_NAME)
# Now the user enters their username and password.
# The AJAX on the page will log them in:
ajax_login_response = self.client.post(
reverse('user_api_login_session'),
{'email'
|
: self.user.email, 'password': 'test'}
)
self.assertEqual(ajax_login_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(tpa_context["finishAuthUrl"])
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._test_return_login()
def do_provider_login(self, provider_redirect_url):
"""
mock logging in to the provider
Should end with loading self.complete_url, which should be returned
"""
raise NotImplementedError
def _test_return_login(self, user_is_activated=True, previous_session_timed_out=False):
""" Test logging in to an account that is already linked. """
# Make sure we're not logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 302)
# The user goes to the login page, and sees a button to login with this provider:
provider_login_url = self._check_login_page()
# The user clicks on the provider's login button:
try_login_response = self.client.get(provider_login_url)
# The user should be redirected to the provider:
self.assertEqual(try_login_response.status_code, 302)
login_response = self.do_provider_login(try_login_response['Location'])
# If the previous session was manually logged out, there will be one weird redirect
# required to set the login cookie (it sticks around if the main session times out):
if not previ
|
MugFoundation/versioneer | cli/versioneer.py | Python | mit | 743 | 0.012113 |
#!/usr/bin/env python
#(C) Mugfoundation 2014
#Available under MIT license
import click
import hashlib
c = hashlib.sha512()
@click.command()
@click.option('--setup', 'setup', help='Setup new
|
project', type=str)
@click.option('-x', '--major', 'major', help='major version setter', type=int)
@click.option('-y', '--minor', 'minor', help='minor version setter', type=int)
@click.option('-z', '--patch', 'patch', help='patch version setter', type=int)
|
@click.option('-e', '--extras', 'extras', help='extras version setter', type=int)
@click.option('-h', '--hash', 'hash', help='file to extract the sha512 hash', type=str)
@click.option('-sr', '--sign', 'sign', help='sign the release using open pgp (available only on linux)', type=str)
def main():
|
DigitalHills/Esse | dpeer/nexus.py | Python | apache-2.0 | 17,520 | 0.00468 |
'''
Copyright 2017 Digital Hills, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import threading
from time import sleep
from queue import Queue
from json import loads, dumps
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from random import choice, SystemRandom
from tcplib import query
from settings import targeting
from settings import node_settings
from display import cout, ctxt
# How much propagation history to take. Enforce int type
__NEXUS__PROP_HISTORY_LENGTH__ = 2000
__NEXUS__PROP_HISTORY_CUT__ = int(__NEXUS__PROP_HISTORY_LENGTH__ / 2)
# Network variables
__NEXUS__PING_TIMEOUT__ = 1
__NEXUS__PING_CAP__ = 10
# Time Variables
__NEXUS__BASE_EPOCH_SECONDS__ = 40
# Console out peers dropping offline
__NEXUS__SHOW_PEER_DROPS__ = False
class PeerNexus(threading.Thread):
def __init__(self, memPool, chain):
threading.Thread.__init__(self)
self.signal = True
# A logger
self.logger = logging.getLogger(__name__)
# Suppress console messages from Nexus
self.supressText = False
'''
Time Information
'''
# Initialization time
self.uptime = datetime.utcnow()
# Network time - Exists as None until on a network
self.utc_network = None
# Block epochs for PoT
self.next_epoch = None
self.prev_epoch = None
# Number of epochs we've been alive for
self.epochs_passed = 0
# Get seconds from a time difference
# use: self.timeDeltaToSeconds(endTime - StartTime)
self.timeDeltaToSeconds = lambda x: int(str(x).split(".")[0].split(":")[2])
'''
References to the DHNode Information
-memPool = Transaction pool (yet-to-be-mined)
-chain = The current block chain
'''
self.memPool = memPool
self.chain = chain
'''
Network/ Peer Information
-propagated = A list that ensures we don't send
previously sent requests
-peers = List of active (online) peers
'''
# Things that we've already propagated
self.propagated = []
# All known peers - Load from whatever file
self.peers = []
# Know thyself - Ensure we list ourselves as a node.
# Ignored by the '_ALL_' propagation tag
self.addNewPeer("127.0.0.1", node_settings["port"], self.next_epoch, self.uptime)
# Eventual constant node
self.addNewPeer("digitalhills.space", node_settings["port"], None, None)
# Different outgoing types to talk with peers
self.peerQueueEvent = {
"sync_MemPool": "smp",
"sync_NextBlock": "snb",
"sync_KnownNodes": "skn",
"sync_BlockChain": "sbc",
"info_ReleasedBlock": "irb",
# Programmed into Handler or in-use elsewhere
"ping_peers": "pp",
"data_propagate": "dp"
}
# Queue of all things that need to be pushed to all peers
# - Updates to mempool, current block set aside (about to go out - to sync all blocks released)
# Messages will be sent every cycle of self.run
self.pushToPeers = Queue()
'''
Before we start running, load a list of expected peers, and attempt contact
The first ones we come in contact with will be our basis of synchronization
'''
# Load file
# Attempt pings
# If accept: Sync block chain - Sync epochs
cout("cyan", "\n> Initializing Peer Nexus\n> Attempting to join known peers...")
known_nodes = None
try:
known_nodes = open("dpeer/known_nodes.json", "rb").read().decode('utf-8')
except IOError:
cout("fail", "Unable to open known peers file - Assuming peer is alone")
if known_nodes is not None:
try:
known_nodes = loads(known_nodes)
except:
cout("fail", "Error loading known_nodes.json p Assuming peer is alone")
known_nodes = None
# Add nodes from file
for node in known_nodes:
self.addNewPeer(node["address"], node["port"], node["epochTime"], node["personalTime"])
# Attempt to sync with all nodes
self.synchroni
|
zeEpoch()
'''
Push an event into the pushToPeers Queue
'''
def addItemToPeerQueue(self, _peerQueueEvent, _peer, data=None):
self.pushToPeers.put((_peer, _peerQueueEvent, data))
'''
|
Add new peer
'''
def addNewPeer(self, address, port, epochTime, personalTime):
# To ensure nobody attempts changing what we think about ourselves
if address == "127.0.0.1":
port = node_settings["port"]
epochTime = str(self.next_epoch)
personalTime = str(self.uptime)
# Use address + str(port) to uniquely identify nodes behind same address
cleanse = None
for peer in self.peers:
if (peer["address"] + str(peer["port"])) == address + str(port):
cleanse = address + str(port)
# If the item exists, remove it to append with new information!
if cleanse is not None:
self.peers = [peer for peer in self.peers if (peer["address"] + str(peer["port"])) != cleanse]
# Append the peer as-if its a new peer
self.peers.append({
"address": str(address),
"port": int(port),
"lastConnect": str(datetime.now()),
"failedPings": 0,
"online": True,
"epochTime": epochTime,
"personalTime": personalTime
})
'''
Perform all pushes to peers
- Should only be called by this thread's run loop
'''
def __pushEventsToPeers(self):
while not self.pushToPeers.empty():
# Get an event from the queue, and break into its pieces
peerEvent = self.pushToPeers.get()
_peer = peerEvent[0]
_event = peerEvent[1]
_data = peerEvent[2]
'''
Build list of peers that will be receiving the information
'''
outgoing = []
if _peer == "_ALL_":
outgoing = [peer for peer in self.peers if peer["address"] != "127.0.0.1"]
else:
''' Find the peer to ensure it exists - Here we accept sending to local address '''
for _p in self.peers:
if _p["address"] == _peer:
outgoing.append(_p)
if len(outgoing) == 0:
self.logger.info("<Nexus unable to locate peer [{}] in peer list>".format(_peer))
if not self.supressText:
cout("fail", ("<Nexus unable to locate peer [{}] in peer list>".format(_peer)))
return
'''
Push the data to the selected peers
- Handle all self.peerQueueEvent
'''
for _p in outgoing:
'''
Ping Request
'''
if _event == self.peerQueueEvent["ping_peers"]:
query(_p["address"], _p["port"], "*", self.logger)
# Need to add all self.peerQueueEvent items here
'''
Data Propagation Request
'''
if _event == self.peerQueueEvent["data_propagate"]:
|
luojus/bankws | bankws/appresponse.py | Python | mit | 15,773 | 0.000127 |
'
|
''
Appresponse module contains ApplicationResponse and FileDescriptor
classes which are used to parse response.
Usage:
>>> response = ApplicationResponse(xml_message)
>>> response.is_accepted() # Checks was the request accepted
'''
import os
import base64
import gzip
import logging
from lxml import etree
try:
from bankws.signature import validate
except ImportError:
from
|
signature import validate
class ApplicationResponse():
""" ApplicationResponse class is used to parse certificate responses
Public methods::
is_accepted: Checks if request was accepted (responsecode 00)
content: Returns content of message
references: Returns filereferences list.
@type _customerid: string
@ivar _customerid: Customer that send request.
@type _timestamp: string
@ivar _timestamp: Time and date when Application Response header was made.
@type _responsecode: string
@ivar _responsecode: Result of the request.
@type _responsetext: string
@ivar _responsetext: Human readable text telling meaning of response code.
@type _executionserial: string
@ivar _executionserial: Unique identifier for operation. [0..1]
@type _encrypted: boolean
@ivar _encrypted: Is content encrypted.
@type _encryptionmethod: string
@ivar _encryptionmethod: Name of the encryption algorithm.
@type _compressed: boolean
@ivar _compressed: Is content compressed.
@type _compressionmethod: string
@ivar _compressionmethod: Name of the compression algorithm.
@type _amounttotal: string
@ivar _amounttotal: Total sum of amounts in request.
@type _transactioncount: string
@ivar _transactioncount: Total number of transactions in the data.
@type _customerextension: Element
@ivar _customerextension: Extensions for schema.
@type _file_descriptors: List<FileDescriptor>
@ivar _file_descriptors: List of files found at the bank.
@type _filetype: string
@ivar _filetype: Type of the file.
@type _content: string
@ivar _content: Content of response (Usually empty, used in downloadfile
and schema validation error responses.)
"""
def __init__(self, message):
"""
Initializes ApplicationResponse class.
@type message: string
@param message: ApplicationResponse xml-message.
@raise ValueError: If message doesn't follow xml schema or
signature is invalid.
"""
self.logger = logging.getLogger("bankws")
self._accepted = True
# validate using schema
if not self._validate_with_schema(message):
# Some errors return invalid xml.
self.logger.error("Message doesn't follow schema.")
self._accepted = False
# raise ValueError('Failed to validate against schema')
# Check signature
if not validate(message):
raise ValueError('Failed to verify signature')
descriptors = None
self._content = None
tree = etree.fromstring(message)
# Parse elements from tree to variables.
for element in tree.iter():
if element.tag == "{http://bxd.fi/xmldata/}CustomerId":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._customerid = element.text
if element.tag == "{http://bxd.fi/xmldata/}Timestamp":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._timestamp = element.text
if element.tag == "{http://bxd.fi/xmldata/}ResponseCode":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._responsecode = element.text
if element.tag == "{http://bxd.fi/xmldata/}ResponseText":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._responsetext = element.text
if element.tag == "{http://bxd.fi/xmldata/}ExecutionSerial":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._executionserial = element.text
if element.tag == "{http://bxd.fi/xmldata/}Encrypted":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
value = element.text.lower()
self._encrypted = True if value == 'true' else False
if element.tag == "{http://bxd.fi/xmldata/}EncryptionMethod":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._encryptionmethod = element.text
if element.tag == "{http://bxd.fi/xmldata/}Compressed":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
value = element.text.lower()
if value == '1':
value = 'true'
self._compressed = True if value == 'true' else False
if element.tag == "{http://bxd.fi/xmldata/}CompressionMethod":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._compressionmethod = element.text
if element.tag == "{http://bxd.fi/xmldata/}AmountTotal":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._amounttotal = element.text
if element.tag == "{http://bxd.fi/xmldata/}TransactionCount":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._transactioncount = element.text
if element.tag == "{http://bxd.fi/xmldata/}CustomerExtension":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._customerextension = element
if element.tag == "{http://bxd.fi/xmldata/}FileDescriptors":
descriptors = element
if element.tag == "{http://bxd.fi/xmldata/}FileType":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._filetype = element.text
if element.tag == "{http://bxd.fi/xmldata/}Content":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
bytestring = bytes(element.text, 'utf-8')
self._content = base64.b64decode(bytestring)
# Parse filedescriptors
if descriptors is not None:
self._file_descriptors = []
for descriptor in descriptors:
fd = FileDescriptor()
for element in descriptor.iter():
if element.tag == "{http://bxd.fi/xmldata/}FileReference":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.reference = element.text
if element.tag == "{http://bxd.fi/xmldata/}TargetId":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.target = element.text
if element.tag == "{http://bxd.fi/xmldata/}ServiceId":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.serviceid = element.text
if element.tag == ("{http://bxd.fi/xmldata/}"
"ServiceIdOwnerName"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.serviceidownername = element.text
if element.tag == "{http://bxd.fi/xmldata/}UserFilename":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.userfilename = element.text
if element.tag == ("{http://bxd.fi/xmldata/}"
"ParentFileReference"):
|
joshmoore/zeroc-ice | py/test/Ice/faultTolerance/Client.py | Python | gpl-2.0 | 1,608 | 0.006219 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice, AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def usage(n):
sys.stderr.write("Usage: " + n + " port...\n")
def run(args, communicator):
ports = []
for arg in args[1:]:
if arg[0] == '-':
sys.stderr.write(args[0] + ": unknown option `" + arg + "'\n")
usage(args
|
[0])
return False
ports.append(int(arg))
if len(ports) == 0:
sys.stderr.write(args[0] + ": no ports specified\n")
usage(args[0]
|
)
return False
try:
AllTests.allTests(communicator, ports)
except:
traceback.print_exc()
test(False)
return True
try:
initData = Ice.InitializationData()
initData.properties = Ice.createProperties(sys.argv)
#
# This test aborts servers, so we don't want warnings.
#
initData.properties.setProperty('Ice.Warn.Connections', '0')
communicator = Ice.initialize(sys.argv, initData)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
RexFuzzle/sfepy | tests/test_input_linear_viscoelastic.py | Python | bsd-3-clause | 207 | 0.009662 |
input_name = '../examples/linear_elasticity/linear_viscoelastic.py'
output_name_trunk = 'test_linear_viscoelast
|
ic'
from tests_basic import TestInputEvolutionary
class Test(TestInputEvolu
|
tionary):
pass
|
fuspu/RHP-POS | item_utils.py | Python | mit | 837 | 0.009558 |
#!/usr/bin/env python3
"""
Item Related Objects.
"""
#-*- coding: utf-8 -*-
import re
from datetime import datetime
from db_related import DBConnect
class Item_Lookup(object):
"""
Returned Item Lookup Dictionary Structure:
item = {
upc: text
description: text
cost: decimal
price: decimal
taxable: True or False
on_hand_qty: decimal
stx: decimal
}
"""
def __init__(self, upc):
|
self.upc = upc
def GetBasics(self):
query = '''SELECT upc, description, cost, retail, taxable, onhandqty
FROM item_detailed
WHERE upc=(?)'''
data = [self.upc,]
returnd =
|
DBConnect(query, data).ALL()
|
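Item_Lookup.GetBasics above binds the UPC as a query parameter (`upc=(?)` plus a one-element data list) instead of interpolating it into the SQL string. A small sqlite3 sketch of that parameter-binding pattern; the in-memory table and values are invented for illustration, and the row's DBConnect helper is not reproduced here:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE item_detailed (upc TEXT, description TEXT, retail REAL)")
conn.execute("INSERT INTO item_detailed VALUES (?, ?, ?)",
             ("012345678905", "Sample item", 1.99))

upc = "012345678905"
query = "SELECT upc, description, retail FROM item_detailed WHERE upc=(?)"
data = [upc]                                # the list supplies the bound parameter
rows = conn.execute(query, data).fetchall()
print(rows)  # [('012345678905', 'Sample item', 1.99)]
```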
ahclab/greedyseg | makeliblin.py | Python | gpl-2.0 | 4,282 | 0.02639 |
# coding: utf-8
import sys
from collections import defaultdict
sys.path.append('/project/nakamura-lab01/Work/yusuke-o/python')
from data.reader import dlmread
def addfeature(fs, fid, name, mode):
if mode == 'dev' or name in fid:
fs.append(fid[name])
def main():
if len(sys.argv) != 6:
print('USAGE: python3 makeliblin_greedy.py \\')
print(' <str: mode ("dev" or "test")>')
print(' <in-file: input sentence with POS> \\')
print(' <in-file: splitter table> \\')
print(' <(dev)out-file, (test)in-file: feature ID table> \\')
print(' <out-file: LIBLINEAR input data>')
return
mode = sys.argv[1]
fname_pos = sys.argv[2]
fname_splitter = sys.argv[3]
fname_fid = sys.argv[4]
fname_liblin = sys.argv[5]
if mode not in ['dev', 'test']:
sys.stderr.write('ERROR: u
|
nknown mode.\n')
return
# load word and pos
corpus_in_pos = [x for x in dlmread(fname_pos, ' ')]
for i in range(len(corpus_in_pos)):
corpus_in_pos[i] = [w.split('_') for w in corpus_in_pos[i]]
# load splitter
tab_sp = defaultdict(lambda: [])
with open(fname_splitter, 'r', encoding='utf-8') as fp:
for l in fp:
lineno, wordno = tuple(int(x) for x in l.strip().split(' '))
tab_sp[lineno].append(wordno)
# load or new feature id table
fid = defaultdict(lam
|
bda: len(fid)+1)
if mode == 'test':
with open(fname_fid, 'r', encoding='utf-8') as fp:
for l in fp:
ls = l.split()
k = ls[0]
v = int(ls[1])
fid[k] = v
# make/save training data
n = 0
with open(fname_liblin, 'w', encoding='utf-8') as fp:
for i in range(len(corpus_in_pos)):
data = [['<s>', '<s>']] * 2 + corpus_in_pos[i] + [['</s>', '</s>']] * 2
for j in range(len(data)-5): # ignore end of sentence
jj = j+2
features = []
# unigram words
# addfeature(features, fid, 'WORD[-2]=%s' % data[jj-2][0], mode)
addfeature(features, fid, 'WORD[-1]=%s' % data[jj-1][0], mode)
addfeature(features, fid, 'WORD[0]=%s' % data[jj+0][0], mode)
addfeature(features, fid, 'WORD[+1]=%s' % data[jj+1][0], mode)
addfeature(features, fid, 'WORD[+2]=%s' % data[jj+2][0], mode)
# unigram POSes
# addfeature(features, fid, 'POS[-2]=%s' % data[jj-2][1], mode)
addfeature(features, fid, 'POS[-1]=%s' % data[jj-1][1], mode)
addfeature(features, fid, 'POS[0]=%s' % data[jj+0][1], mode)
addfeature(features, fid, 'POS[+1]=%s' % data[jj+1][1], mode)
addfeature(features, fid, 'POS[+2]=%s' % data[jj+2][1], mode)
# bigram words
# addfeature(features, fid, 'WORD[-2:-1]=%s_%s' % (data[jj-2][0], data[jj-1][0]), mode)
addfeature(features, fid, 'WORD[-1:0]=%s_%s' % (data[jj-1][0], data[jj+0][0]), mode)
addfeature(features, fid, 'WORD[0:+1]=%s_%s' % (data[jj+0][0], data[jj+1][0]), mode)
addfeature(features, fid, 'WORD[+1:+2]=%s_%s' % (data[jj+1][0], data[jj+2][0]), mode)
# bigram POSes
# addfeature(features, fid, 'POS[-2:-1]=%s_%s' % (data[jj-2][1], data[jj-1][1]), mode)
addfeature(features, fid, 'POS[-1:0]=%s_%s' % (data[jj-1][1], data[jj+0][1]), mode)
addfeature(features, fid, 'POS[0:+1]=%s_%s' % (data[jj+0][1], data[jj+1][1]), mode)
addfeature(features, fid, 'POS[+1:+2]=%s_%s' % (data[jj+1][1], data[jj+2][1]), mode)
# trigram words
# addfeature(features, fid, 'WORD[-2:0]=%s_%s_%s' % (data[jj-2][0], data[jj-1][0], data[jj+0][0]), mode)
addfeature(features, fid, 'WORD[-1:+1]=%s_%s_%s' % (data[jj-1][0], data[jj+0][0], data[jj+1][0]), mode)
addfeature(features, fid, 'WORD[0:+2]=%s_%s_%s' % (data[jj+0][0], data[jj+1][0], data[jj+2][0]), mode)
# trigram POSes
# addfeature(features, fid, 'POS[-2:0]=%s_%s_%s' % (data[jj-2][1], data[jj-1][1], data[jj+0][1]), mode)
addfeature(features, fid, 'POS[-1:+1]=%s_%s_%s' % (data[jj-1][1], data[jj+0][1], data[jj+1][1]), mode)
addfeature(features, fid, 'POS[0:+2]=%s_%s_%s' % (data[jj+0][1], data[jj+1][1], data[jj+2][1]), mode)
line = '1 ' if j in tab_sp[i] else '2 '
line += ' '.join('%d:1'%f for f in sorted(features))
fp.write(line+'\n')
n += 1
# save feature id table
if mode == 'dev':
with open(fname_fid, 'w', encoding='utf-8') as fp:
for k, v in fid.items():
fp.write('%s\t%d\n' % (k, v))
if __name__ == '__main__':
main()
|
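The feature table in makeliblin.py relies on a `defaultdict` whose factory mints the next integer ID on first lookup, and each LIBLINEAR line is a label followed by `id:1` pairs. A stand-alone illustration of that ID-assignment and line-formatting idea, using hypothetical feature strings rather than the corpus above:

```python
from collections import defaultdict

# First lookup of an unseen feature name mints the next ID (1-based, as in the script).
fid = defaultdict(lambda: len(fid) + 1)

features = [fid['WORD[0]=the'], fid['POS[0]=DT'], fid['WORD[0]=the']]
print(features)  # [1, 2, 1] -- a repeated feature reuses its existing ID

# LIBLINEAR-style sparse line: "<label> <id>:1 <id>:1 ..."
line = '1 ' + ' '.join('%d:1' % f for f in sorted(set(features)))
print(line)      # 1 1:1 2:1
```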
andyzsf/django | django/db/models/fields/__init__.py | Python | bsd-3-clause | 84,287 | 0.000641 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.functional import cached_property, curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'EmailField', 'Empty', 'Field',
'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
|
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
undersc
|
ore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
|
StongeEtienne/dipy | dipy/align/tests/test_streamlinear.py | Python | bsd-3-clause | 15,316 | 0 |
import numpy as np
from numpy.testing import (run_module_suite,
assert_,
assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
assert_raises)
from dipy.align.streamlinear import (compose_matrix44,
decompose_matrix44,
BundleSumDistanceMatrixMetric,
BundleMinDistanceMatrixMetric,
BundleMinDistanceMetric,
StreamlineLinearRegistration,
StreamlineDistanceMetric)
from dipy.tracking.streamline import (center_streamlines,
unlist_streamlines,
relist_streamlines,
transform_streamlines,
set_number_of_points)
from dipy.core.geometry import compose_matrix
from dipy.data import get_data, two_cingulum_bundles
from nibabel import trackvis as tv
from dipy.align.bundlemin import (_bundle_minimum_distance_matrix,
_bundle_minimum_distance,
distance_matrix_mdf)
def simulated_bundle(no_streamlines=10, waves=False, no_pts=12):
t = np.linspace(-10, 10, 200)
# parallel waves or parallel lines
bundle = []
for i in np.linspace(-5, 5, no_streamlines):
if waves:
pts = np.vstack((np.cos(t), t, i * np.ones(t.shape))).T
else:
pts = np.vstack((np.zeros(t.shape), t, i * np.ones(t.shape))).T
pts = set_number_of_points(pts, no_pts)
bundle.append(pts)
return bundle
def fornix_streamlines(no_pts=12):
fname = get_data('fornix')
streams, hdr = tv.read(fname)
streamlines = [set_number_of_points(i[0], no_pts) for i in streams]
return streamlines
def evaluate_convergence(bundle, new_bundle2):
pts_static = np.concatenate(bundle, axis=0)
pts_moved = np.concatenate(new_bundle2, axis=0)
assert_array_almost_equal(pts_static, pts_moved, 3)
def test_rigid_parallel_lines():
bundle_initial = simulated_bundle()
bundle, shift = center_streamlines(bundle_initial)
mat = compose_matrix44([20, 0, 10, 0, 40, 0])
bundle2 = transform_streamlines(bundle, mat)
bundle_sum_distance = BundleSumDistanceMatrixMetric()
options = {'maxcor': 100, 'ftol': 1e-9, 'gtol': 1e-16, 'eps': 1e-3}
srr = StreamlineLinearRegistration(metric=bundle_sum_distance,
x0=np.zeros(6),
method='L-BFGS-B',
bounds=None,
options=options)
new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
def test_rigid_real_bundles():
bundle_initial = fornix_streamlines()[:20]
bundle, shift = center_streamlines(bundle_initial)
mat = compose_matrix44([0, 0, 20, 45., 0, 0])
bundle2 = transform_streamlines(bundle, mat)
bundle_sum_distance = BundleSumDistanceMatrixMetric()
srr = StreamlineLinearRegistration(bundle_sum_distance,
x0=np.zeros(6),
method='Powell')
new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
bundle_min_distance = BundleMinDistanceMatrixMetric()
srr = StreamlineLinearRegistration(bundle_min_distance,
x0=np.zeros(6),
method='Powell')
new_bundle2 = srr.optimize(bundle, bundle2).transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
assert_raises(ValueError, StreamlineLinearRegistration, method='Whatever')
def test_rigid_partial_real_bundles():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[20:40]
static_center, shift = center_streamlines(static)
moving_center, shift2 = center_streamlines(moving)
print(shift2)
mat = compose_matrix(translate=np.array([0, 0, 0.]),
angles=np.deg2rad([40, 0, 0.]))
moved = transform_streamlines(moving_center, mat)
srr = StreamlineLinearRegistration()
srm = srr.optimize(static_center, moved)
print(srm.fopt)
print(srm.iterations)
print(srm.funcs)
moving_back = srm.transform(moved)
print(srm.matrix)
static_center = set_number_of_points(static_center, 100)
moving_center = set_number_of_points(moving_back, 100)
vol = np.zeros((100, 100, 100))
spts = np.concatenate(static_center, axis=0)
spts = np.round(spts).astype(np.int) + np.array([50, 50, 50])
mpts = np.concatenate(moving_center, axis=0)
mpts = np.round(mpts).astype(np.int) + np.array([50, 50, 50])
for index in spts:
i, j, k = index
vol[i, j, k] = 1
vol2 = np.zeros((100, 100, 100))
for index in mpts:
i, j, k = index
vol2[i, j, k] = 1
overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))
assert_equal(overlap * 100 > 40, True)
def test_stream_rigid():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[20:40]
static_center, shift = center_streamlines(static)
mat = compose_matrix44([0, 0, 0, 0, 40, 0])
moving = transform_streamlines(moving, mat)
srr = StreamlineLinearRegistration()
sr_params = srr.optimize(static, moving)
moved = transform_streamlines(moving, sr_params.matrix)
srr = StreamlineLinearRegistration(verbose=True)
srm = srr.optimize(static, moving)
moved2 = transform_streamlines(moving, srm.matrix)
moved3 = srm.transform(moving)
assert_array_almost_equal(moved[0], moved2[0], decimal=3)
assert
|
_array_almost_equal(moved2[0], moved3[0], decimal=3)
def test_min_v
|
s_min_fast_precision():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[:20]
static = [s.astype('f8') for s in static]
moving = [m.astype('f8') for m in moving]
bmd = BundleMinDistanceMatrixMetric()
bmd.setup(static, moving)
bmdf = BundleMinDistanceMetric()
bmdf.setup(static, moving)
x_test = [0.01, 0, 0, 0, 0, 0]
print(bmd.distance(x_test))
print(bmdf.distance(x_test))
assert_equal(bmd.distance(x_test), bmdf.distance(x_test))
def test_same_number_of_points():
A = [np.random.rand(10, 3), np.random.rand(20, 3)]
B = [np.random.rand(21, 3), np.random.rand(30, 3)]
C = [np.random.rand(10, 3), np.random.rand(10, 3)]
D = [np.random.rand(20, 3), np.random.rand(20, 3)]
slr = StreamlineLinearRegistration()
assert_raises(ValueError, slr.optimize, A, B)
assert_raises(ValueError, slr.optimize, C, D)
assert_raises(ValueError, slr.optimize, C, B)
def test_efficient_bmd():
a = np.array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
streamlines = [a, a + 2, a + 4]
points, offsets = unlist_streamlines(streamlines)
points = points.astype(np.double)
points2 = points.copy()
D = np.zeros((len(offsets), len(offsets)), dtype='f8')
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets),
a.shape[0], D)
assert_equal(np.sum(np.diag(D)), 0)
points2 += 2
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets),
a.shape[0], D)
streamlines2 = relist_streamlines(points2, offsets)
D2 = distance_matrix_mdf(streamlines, streamlines2)
assert_array_almost_equal(D, D2)
cols = D2.shape[1]
rows = D2.shape[0]
dist = 0.25 * (np.sum(np.min(D2, axis=0)) / float(cols) +
np.sum(np.min(D2, axis=1)) / float(rows)) ** 2
dist2 = _bundle_minimum_distance(points, points2,
|
RPGOne/Skynet | pytorch-master/torch/legacy/optim/rmsprop.py | Python | bsd-3-clause | 2,014 | 0.001986 |
import torch
def rmsprop
|
(opfunc, x, config, state=None):
""" An implementation of RMSprop
ARGS:
- 'opfunc' : a function that takes a single input (X), the point
of evaluation, and returns f(X) and df/dX
- 'x' : the initial point
- 'config` : a table with configura
|
tion parameters for the optimizer
- 'config['learningRate']' : learning rate
- 'config['alpha']' : smoothing constant
- 'config['epsilon']' : value with which to initialise m
- 'config['weightDecay']' : weight decay
- 'state' : a table describing the state of the optimizer;
after each call the state is modified
- 'state['m']' : leaky sum of squares of parameter gradients,
- 'state['tmp']' : and the square root (with epsilon smoothing)
RETURN:
- `x` : the new x vector
- `f(x)` : the function, evaluated before the update
"""
# (0) get/update state
if config is None and state is None:
raise ValueError("rmsprop requires a dictionary to retain state between iterations")
state = state if state is not None else config
lr = config.get('learningRate', 1e-2)
alpha = config.get('alpha', 0.99)
epsilon = config.get('epsilon', 1e-8)
wd = config.get('weightDecay', 0)
# (1) evaluate f(x) and df/dx
fx, dfdx = opfunc(x)
# (2) weight decay
if wd != 0:
dfdx.add_(wd, x)
# (3) initialize mean square values and square gradient storage
if 'm' not in state:
state['m'] = x.new().resize_as_(dfdx).zero_()
state['tmp'] = x.new().resize_as_(dfdx)
# (4) calculate new (leaky) mean squared values
state['m'].mul_(alpha)
state['m'].addcmul_(1.0 - alpha, dfdx, dfdx)
# (5) perform update
torch.sqrt(state['m'], out=state['tmp']).add_(epsilon)
x.addcdiv_(-lr, dfdx, state['tmp'])
# return x*, f(x) before optimization
return x, fx
|
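The legacy rmsprop sample above implements the update m <- alpha*m + (1 - alpha)*g^2 followed by x <- x - lr*g/(sqrt(m) + eps) with in-place tensor calls. Below is a NumPy re-expression of that same update rule, a sketch for illustration rather than the torch API itself, applied to f(x) = sum(x**2):

```python
import numpy as np

def rmsprop_step(x, grad, m, lr=1e-2, alpha=0.99, epsilon=1e-8):
    # Leaky mean of squared gradients, then a gradient step scaled by its root.
    m = alpha * m + (1.0 - alpha) * grad * grad
    x = x - lr * grad / (np.sqrt(m) + epsilon)
    return x, m

x = np.array([3.0, -4.0])
m = np.zeros_like(x)
for _ in range(2000):
    grad = 2.0 * x                      # gradient of f(x) = sum(x**2)
    x, m = rmsprop_step(x, grad, m, lr=0.05)
print(x)  # entries hover near zero (within roughly the learning rate)
```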
getsenic/nuimo-linux-python | nuimo/__init__.py | Python | mit | 130 | 0.007692 |
from .nuimo import ControllerManager, ControllerManagerLis
|
tener, Controller, ControllerListen
|
er, GestureEvent, Gesture, LedMatrix
|
ricardodani/django-simple-url-shortner | simple_url_shortner/urlshortener/views.py | Python | gpl-2.0 | 3,074 | 0.001627 |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponsePermanentRedirect, Http404, HttpResponseRedirect
from django.views.decorators.http import require_GET
from django.contrib.auth import login, authenticate
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib import messages
from django.views.decorators.cache import cache_page
from .forms import UrlCreateForm
from .models import Url
@cache_page(60 * 60)
@require_GET
def redirect(request, short_code):
"""
Redirects Url
"""
if short_code:
try:
url = Url.objects.get(short_code=short_code)
except Url.DoesNotExist:
raise Http404()
return HttpResponsePermanentRedirect(url.original_url)
def register_user(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
|
username = request.POST['username']
password = request.POST['password1']
user = authenticate(username=username, password=password)
login(request, user)
messages.success(request, 'User registered and logged in with success.')
return HttpResponseRedirect(reverse_lazy('index'))
else:
context = {'user_register_form': form}
els
|
e:
context = {'user_register_form': UserCreationForm()}
return render(request, 'register.html', context)
def user_url_list(user, page, limit=20):
"""
Returns a paginator of a queryset with users Url's.
"""
url_list = Url.objects.filter(user=user)
paginator = Paginator(url_list, limit)
try:
url_list = paginator.page(page)
except PageNotAnInteger:
url_list = paginator.page(1)
except EmptyPage:
url_list = paginator.page(paginator.num_pages)
return url_list
def index(request):
"""
Main View, show form and list Url`s of the authenticated user.
"""
if request.user.is_authenticated():
context = {
# Returns the users ``Url.objects`` QuerySet or None if Anonymous.
'url_list': user_url_list(request.user, request.GET.get('page')),
'absolute_uri': request.build_absolute_uri(),
'user': request.user
}
else:
context = {
'user_login_form': AuthenticationForm(),
'user_register_form': UserCreationForm()
}
if request.method == "POST":
form = UrlCreateForm(request.POST)
if form.is_valid():
form.instance.user = (
request.user if request.user.is_authenticated() else None
)
instance = form.save()
context['short_url'] = request.build_absolute_uri() + instance.short_code
else:
form = UrlCreateForm()
context['change_form'] = form
return render(request, 'index.html', context)
|
bioinformatics-ua/catalogue | emif/utils/validate_questionnaire.py | Python | gpl-3.0 | 1,852 | 0.00432 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distrib
|
uted in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this pro
|
gram. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, HttpResponseRedirect
from questionnaire.models import *
from django.shortcuts import render_to_response, get_object_or_404
import sys
from searchengine.models import *
rem = 1
qu = get_object_or_404(Questionnaire, id=rem)
qsets = qu.questionsets()
for qs in qsets:
expected = qs.questions()
for q in expected:
slugs = Slugs.objects.filter(description__exact=q.text)
if len(slugs)!=1:
print "Error (multiple slugs to the description): " + q.number
for s in slugs:
try:
print s.slug1 + "| " + s.description + "| " + str(s.question.pk)
except:
print s.slug1 + "| " + str(s.question.pk)
continue
s = slugs[0]
if (s.slug1 != q.slug):
print "Error (slug1!=slug): " + q.number
print s.slug1 + "| " + s.description + "| " + str(s.question.pk)
continue
if (s.question.pk!=q.pk):
print "Error (q.pk!=pk): " + q.number
continue
|
sniperganso/python-manilaclient
|
manilaclient/v1/shares.py
|
Python
|
apache-2.0
| 1,210
| 0.000826
|
# Copyright 2012 NetApp
# Copyright 2015 Chuck Fouts
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import warnings
from manilaclient.v2 import shares
warnings.warn("Module manilaclient.v1.shares is deprecated (taken as "
              "a basis for manilaclient.v2.shares). "
"The preferable way to get a client class or object is to use "
"the manilaclient.client module.")
class MovedModule(object):
def __init__(self, new_module):
self.new_module = new_module
def __getattr__(self, attr):
return getattr(self.new_module, attr)
sys.modules["manilaclient.v1.shares"] = MovedModule(shares)
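# Net effect (editorial note, not part of the upstream module): any attribute
# lookup on the legacy ``manilaclient.v1.shares`` path is forwarded by
# ``MovedModule.__getattr__`` to ``manilaclient.v2.shares``, so existing imports
# keep working while the DeprecationWarning above is emitted at import time.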
|
fiduswriter/fiduswriter
|
fiduswriter/user/migrations/0001_squashed_0003_auto_20151226_1110.py
|
Python
|
agpl-3.0
| 6,929
| 0.00101
|
# Generated by Django 1.11.13 on 2018-08-14 17:35
from django.conf import settings
from django.db import migrations, models
import django.contrib.auth.models
import django.contrib.auth.validators
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
replaces = [
("user", "0001_initial"),
("user", "0002_rename_account_tables"),
("user", "0003_auto_20151226_1110"),
]
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"password",
models.CharField(max_length=128, verbose_name="password"),
),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
                    models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"email",
models.EmailField(
blank=True,
max_length=254,
verbose_name="email address",
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"db_table": "auth_user",
},
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name="TeamMember",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("roles", models.CharField(blank=True, max_length=100)),
(
"leader",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="leader",
to=settings.AUTH_USER_MODEL,
),
),
(
"member",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="member",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="UserProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("about", models.TextField(blank=True, max_length=500)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="teammember",
unique_together=set([("leader", "member")]),
),
]
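# Editorial note on this squashed migration (not part of the generated file):
# because of the ``replaces`` list above, a fresh database applies this single
# migration, while a database that already ran the three original migrations
# records it as applied without re-running anything.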
|
viraja1/grammar-check
|
grammar_check/__init__.py
|
Python
|
lgpl-3.0
| 19,319
| 0.000207
|
# -*- coding: utf-8 -*-
# © 2012 spirit <hiddenspirit@gmail.com>
# © 2013-2014 Steven Myint
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""LanguageTool through server mode."""
import atexit
import glob
import http.client
import locale
import os
import re
import socket
import sys
import urllib.parse
import urllib.request
from collections import OrderedDict
from functools import total_ordering
from weakref import WeakValueDictionary
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
from .backports import subprocess
from .which import which
__version__ = '1.3.1'
__all__ = ['LanguageTool', 'Error', 'get_languages', 'correct', 'get_version',
'get_directory', 'set_directory']
JAR_NAMES = [
'languagetool-server.jar',
'languagetool-standalone*.jar', # 2.1
'LanguageTool.jar',
'LanguageTool.uno.jar'
]
FAILSAFE_LANGUAGE = 'en'
# https://mail.python.org/pipermail/python-dev/2011-July/112551.html
USE_URLOPEN_RESOURCE_WARNING_FIX = (3, 1) < sys.version_info < (3, 4)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
cache = {}
class Error(Exception):
"""LanguageTool Error."""
class ServerError(Error):
pass
class JavaError(Error):
pass
class PathError(Error):
pass
def get_replacement_list(string, sep='#'):
if isinstance(string, list):
return string
return string.split(sep) if string else []
def auto_type(string):
try:
return int(string)
except ValueError:
try:
return float(string)
except ValueError:
return string
@total_ordering
class Match:
"""Hold information about where a rule matches text."""
_SLOTS = OrderedDict([
('fromy', int), ('fromx', int), ('toy', int), ('tox', int),
('ruleId', str), ('subId', str), ('msg', str),
('replacements', get_replacement_list),
('context', str), ('contextoffset', int),
('offset', int), ('errorlength', int),
('url', str), ('category', str), ('locqualityissuetype', str),
])
def __init__(self, attrib):
for k, v in attrib.items():
setattr(self, k, v)
def __repr__(self):
def _ordered_dict_repr():
slots = list(self._SLOTS)
slots += list(set(self.__dict__).difference(slots))
attrs = [slot for slot in slots
if slot in self.__dict__ and not slot.startswith('_')]
return '{{{}}}'.format(
', '.join([
'{!r}: {!r}'.format(attr, getattr(self, attr))
for attr in attrs
])
)
return '{}({})'.format(self.__class__.__name__, _ordered_dict_repr())
def __str__(self):
ruleId = self.ruleId
if self.subId is not None:
ruleId += '[{}]'.format(self.subId)
s = 'Line {}, column {}, Rule ID: {}'.format(
self.fromy + 1, self.fromx + 1, ruleId)
if self.msg:
s += '\nMessage: {}'.format(self.msg)
if self.replacements:
s += '\nSuggestion: {}'.format('; '.join(self.replacements))
s += '\n{}\n{}'.format(
self.context, ' ' * self.contextoffset + '^' * self.errorlength
)
return s
def __eq__(self, other):
return list(self) == list(other)
def __lt__(self, other):
return list(self) < list(other)
def __iter__(self):
return iter(getattr(self, attr) for attr in self._SLOTS)
def __setattr__(self, name, value):
try:
value = self._SLOTS[name](value)
except KeyError:
value = auto_type(value)
super().__setattr__(name, value)
def __getattr__(self, name):
if name not in self._SLOTS:
raise AttributeError('{!r} object has no attribute {!r}'
.format(self.__class__.__name__, name))
class LanguageTool:
    """Main class used for checking text against different rules."""
_HOST = socket.gethostbyname('localhost')
_MIN_PORT = 8081
_MAX_PORT = 8083
_TIMEOUT = 60
_port = _MIN_PORT
    _server = None
_instances = WeakValueDictionary()
_PORT_RE = re.compile(r"(?:https?://.*:|port\s+)(\d+)", re.I)
def __init__(self, language=None, motherTongue=None):
if not self._server_is_alive():
self._start_server_on_free_port()
if language is None:
try:
language = get_locale_language()
except ValueError:
language = FAILSAFE_LANGUAGE
self._language = LanguageTag(language)
self.motherTongue = motherTongue
# spell check rules are disabled by default
self.disabled = {'HUNSPELL_RULE', 'HUNSPELL_NO_SUGGEST_RULE', 'YOUR_NN', 'TRY_AND', 'PRP_PAST_PART',
'MORFOLOGIK_RULE_' + self.language.replace('-', '_').upper()}
self.enabled = set()
self._instances[id(self)] = self
def __del__(self):
if not self._instances and self._server_is_alive():
#self._terminate_server()
pass
def __repr__(self):
return '{}(language={!r}, motherTongue={!r})'.format(
self.__class__.__name__, self.language, self.motherTongue)
@property
def language(self):
"""The language to be used."""
return self._language
@language.setter
def language(self, language):
self._language = LanguageTag(language)
self.disabled.clear()
self.enabled.clear()
@property
def motherTongue(self):
"""The user's mother tongue or None.
The mother tongue may also be used as a source language for
checking bilingual texts.
"""
return self._motherTongue
@motherTongue.setter
def motherTongue(self, motherTongue):
self._motherTongue = (None if motherTongue is None
else LanguageTag(motherTongue))
@property
def _spell_checking_rules(self):
return {'HUNSPELL_RULE', 'HUNSPELL_NO_SUGGEST_RULE',
'MORFOLOGIK_RULE_' + self.language.replace('-', '_').upper()}
def check(self, text: str, srctext=None) -> [Match]:
"""Match text against enabled rules."""
root = self._get_root(self._url, self._encode(text, srctext))
return [Match(e.attrib) for e in root if e.tag == 'error']
def _check_api(self, text: str, srctext=None) -> bytes:
"""Match text against enabled rules (result in XML format)."""
root = self._get_root(self._url, self._encode(text, srctext))
return (b'<?xml version="1.0" encoding="UTF-8"?>\n' +
ElementTree.tostring(root) + b"\n")
def _encode(self, text, srctext=None):
params = {'language': self.language, 'text': text.encode('utf-8')}
if srctext is not None:
params['srctext'] = srctext.encode('utf-8')
if self.motherTongue is not None:
params['motherTongue'] = self.motherTongue
if self.disabled:
params['disabled'] = ','.join(self.disabled)
if self.enabled:
params['enabled'] = ','.join(self.enabled)
return urllib.parse.urlencode(params).encode()
def correct(self, text: str, srctext=None) -> str:
"""Automatically apply suggestions to the text."""
return correct(text, self.check(text, srctext))
def enable_s
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDanielyangNinja.py
|
Python
|
bsd-3-clause
| 362
| 0.035912
|
def extractDanielyangNinja(item):
'''
    Parser for 'danielyang.ninja'
'''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "WATTT" in item['tags']:
return buildReleaseMessageWithType(item, "WATTT", vol, chp, frag=frag, postfix=postfix)
return False
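# Hypothetical invocation, inferred from the logic above (the item fields and
# title format are assumptions for illustration):
#
#   extractDanielyangNinja({'title': 'Some Series Vol. 2 Chapter 5',
#                           'tags': ['WATTT']})
#
# builds a "WATTT" release message for the parsed volume/chapter, titles that
# contain "preview" or carry no volume/chapter return None, and items without
# a recognised tag fall through to False.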
|
openqt/algorithms
|
projecteuler/pe030-digit-fifth-powers.py
|
Python
|
gpl-3.0
| 507
| 0.021696
|
#!/usr/bin/env python
# coding=utf-8
"""30. Digit fifth powers
https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written as the sum of
fourth powers of their digits:
> 1634 = 1^4 + 6^4 + 3^4 + 4^4
> 8208 = 8^4 + 2^4 + 0^4 + 8^4
> 9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers
of their digits.
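
An editorial sketch of one brute-force way to compute it (not part of the
original problem statement): every candidate is smaller than 6 * 9**5 = 354294,
because seven digits cannot sum high enough to reach a seven-digit number, so

    sum(n for n in range(10, 6 * 9**5) if n == sum(int(d)**5 for d in str(n)))

evaluates the required total.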
"""
|
prathamtandon/g4gproblems
|
Arrays/longest_increasing_subsequence_nlogn.py
|
Python
|
mit
| 1,625
| 0.004308
|
import unittest
"""
Given an unordered array of integers, find the length of longest increasing subsequence.
Input: 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
Output: 6 (0, 2, 6, 9, 11, 15)
"""
"""
A great explanation of the approach appears here:
http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/
"""
def find_ceil_index(list_of_numbers, ele):
"""
Returns the smallest element in list_of_numbers greater than or equal to ele.
"""
low = 0
high = len(list_of_numbers)-1
ans = -1
while low <= high:
        mid = (low + high) // 2  # integer division keeps mid a valid index on Python 2 and 3
if list_of_numbers[mid] >= ele:
ans = mid
high = mid - 1
else:
low = mid + 1
return ans
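# Editorial worked example of the patience-style idea implemented below: LCS is
# kept as the smallest possible tail of an increasing subsequence of each
# length. For the prefix 0, 8, 4, 12, 2, 10, 6 of the sample input it evolves as
# [0] -> [0, 8] -> [0, 4] -> [0, 4, 12] -> [0, 2, 12] -> [0, 2, 10] -> [0, 2, 6],
# and after the full input its length is 6, the expected answer.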
def find_longest_increasing_subsequence_length(list_of_numbers):
LCS = [list_of_numbers[0]]
for i in range(1, len(list_of_numbers)):
cur_ele = list_of_numbers[i]
k = find_ceil_index(LCS, cur_ele)
if k == -1:
LCS.append(cur_ele)
else:
LCS[k] = cur_ele
return len(LCS)
class TestLIS(unittest.TestCase):
def test_longest_increasing_subsequence(self):
list_of_numbers = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [2, 5, 3, 1, 2, 3, 4, 5, 6]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [5, 4, 3, 2, 1]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 1)
|
hilario/trep
|
src/system.py
|
Python
|
gpl-3.0
| 46,931
| 0.00358
|
import math
import inspect
import numpy as np
import numpy.linalg as linalg
import scipy as sp
import scipy.optimize
import scipy.io
from itertools import product
import trep
import _trep
from _trep import _System
from frame import Frame
from finput import Input
from config import Config
from force import Force
from constraint import Constraint
from potential import Potential
from util import dynamics_indexing_decorator
class System(_System):
"""
The System class represents a complete mechanical system
comprising coordinate frames, configuration variables, potential
energies, constraints, and forces.
"""
def __init__(self):
"""
Create a new mechanical system.
"""
_System.__init__(self)
# _System variables need to be initialized (cleaner here than in C w/ ref counting)
self._frames = tuple()
self._configs = tuple()
self._dyn_configs = tuple()
self._kin_configs = tuple()
self._potentials = tuple()
self._forces = tuple()
self._inputs = tuple()
self._constraints = tuple()
self._masses = tuple()
self._hold_structure_changes = 0
self._structure_changed_funcs = []
# Hold off the initial structure update until we have a world
# frame.
self._hold_structure_changes = 1
self._world_frame = Frame(self, trep.WORLD, None, name="World")
self._hold_structure_changes = 0
self._structure_changed()
def __repr__(self):
return '<System %d configs, %d frames, %d potentials, %d constraints, %d forces, %d inputs>' % (
len(self.configs),
len(self.frames),
len(self.potentials),
len(self.constraints),
len(self.forces),
len(self.inputs))
@property
def nQ(self):
"""Number of configuration variables in the system."""
return len(self.configs)
@property
def nQd(self):
"""Number of dynamic configuration variables in the system."""
return len(self.dyn_configs)
@property
def nQk(self):
"""Number of kinematic configuration variables in the system."""
return len(self.kin_configs)
@property
def nu(self):
"""Number of inputs in the system."""
return len(self.inputs)
@property
def nc(self):
"""Number of constraints in the system."""
return len(self.constraints)
@property
def t(self):
"""Current time of the system."""
return self._time
@t.setter
def t(self, value):
self._clear_cache()
self._time = value
def get_frame(self, identifier):
"""
get_frame(identifier) -> Frame,None
Return the first frame with the matching identifier. The
identifier can be the frame name, index, or the frame itself.
Raise an exception if no match is found.
"""
return self._get_object(identifier, Frame, self.frames)
def get_config(self, identifier):
"""
get_config(identifier) -> Config,None
Return the first configuration variable with the matching
identifier. The identifier can be the config name, index, or
the config itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Config, self.configs)
def get_potential(self, identifier):
"""
get_potential(identifier) -> Potential,None
Return the first potential with the matching identifier. The
identifier can be the constraint name, index, or the
constraint itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Potential, self.potentials)
def get_constraint(self, identifier):
"""
get_constraint(identifier) -> Constraint,None
Return the first constraint with the matching identifier. The
identifier can be the constraint name, index, or the
constraint itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Constraint, self.constraints)
def get_force(self, identifier):
"""
get_force(identifier) -> Force,None
Return the first force with the matching identifier. The
identifier can be the force name, index, or the
force itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Force, self.forces)
def get_input(self, identifier):
"""
get_input(identifier) -> Input,None
Return the first input with the matching identifier. The
identifier can be the input name, index, or the
input itself. Raise an exception if no match is found.
"""
return self._get_object(identifier, Input, self.inputs)
def satisfy_constraints(self, tolerance=1e-10, verbose=False,
keep_kinematic=False, constant_q_list=None):
"""
Modify the current configuration to satisfy the system
constraints.
The configuration velocity (ie, config.dq) is simply set to
zero. This should be fixed in the future.
        Passing True for keep_kinematic prevents the method from modifying
        kinematic configuration variables.
        Passing a list (or tuple) of configurations as constant_q_list
        keeps all listed elements constant.  The method uses
        trep.System.get_config, so the list may contain configuration
        objects, indices in Q, or names.  Passing anything for
        constant_q_list overrides the value of keep_kinematic.
"""
self.dq = 0
if keep_kinematic:
names = [q.name for q in self.dyn_configs]
q0 = self.qd
else:
names = [q.name for q in self.configs]
q0 = self.q
if constant_q_list:
connames = [self.get_config(q).name for q in constant_q_list]
names = []
for q in self.configs:
if q.name not in connames:
names.append(q.name)
q0 = np.array([self.q[self.get_config(name).index] for name in names])
def func(q):
v = (q - q0)
return np.dot(v,v)
def fprime(q):
return 2*(q-q0)
def f_eqcons(q):
self.q = dict(zip(names,q))
return np.array([c.h() for c in self.constraints])
def fprime_eqcons(q):
self.q = dict(zip(names,q))
return np.array([[c.h_dq(self.get_config(q)) for q in names] for c in self.constraints])
(q_opt, fx, its, imode, smode) = sp.optimize.fmin_slsqp(func, q0, f_eqcons=f_eqcons,
fprime=fprime, fprime_eqcons=fprime_eqcons,
acc=tolerance, iter=100*self.nQ,
iprint=0, full_output=True)
if imode != 0:
raise StandardError("Minimization failed: %s" % smode)
self.q = dict(zip(names,q_opt))
return self.q
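    # Editorial note: the SLSQP call above effectively projects the current
    # configuration onto the constraint manifold, i.e. it minimizes
    # ||q - q0||**2 subject to h_i(q) = 0 for every constraint i.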
def minimize_potential_energy(self, tolerance=1e-10, verbose=False,
keep_kinematic=False, constant_q_list=None):
"""
Find a nearby configuration where the potential energy is
minimized. Useful for finding nearby equilibrium points.
        If a minimum is found, all constraints will be satisfied as well.
The configuration velocity (ie, config.dq) is set to
zero which ensures the kinetic energy is zero.
        Passing True for keep_kinematic prevents the method from modifying
        kinematic configuration variables.
        Passing a list (or tuple) of configurations as constant_q_list
        keeps all listed elements constant.  The method uses
        trep.System.get_config, so the list may contain configuration
        objects, indices in Q, or names.  Passing anything for
        constant_q_list overrides the value of keep_kinematic.
"
|
simone-campagna/petra
|
petra/token_list.py
|
Python
|
apache-2.0
| 2,481
| 0.003628
|
__all__ = (
'TokenList',
)
import collections
from .errors import TokenTypeError
class TokenList(collections.Sized, collections.Iterable, collections.Container):
def __init__(self, init=None, *, token_type=None):
if token_type is None:
token_type = object
self._token_type = token_type
self._tokens = collections.deque()
if init:
if not hasattr(init, '__iter__'):
                raise TypeError("invalid value {!r}: not an iterable".format(init))
for token in init:
self.add(token)
@property
def token_type(self):
return self._token_type
def add(self, token, *, count=1):
if not isinstance(token, self._token_type):
            raise TokenTypeError("invalid token {!r}: type is not {}".format(token, self._token_type.__name__))
for i in range(count):
self._tokens.append(token)
def pop(self):
return self._tokens.popleft()
def remove(self, token):
for c, t in enumerate(self._tokens):
if t is token:
break
else:
return
del self._tokens[c]
#self._tokens.remove(token)
def copy(self):
return self.__class__(init=self, token_type=self.token_type)
def __iter__(self):
yield from self._tokens
def __len__(self):
return len(self._tokens)
def clear(self):
self._tokens.clear()
def extend(self, values):
if self._token_type is object:
self._tokens.extend(values)
else:
for value in values:
self.add(value)
def __contains__(self, value):
return value in self._tokens
def __repr__(self):
args = []
if self:
args.append(repr(list(self._tokens)))
if self._token_type is not object:
args.append("token_type={}".format(self._token_type.__name__))
return "{}({})".format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
if isinstance(other, TokenList):
if self._token_type != other.token_type:
return False
return self._tokens == other._tokens
else:
if len(self._tokens) != len(other):
return False
for a, b in zip(self._tokens, other):
if a != b:
return False
return True
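# Usage sketch (editorial; the values are illustrative):
#
#   tl = TokenList(token_type=str)
#   tl.add('a', count=2)
#   tl.extend(['b'])
#   list(tl)     # -> ['a', 'a', 'b']
#   'b' in tl    # -> True
#   tl.pop()     # -> 'a' (FIFO: popleft on the underlying deque)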
|
mitsuhiko/sqlalchemy
|
test/orm/test_dynamic.py
|
Python
|
mit
| 30,088
| 0.002526
|
from sqlalchemy.testing import eq_, is_
from sqlalchemy.orm import backref, configure_mappers
from sqlalchemy import testing
from sqlalchemy import desc, select, func, exc
from sqlalchemy.orm import mapper, relationship, create_session, Query, \
attributes, exc as orm_exc, Session
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.testing import AssertsCompiledSQL, \
assert_raises_message, assert_raises
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
class _DynamicFixture(object):
def _user_address_fixture(self, addresses_args={}):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(Address, lazy="dynamic",
**addresses_args)
})
mapper(Address, addresses)
return User, Address
def _order_item_fixture(self, items_args={}):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
            'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
**items_args
)
})
mapper(Item, items)
return Order, Item
class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL):
def test_basic(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
eq_([User(id=7,
addresses=[Address(id=1, email_address='jack@bean.com')])],
q.filter(User.id == 7).all())
eq_(self.static.user_address_result, q.all())
def test_statement(self):
"""test that the .statement accessor returns the actual statement that
would render, without any _clones called."""
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
u = q.filter(User.id == 7).first()
self.assert_compile(
u.addresses.statement,
"SELECT addresses.id, addresses.user_id, addresses.email_address "
"FROM "
"addresses WHERE :param_1 = addresses.user_id",
use_default_dialect=True
)
def test_detached_raise(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
sess.expunge(u)
assert_raises(
orm_exc.DetachedInstanceError,
u.addresses.filter_by,
email_address='e'
)
def test_no_uselist_false(self):
User, Address = self._user_address_fixture(
addresses_args={"uselist": False})
assert_raises_message(
exc.InvalidRequestError,
"On relationship User.addresses, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_no_m2o(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User, lazy='dynamic')
})
mapper(User, users)
assert_raises_message(
exc.InvalidRequestError,
"On relationship Address.user, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_order_by(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses.order_by(desc(Address.email_address))),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
def test_configured_order_by(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by":
addresses.c.email_address.desc()})
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
# test cancellation of None, replacement with something else
eq_(
list(u.addresses.order_by(None).order_by(Address.email_address)),
[
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
]
)
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
set([
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
])
)
def test_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).first()
eq_(u.addresses.count(), 1)
def test_dynamic_on_backref(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User,
backref=backref('addresses', lazy='dynamic'))
})
mapper(User, users)
sess = create_session()
ad = sess.query(Address).get(1)
def go():
ad.user = None
self.assert_sql_count(testing.db, go, 0)
sess.flush()
u = sess.query(User).get(7)
assert ad not in u.addresses
def test_no_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
# dynamic collection cannot implement __len__() (at least one that
# returns a live database result), else additional count() queries are
# issued when evaluating in a list context
def go():
eq_(
q.filter(User.id == 7).all(),
[
User(id=7,
addresses=[
Address(id=1, email_address='jack@bean.com')
])
]
)
self.assert_sql_count(testing.db, go, 2)
def test_no_populate(self):
User, Address = self._user_address_fixture()
u1 = User()
assert_raises_message(
NotImplementedError,
"Dynamic attributes don't support collection population.",
attributes.set_committed_value, u1, 'addresses', []
)
def test_m2m(self):
Order, Item = self._order_item_fixture(items_args={
"backref": backref("orders", lazy="dynamic")
})
sess = create_session()
o1 = Order(id=15, description="order 10")
i1 = Item(id=10, description="item 8")
o1.items.ap
|
wtamu-cisresearch/nltksite
|
nltkapp/views.py
|
Python
|
mit
| 8,449
| 0.034797
|
from django.shortcuts import render
from .models import Document, Corpus
from django.http import JsonResponse
from django.conf import settings
import json
import os
import re
import nltk
from nltk.corpus import *
from nltk.collocations import *
import string
import logging
logger = logging.getLogger('nltksite.nltkapp')
# Create your views here.
# this is horrible
def clearencoding(str):
try:
json.dumps(str)
if len(str) == 1 and ord(str) > 128:
logger.warn("Unicode Error on str='%s' code=%s Skipping" % (repr(str), ord(str)))
str = ""
except UnicodeDecodeError:
logger.warn("Unicode Error on str='%s' code=%s Skipping" % (str, repr(str)))
str = str.decode('utf8', 'ignore')
return str
def index(request):
logger.debug("index requested.")
corpora = Corpus.objects.all()
context = {'corpora': corpora}
return render(request, 'nltkapp/index.html', context)
def sayhello(request):
logger.debug("say hello.")
return JsonResponse({'message': 'Hello World'})
def getdocuments(request):
corpus_id = request.GET.get('corpus_id', None)
c = Corpus.objects.get(pk=corpus_id)
logger.debug("Getting list of documents for corpus %s (id=%s)" % (c.name,corpus_id))
documents = c.document_set.all()
documents_list = []
for d in documents:
documents_list.append({'id': d.id, 'name': d.file.name})
return JsonResponse({'documents': documents_list})
def get_sentences(request):
corpus_id = request.GET.get('corpus_id', None)
document_ids = json.loads(request.GET.get('document_ids', None))
word = request.GET.get('word', None)
logger.debug("corpus_id=%s, document_ids=%s, word=%s" % (corpus_id, str(document_ids), word))
finalResult = {}
corpus, internal_filter = open_corpus(corpus_id, document_ids)
# \b is a word boundary match in regex, so we get government but not governmentally
pattern = "\\b" + word + "\\b"
# Chosen corpus is an nltk internal corpus (gutenberg, bible, inaugural addresses, etc...).
# We treat those slightly differently than user-mode corpora
fileids = []
if internal_filter:
fileids = [internal_filter]
else:
# Get array of fileids used by the NLTK corpus object from our own document ids
fileids = corpus.fileids()
logger.debug("fileids=%s", fileids)
for fileid in fileids:
if fileid in corpus.fileids():
sents = corpus.sents(fileid)
results = []
for sentence in sents:
combined = clearencoding(' '.join(sentence))
if re.search(pattern, combined):
results.append(combined)
if len(results) > 0:
finalResult[fileid] = results
# wdmatrix is a word-document matrix. finalResult['facebook.txt'] = [sentences]
return JsonResponse({'word': word, 'wdmatrix':finalResult})
def wordfreq(request):
corpus_id = request.GET.get('corpus_id', None)
document_ids = json.loads(request.GET.get('document_ids', None))
ngram = request.GET.get('ngram', None)
scoring_method = request.GET.get('scoring_method', None)
logger.debug("corpus_id=%s, document_ids=%s, ngram=%s, scoring_method=%s" % (corpus_id, str(document_ids), ngram, scoring_method))
corpus, internal_filter = open_corpus(corpus_id, document_ids)
if not internal_filter:
words = corpus.words()
else:
words = corpus.words(internal_filter)
logger.debug("PlaintextCorpusReader on files: %s" % corpus.fileids())
if ngram == "1":
return onegram_collocation(words)
elif ngram == "2":
first_word_list, fdist = bigram_collocation(words, scoring_method)
elif ngram == "3":
first_word_list, fdist = trigram_collocation(words, scoring_method)
else:
logger.debug("Invalid ngram value specified. " + ngram)
word_list = []
for b in first_word_list:
for sample in fdist:
if b == sample:
worddict = {'word': clearencoding(' '.join(sample)), 'weight': fdist[sample], 'exclude': 0, 'exclude_reason': ''}
break
word_list.append(worddict)
return JsonResponse({'list':word_list})
def onegram_collocation(words):
fdist = nltk.FreqDist(words)
unusual_list = unusual_words(words)
word_list = []
for sample in fdist:
contains_punctuation = False
all_punctuation = True
for c in sample:
if c in string.punctuation:
contains_punctuation = True
else:
all_punctuation = False
# If word contains punctuation OR occurs less than 3 times OR is a stop word, SKIP IT
if (contains_punctuation or fdist[sample] < 3 or sample in stopwords.words('english')):
continue
if (clearencoding(sample.lower()) in unusual_list):
unusual = True
else:
unusual = False
if (len(clearencoding(sample)) > 0):
word_list.append({'word': clearencoding(sample), 'weight': fdist[sample], 'exclude': 0, 'exclude_reason': '', 'unusual': unusual})
return JsonResponse({'list':word_list})
def bigram_collocation(words, score):
ignored_words = stopwords.words('english')
bigrams = nltk.bigrams(words)
fdist = nltk.FreqDist(bigrams)
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(words)
# Only select bigrams that appear at least 3 times
finder.apply_freq_filter(3)
    finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
    # rank the bigrams with the selected association measure (PMI by default) and keep the top 100
method = bigram_measures.pmi
if "student_t" in score:
method = bigram_measures.student_t
elif "chi_sq" in score:
method = bigram_measures.chi_sq
elif "pmi" in score:
method = bigram_measures.pmi
elif "likelihood_ratio" in score:
method = bigram_measures.likelihood_ratio
elif "poisson_stirling" in score:
        method = bigram_measures.poisson_stirling
elif "jaccard" in score:
method = bigram_measures.jaccard
word_list = finder.nbest(method, 100)
return [word_list, fdist]
def trigram_collocation(words, score):
ignored_words = stopwords.words('english')
trigrams = nltk.trigrams(words)
fdist = nltk.FreqDist(trigrams)
trigram_measures = nltk.collocations.TrigramAssocMeasures()
finder = TrigramCollocationFinder.from_words(words)
#finder.apply_freq_filter(3)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
method = trigram_measures.pmi
if "student_t" in score:
method = trigram_measures.student_t
elif "chi_sq" in score:
method = trigram_measures.chi_sq
elif "pmi" in score:
method = trigram_measures.pmi
elif "likelihood_ratio" in score:
method = trigram_measures.likelihood_ratio
elif "poisson_stirling" in score:
method = trigram_measures.poisson_stirling
elif "jaccard" in score:
method = trigram_measures.jaccard
word_list = finder.nbest(method, 100)
return [word_list, fdist]
# Given an array of words, connect to wordnet and return the part of speech, definition, etc...
def wordnet_data(request):
words = json.loads(request.GET.get('words', None))
logger.debug("wordnet_data words=%s" % str(words))
results = []
for w in words:
syns = wordnet.synsets(w)
if len(syns) > 0:
root_word = syns[0].lemmas()[0].name()
pos = syns[0].pos()
definition = syns[0].definition()
synonyms = ''
for syn in syns:
if (syn.lemmas()[0].name() != root_word):
synonyms += syn.lemmas()[0].name() + ', '
examples = syns[0].examples()
results.append({'word': w,
'root': root_word,
'pos': pos,
'definition': definition,
'synonyms': synonyms[:-2],
'examples': examples
})
else:
results.append({'word': w,
'root': 'undefined',
'pos': 'undefined',
'definition': 'undefined',
'synonyms': 'undefined',
'examples': 'undefined'
})
return JsonResponse({'results': results})
def unusual_words(text):
text_vocab = set(w.lower() for w in text if w.isalpha())
english_vocab = set(w.lower() for w in nltk.corpus.words.words())
unusual = text_vocab.difference(english_vocab)
return sorted(unusual)
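# Editorial note: unusual_words() keeps only alphabetic tokens, lower-cases
# them, and reports those missing from the NLTK english word list, so both rare
# vocabulary and misspellings end up in the "unusual" flag used above.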
def open_corpus(corpus_id, document_ids):
c = Corpus.objects.get(pk=corpus_id)
if c.internal_nltk_name:
return eval(c.internal_nltk_name), c.internal_nltk_filter
fileids = []
for d in document_ids:
d = int(d)
# we want entire corpus
if (d == -1):
fileids = '.*\.txt'
break
document = Document.objects.get(pk=d)
fileids.append(os.path.basename(document.file.name))
# Kareem March 5, 2015: Added encoding=None. This prevents NLTK
|
RobinQuetin/CAIRIS-web
|
cairis/cairis/ReferenceContribution.py
|
Python
|
apache-2.0
| 1,176
| 0.007653
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class ReferenceContribution:
def __init__(self,src,dest,me,cont):
self.theSource = src
self.theDestination = dest
self.theMeansEnd = me
self.theContribution = cont
  def source(self): return self.theSource
def destination(self): return self.theDestination
def meansEnd(self): return self.theMeansEnd
def contribution(self): return self.theContribution
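# Minimal usage sketch (editorial; the argument values are made up):
#
#   rc = ReferenceContribution('reference A', 'goal B', 'means', 'Help')
#   rc.source(), rc.meansEnd(), rc.contribution()  # -> ('reference A', 'means', 'Help')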
|
Akrog/cinder
|
cinder/tests/test_coraid.py
|
Python
|
apache-2.0
| 33,873
| 0
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
from cinder.brick.initiator import connector
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import coraid
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def to_coraid_kb(gb):
return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):
return '{0}K'.format(to_coraid_kb(gb))
fake_esm_ipaddress = "192.168.0.1"
fake_esm_username = "darmok"
fake_esm_group = "tanagra"
fake_esm_group_id = 1
fake_esm_password = "12345678"
fake_coraid_repository_key = 'repository_key'
fake_volume_name = "volume-12345678-1234-1234-1234-1234567890ab"
fake_clone_name = "volume-ffffffff-1234-1234-1234-1234567890ab"
fake_volume_size = 10
fake_repository_name = "A-B:C:D"
fake_pool_name = "FakePool"
fake_aoetarget = 4081
fake_shelf = 16
fake_lun = 241
fake_str_aoetarget = str(fake_aoetarget)
fake_lun_addr = {"shelf": fake_shelf, "lun": fake_lun}
fake_volume_type = {'id': 1}
fake_volume = {"id": fake_volume_name,
"name": fake_volume_name,
"size": fake_volume_size,
"volume_type": fake_volume_type}
fake_clone_volume = {"name": fake_clone_name,
"size": fake_volume_size,
"volume_type": fake_volume_type}
fake_big_clone_volume = {"name": fake_clone_name,
"size": fake_volume_size + 1,
"volume_type": fake_volume_type}
fake_volume_info = {"pool": fake_pool_name,
"repo": fake_repository_name,
"vsxidx": fake_aoetarget,
"index": fake_lun,
"shelf": fake_shelf}
fake_lun_info = {"shelf": fake_shelf, "lun": fake_lun}
fake_snapshot_name = "snapshot-12345678-8888-8888-1234-1234567890ab"
fake_snapshot_id = "12345678-8888-8888-1234-1234567890ab"
fake_volume_id = "12345678-1234-1234-1234-1234567890ab"
fake_snapshot = {"id": fake_snapshot_id,
"name": fake_snapshot_name,
"volume_id": fake_volume_id,
"volume_name": fake_volume_name,
"volume_size": int(fake_volume_size) - 1,
"volume": fake_volume}
fake_configure_data = [{"addr": "cms", "data": "FAKE"}]
fake_esm_fetch = [[
{"command": "super_fake_command"},
{"reply": [
{"lv":
{"containingPool": fake_pool_name,
"lunIndex": fake_aoetarget,
"name": fake_volume_name,
"lvStatus":
{"exportedLun":
{"lun": fake_lun,
"shelf": fake_shelf}}
},
"repoName": fake_repository_name}]}]]
fake_esm_fetch_no_volume = [[
{"command": "super_fake_command"},
{"reply": []}]]
fake_esm_success = {"category": "provider",
"tracking": False,
"configState": "completedSuccessfully",
"heldPending": False,
"metaCROp": "noAction",
"message": None}
fake_group_fullpath = "admin group:%s" % (fake_esm_group)
fake_group_id = 4
fake_login_reply = {"values": [
{"fullPath": fake_group_fullpath,
"groupId": fake_group_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
fake_group_fail_fullpath = "fail group:%s" % (fake_esm_group)
fake_group_fail_id = 5
fake_login_reply_group_fail = {"values": [
{"fullPath": fake_group_fail_fullpath,
"groupId": fake_group_fail_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
def compare(a, b):
if type(a) != type(b):
return False
if type(a) == list or type(a) == tuple:
if len(a) != len(b):
return False
return all(map(lambda t: compare(t[0], t[1]), zip(a, b)))
elif type(a) == dict:
if len(a) != len(b):
return False
for k, v in a.items():
if not compare(v, b[k]):
return False
return True
else:
return a == b
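# compare() is a structural deep-equality helper used by the fakes below, e.g.
# compare({'a': [1, 2]}, {'a': [1, 2]}) is True, while compare([1], (1,)) is
# False because the two types differ (editorial examples).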
def pack_data(request):
request['data'] = jsonutils.dumps(request['data'])
class FakeRpcBadRequest(Exception):
pass
class FakeRpcIsNotCalled(Exception):
def __init__(self, handle, url_params, data):
self.handle = handle
self.url_params = url_params
self.data = data
def __str__(self):
return 'Fake Rpc handle for {0}/{1}/{2} not found'.format(
self.handle, self.url_params, self.data)
class FakeRpcHandle(object):
def __init__(self, handle, url_params, data, result):
self.handle = handle
self.url_params = url_params
self.data = data
self.result = result
self._is_called = False
def set_called(self):
self._is_called = True
def __call__(self, handle, url_params, data,
allow_empty_response=False):
if handle != self.handle:
raise FakeRpcBadRequest(
'Unexpected handle name {0}. Expected {1}.'
.format(handle, self.handle))
if not compare(url_params, self.url_params):
raise FakeRpcBadRequest('Unexpected url params: {0} / {1}'
.format(url_params, self.url_params))
if not compare(data, self.data):
raise FakeRpcBadRequest('Unexpected data: {0}/{1}'
.format(data, self.data))
if callable(self.result):
return self.result()
else:
return self.result
class FakeRpc(object):
def __init__(self):
self._handles = []
def handle(self, handle, url_params, data, result):
self._handles.append(FakeRpcHandle(handle, url_params, data, result))
def __call__(self, handle_name, url_params, data,
allow_empty_response=False):
for handle in self._handles:
if (handle.handle == handle_name and
compare(handle.url_params, url_params) and
compare(handle.data, handle.data)):
handle.set_called()
return handle(handle_name, url_params, data,
allow_empty_response)
        raise FakeRpcIsNotCalled(handle_name, url_params, data)
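# Usage sketch for the fake RPC (editorial; reuses the fake fixtures above):
#
#   rpc = FakeRpc()
#   rpc.handle('configure', {}, fake_configure_data, fake_esm_success)
#   rpc('configure', {}, fake_configure_data)    # -> fake_esm_success
#   rpc('fetch', {}, fake_configure_data)        # raises FakeRpcIsNotCalled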
class CoraidDriverTestCase(test.TestCase):
def setUp(self):
super(CoraidDriverTestCase, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.coraid_default_repository = 'default_repository'
configuration.coraid_esm_address = fake_esm_ipaddress
configuration.coraid_user = fake_esm_username
configuration.coraid_group = fake_esm_group
configuration.coraid_password = fake_esm_password
configuration.volume_name_template = "volume-%s"
configuration.snapshot_name_template = "snapshot-%s"
configuration.coraid_repository_key = fake_coraid_repository_key
configuration.use_multipath_for_image_xfer = False
|
tuturto/pyherc
|
src/herculeum/ui/gui/mainwindow.py
|
Python
|
mit
| 8,099
| 0.001235
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for main window related functionality
"""
import PyQt4.QtGui
from herculeum.ui.controllers import EndScreenController, StartGameController
from herculeum.ui.gui.endscreen import EndScreen
from herculeum.ui.gui.eventdisplay import EventMessageDockWidget
from herculeum.ui.gui.map import PlayMapWindow
from herculeum.ui.gui.menu import MenuDialog
from herculeum.ui.gui.startgame import StartGameWidget
from PyQt4.QtCore import QFile, Qt
from PyQt4.QtGui import (QAction, QApplication, QCursor, QDialog, QIcon,
QMainWindow, QPixmap, QSplashScreen)
class QtUserInterface():
"""
Class for Qt User Interface
.. versionadded:: 0.9
"""
def __init__(self, application):
"""
Default constructor
"""
super().__init__()
self.application = application
self.splash_screen = None
self.qt_app = QApplication([])
# self.qt_app.setOverrideCursor(QCursor(Qt.BlankCursor))
def show_splash_screen(self):
"""
Show splash screen
"""
file = QFile(':herculeum.qss')
file.open(QFile.ReadOnly)
styleSheet = str(file.readAll().data(), 'ascii')
self.qt_app.setStyleSheet(styleSheet)
pixmap = QPixmap(':splash.png')
self.splash_screen = QSplashScreen(pixmap)
self.splash_screen.show()
def show_main_window(self):
"""
Show main window
"""
main_window = MainWindow(self.application,
self.application.surface_manager,
self.qt_app,
None,
                                 Qt.FramelessWindowHint,
StartGameController(self.application.level_generator_factory,
self.application.creature_generator,
self.application.item_generator,
self.application.config.start_level))
self.splash_screen.finish(main_window)
main_window.show_new_game()
self.qt_app.exec_()
class MainWindow(QMainWindow):
"""
Class for displaying main window
.. versionadded:: 0.5
"""
def __init__(self, application, surface_manager, qt_app, parent, flags,
controller):
"""
Default constructor
"""
super().__init__(parent, flags)
self.application = application
self.surface_manager = surface_manager
self.qt_app = qt_app
self.controller = controller
self.__set_layout()
def __set_layout(self):
exit_action = QAction(QIcon(':exit-game.png'),
'&Quit',
self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Quit game')
exit_action.triggered.connect(PyQt4.QtGui.qApp.quit)
inventory_action = QAction(QIcon(':inventory.png'),
'Inventory',
self)
inventory_action.setShortcut('Ctrl+I')
inventory_action.setStatusTip('Show inventory')
inventory_action.triggered.connect(self.__show_menu)
character_action = QAction(QIcon(':character.png'),
'Character',
self)
character_action.setShortcut('Ctrl+C')
character_action.setStatusTip('Show character')
self.map_window = PlayMapWindow(parent=None,
model=self.application.world,
surface_manager=self.surface_manager,
action_factory=self.application.action_factory,
rng=self.application.rng,
rules_engine=self.application.rules_engine,
configuration=self.application.config)
self.setCentralWidget(self.map_window)
self.map_window.MenuRequested.connect(self.__show_menu)
self.map_window.EndScreenRequested.connect(self.__show_end_screen)
self.setGeometry(50, 50, 800, 600)
self.setWindowTitle('Herculeum')
self.setWindowIcon(QIcon(':rune-stone.png'))
self.showMaximized()
def show_new_game(self):
"""
Show new game dialog
"""
app = self.application
start_dialog = StartGameWidget(generator=app.player_generator,
config=self.application.config.controls,
parent=self,
application=self.application,
surface_manager=self.surface_manager,
flags=Qt.Dialog | Qt.CustomizeWindowHint)
result = start_dialog.exec_()
if result == QDialog.Accepted:
player = start_dialog.player_character
intro_text = self.controller.setup_world(self.application.world,
player)
player.register_for_updates(self.map_window.hit_points_widget)
self.map_window.hit_points_widget.show_hit_points(player)
self.map_window.hit_points_widget.show_spirit_points(player)
self.map_window.message_widget.text_edit.setText(intro_text)
self.__show_map_window()
def __show_map_window(self):
"""
Show map window
"""
self.map_window.construct_scene()
def __show_message_window(self, character):
"""
Show message display
:param character: character which events to display
:type character: Character
"""
messages_display = EventMessageDockWidget(self, character)
self.addDockWidget(Qt.BottomDockWidgetArea,
messages_display)
def __show_menu(self):
"""
Show menu
"""
menu_dialog = MenuDialog(self.surface_manager,
self.application.world.player,
self.application.action_factory,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint)
menu_dialog.exec_()
def __show_end_screen(self):
"""
Show end screen
.. versionadded:: 0.8
"""
end_screen = EndScreen(self.application.world,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint,
controller=EndScreenController())
end_screen.exec_()
self.qt_app.quit()
|
kingvuplus/ts-gui-3
|
lib/python/Components/language_cache.py
|
Python
|
gpl-2.0
| 50,254
| 0.002846
|
LANG_TEXT = {'en_EN': {'tr_TR': 'Turkish',
'fr_FR': 'French',
'fi_FI': 'Finnish',
'pt_PT': 'Portuguese',
'fy_x-FY': 'Frisian',
'it_IT': 'Italian',
'et_EE': 'Estonian',
'no_NO': 'Norwegian',
'nl_NL': 'Dutch',
'lv_LV': 'Latvian',
'el_GR': 'Greek',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hungarian',
'lt_LT': 'Lithuanian',
'sl_SI': 'Slovenian',
'hr_HR': 'Croatian',
'en_EN': 'English',
'es_ES': 'Spanish',
'ca_AD': 'Catalan',
'ru_RU': 'Russian',
'is_IS': 'Icelandic',
'da_DK': 'Danish',
'ar_AE': 'Arabic',
'sk_SK': 'Slovakian',
'de_DE': 'German',
'sr_YU': 'Serbian',
'cs_CZ': 'Czech',
'pl_PL': 'Polish',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': 'Swedish',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': 'Language selection'},
'de_DE': {'tr_TR': 'T\xc3\xbcrkisch',
'fr_FR': 'Franz\xc3\xb6sisch',
'fi_FI': 'Finnisch',
'pt_PT': 'portugiesisch',
'fy_x-FY': 'Friesisch',
'it_IT': 'Italienisch',
'et_EE': 'Estnisch',
'no_NO': 'Norwegisch',
'nl_NL': 'Holl\xc3\xa4ndisch',
'lv_LV': 'Lettisch',
'el_GR': 'Griechisch',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungarisch',
'lt_LT': 'litauisch',
'sl_SI': 'Slowenisch',
'hr_HR': 'Kroatisch',
'en_EN': 'Englisch',
'es_ES': 'Spanisch',
'ca_AD': 'Katalanisch',
'ru_RU': 'Russisch',
'is_IS': 'Isl\xc3\xa4ndisch',
'da_DK': 'D\xc3\xa4nisch',
'ar_AE': 'Arabisch',
'sk_SK': 'Slowakisch',
'de_DE': 'Deutsch',
'sr_YU': 'Serbisch',
'cs_CZ': 'Tschechisch',
'pl_PL': 'Polnisch',
'uk_UA': 'Ukrainisch',
'fa_IR': 'Persisch',
'sv_SE': 'Schwedisch',
'he_IL': 'Hebr\xc3\xa4isch',
'T1': 'Bitte benutzen Sie die Hoch/Runter-Tasten, um Ihre Sprache auszuw\xc3\xa4hlen. Danach dr\xc3\xbccken Sie bitte OK.',
'T2': 'Sprachauswahl'},
'ar_AE': {'tr_TR': '\xd8\xaa\xd8\xb1\xd9\x83\xd9\x89',
'fr_FR': '\xd9\x81\xd8\xb1\xd9\x86\xd8\xb3\xd9\x89',
'fi_FI': '\xd8\xa5\xd9\x86\xd8\xaa\xd9\x87\xd8\xa7\xd8\xa1',
'pt_PT': '\xd8\xa8\xd8\xb1\xd8\xaa\xd8\xba\xd8\xa7\xd9\x84\xd9\x89',
'fy_x-FY': 'Frisian',
'it_IT': '\xd8\xa5\xd9\x8a\xd8\xb7\xd8\xa7\xd9\x84\xd9\x89',
'et_EE': '\xd8\xa3\xd8\xb3\xd8\xaa\xd9\x88\xd9\x86\xd9\x89',
'no_NO': '\xd9\x86\xd8\xb1\xd9\x88\xd9\x8a\xd8\xac\xd9\x89',
'nl_NL': '\xd9\x87\xd9\x88\xd9\x84\xd9\x86\xd8\xaf\xd9\x89',
'lv_LV': 'Latvian',
'el_GR': '\xd8\xa7\xd9\x84\xd9\x8a\xd9\x88\xd9\x86\xd8\xa7\xd9\x86',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd9\x85\xd8\xac\xd8\xb1\xd9\x89',
'lt_LT': '\xd9\x84\xd9\x8a\xd8\xaa\xd9\x88\xd8\xa7\xd9\x86\xd9\x89',
'sl_SI': '\xd8\xb3\xd9\x84\xd9\x88\xd9\x81\xd8\xa7\xd9\x86\xd9\x89',
'hr_HR': '\xd9\x83\xd8\xb1\xd9\x88\xd8\xa7\xd8\xaa\xd9\x89',
'en_EN': '\xd8\xa5\xd9\x86\xd8\xac\xd9\x84\xd9\x8a\xd8\xb2\xd9\x89',
'es_ES': '\xd8\xa3\xd8\xb3\xd8\xa8\xd8\xa7\xd9\x86\xd9\x89',
'ca_AD': '\xd9\x83\xd8\xa7\xd8\xaa\xd8\xa7\xd9\x84\xd8\xa7\xd9\x86\xd9\x8a',
'ru_RU': '\xd8\xb1\xd9\x88\xd8\xb3\xd9\x89',
'is_IS': '\xd8\xa7\xd9\x8a\xd8\xb3\xd9\x84\xd9\x86\xd8\xaf\xd9\x89',
'da_DK': '\xd8\xaf\xd9\x86\xd9\x85\xd8\xa7\xd8\xb1\xd9\x83\xd9\x89',
'ar_AE': '\xd8\xb9\xd9\x80\xd8\xb1\xd8\xa8\xd9\x89',
'sk_SK': '\xd8\xb3\xd9\x84\xd9\x88\xd9\x81\xd8\xa7\xd9\x83\xd9\x89',
'de_DE': '\xd8\xa7\xd9\x84\xd9\x85\xd8\xa7\xd9\x86\xd9\x80\xd9\x89',
'sr_YU': '\xd8\xb5\xd8\xb1\xd9\x8a\xd9\x89',
'cs_CZ': '\xd8\xa7\xd9\x84\xd8\xaa\xd8\xb4\xd9\x8a\xd9\x83',
'pl_PL': '\xd8\xa8\xd9\x88\xd9\x84\xd9\x86\xd8\xaf\xd9\x89',
'uk_UA': '\xd8\xa3\xd9\x88\xd9\x83\xd8\xb1\xd8\xa7\xd9\x86\xd9\x89',
           'fa_IR': 'Persian',
'sv_SE': '\xd8\xb3\xd9\x88\xd9\x8a\xd8\xaf\xd9\x89',
'he_IL': 'Hebrew',
'T1': '\xd9\x85\xd9\x86 \xd9\x81\xd8\xb6\xd9\x84\xd9\x83 \xd8\xa3\xd8\xb3\xd8\xaa\xd8\xae\xd8\xaf\xd9\x85 \xd8\xb0\xd8\xb1 \xd8\xa7\xd9\x84\xd8\xb3\xd9\x87\xd9\x85 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x84\xd9\x88\xd9\x89 \xd8\xa3\xd9\x88 \xd8\xa7\xd9\x84\xd8\xb3\xd9\x81\xd9\x84\xd9\x89 \xd9\x84\xd8\xa5\xd8\xae\xd8\xaa\xd9\x8a\xd8\xa7\xd8\xb1 \xd8\xa7\xd9\x84\xd9\x84\xd8\xba\xd9\x87. \xd8\xab\xd9\x85 \xd8\xa3\xd8\xb6\xd8\xba\xd8\xb7 \xd9\x85\xd9\x88\xd8\xa7\xd9\x81\xd9\x82 .',
'T2': '\xd8\xa5\xd8\xae\xd8\xaa\xd9\x8a\xd8\xa7\xd8\xb1 \xd8\xa7\xd9\x84\xd9\x84\xd8\xba\xd9\x80\xd9\x87'},
'pt_BR_BR': {'tr_TR': 'Turco',
'fr_FR': 'Franc\xc3\xaas',
'fi_FI': 'Finland\xc3\xaas',
'pt_PT': 'Portugu\xc3\xaas brasileiro',
'fy_x-FY': 'Fr\xc3\xadsio',
'it_IT': 'Italiano',
'et_EE': 'Estoniano',
'no_NO': 'Noruegu\xc3\xaas',
'nl_NL': 'Holand\xc3\xaas',
'lv_LV': 'Let\xc3\xa3o',
'el_GR': 'Grego',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'H\xc3\xbangaro',
'lt_LT': 'Lituano',
'sl_SI': 'Esloveno',
'hr_HR': 'Croata',
'en_EN': 'Ingl\xc3\xaas',
'es_ES': 'Catal\xc3\xa3o',
'ca_AD': 'Catal\xc3\xa3o',
'ru_RU': 'Russo',
'is_IS': 'Island\xc3\xaas',
'da_DK': 'Dinamarqu\xc3\xaas',
'ar_AE': '\xc3\x81rabe',
'sk_SK': 'Eslovaco',
'de_DE': 'Alem\xc3\xa3o',
'sr_YU': 'S\xc3\xa9rvia',
'cs_CZ': 'Checo',
'pl_PL': 'Polaco',
'uk_UA': 'Ucraniano',
'fa_IR': 'Persa',
'sv_SE': 'Sueco',
'he_IL': 'Hebr\xc3\xa1ico',
'T1': 'Use a tecla de cima ou de baixo para selecionar seu idioma. Depois pressione OK.',
'T2': 'Sele\xc3\xa7\xc3\xa3o do idioma'},
'ca_AD': {'tr_TR': 'Turc',
'fr_FR': 'Franc\xc3\xa8s',
'fi_FI': 'Finland\xc3\xa8s',
'pt_PT': 'Portugu\xc3\xa8s',
'fy_x-FY': 'Frisian',
'it_IT': 'Itali\xc3\xa0',
'et_EE': 'Estonian',
'no_NO': 'Noruec',
'nl_NL': 'Holand\xc3\xa8s',
'lv_LV': 'Latvian',
'el_GR': 'Grec',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Hongar\xc3\xa8s',
'lt_LT': 'Litu\xc3\xa0',
'sl_SI': 'Slovenian',
'hr_HR': 'Croat',
'en_EN': 'Angl\xc3\xa8s',
'es_ES': 'Espanyol',
'ca_AD': 'Catal\xc3\xa0',
'ru_RU': 'Rus',
'is_IS': 'Island\xc3\xa8s',
'da_DK': 'Dan\xc3\xa8s',
'ar_AE': '\xc3\x80rab',
'sk_SK': 'Slovakian',
'de_DE': 'Alemany',
'sr_YU': 'Serbian',
'cs_CZ': 'Txec',
'pl_PL': 'Polish',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persian',
'sv_SE': 'Suec',
'he_IL': 'Hebrew',
'T1': 'Please use the UP and DOWN keys to select your language. Afterwards press the OK button.',
'T2': "Selecci\xc3\xb3 d'idioma"},
'hr_HR': {'tr_TR': 'Turski',
'fr_FR': 'Francuski',
'fi_FI': 'Finski',
'pt_PT': 'Portugalski',
'fy_x-FY': 'Frisian',
'it_IT': 'Talijanski',
'et_EE': 'Estonian',
'no_NO': 'Norve\xc5\xa1ki',
'nl_NL': 'Nizozemski',
'lv_LV': 'Latvian',
'el_GR': 'Gr\xc
|
glucoseinc/naumanni-server
|
naumanni/web/__init__.py
|
Python
|
agpl-3.0
| 422
| 0
|
# -*- coding: utf-8 -*-
"""WebUI."""
from .websocket import WebsocketProxyHandler
def create_webapp(naumanni, **kwargs):
"""App factory.
:param CircleCore core: CircleCore Core
:param str base_url: ベースURL
:param int ws_port: Websocket Port Number
:return: WebUI App
:rtype: CCWebApp
"""
from .app import NaumanniWebApp
app = NaumanniWebApp(naumanni, **kwargs)
return app
|
TallJimbo/mcpib
|
tests/builtin_strings_test.py
|
Python
|
bsd-2-clause
| 2,081
| 0.004325
|
#
# Copyright (c) 2014, Jim Bosch
# All rights reserved.
#
# mcpib is distributed under a simple BSD-like license;
# see the LICENSE file that should be present in the root
# of the source distribution.
#
import unittest
import os
import sys
buildPythonPath = os.path.join(os.path.split(__file__)[0], "..", "python")
if os.path.exists(buildPythonPath): sys.path.insert(0, buildPythonPath)
import mcpib
import builtin_strings_mod as mod
class BuiltinStringsTestCase(unittest.TestCase):
def testString(self):
"""Test that to-Python and from-Python converters for std::string work as expected."""
self.assertEqual(mod.passthru_string("foo"), "foo")
self.assertRaises(mcpib.FromPythonError, mod.passthru_string, 5)
self.assertRaises(mcpib.FromPythonError, mod.passthru_string, ["bar"])
def testCString(self):
"""Test that to-Python and from-Python converters for char const * work as expected."""
self.assertEqual(mod.passthru_cstring("foo"), "foo")
self.assertRaises(mcpib.FromPythonError, mod.passthru_cstring, 5)
self.assertRaises(mcpib.FromPythonError, mod.passthru_cstring, ["bar"])
def testCharArgs(self):
"""Test that c-string converters are not used for char values, references, or non-const pointers."""
self.assertRaises(mcpib.FromPythonError, mod.accept_char, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_const, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_ptr, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_ref, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_const_ref, "foo")
self.assertRaises(mcpib.ToPythonError, mod.return_char)
self.assertRaises(mcpib.ToPythonError, mod.return_char_const)
self.assertRaises(mcpib.ToPythonError, mod.return_char_ptr)
self.assertRaises(mcpib.ToPythonError, mod.return_char_ref)
self.assertRaises(mcpib.ToPythonError, mod.return_char_const_ref)
if __name__ == "__main__":
unittest.main()
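# --- Illustrative invocation (not part of the original test file) ---
# The sys.path adjustment above lets the test import builtin_strings_mod from a
# build tree, so the file is normally run directly or through unittest, e.g.:
#   python builtin_strings_test.py
#   python -m unittest builtin_strings_test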
|
flacjacket/sympy
|
examples/advanced/fem.py
|
Python
|
bsd-3-clause
| 5,414
| 0.035648
|
#!/usr/bin/env python
"""FEM library
Demonstrates some simple finite element definitions, and computes a mass
matrix
$ python fem.py
[ 1/60, 0, -1/360, 0, -1/90, -1/360]
[ 0, 4/45, 0, 2/45, 2/45, -1/90]
[-1/360, 0, 1/60, -1/90, 0, -1/360]
[ 0, 2/45, -1/90, 4/45, 2/45, 0]
[ -1/90, 2/45, 0, 2/45, 4/45, 0]
[-1/360, -1/90, -1/360, 0, 0, 1/60]
"""
from sympy import symbols, Symbol, factorial, binomial, Rational, zeros, div, eye, \
    integrate, diff, pprint, reduced
x, y, z = symbols('x,y,z')
class ReferenceSimplex:
def __init__(self, nsd):
self.nsd = nsd
coords = []
if nsd <= 3:
coords = symbols('x,y,z')[:nsd]
else:
coords = []
for d in range(0,nsd):
coords.append(Symbol("x_%d" % d))
self.coords = coords
def integrate(self,f):
coords = self.coords
nsd = self.nsd
limit = 1
for p in coords:
limit -= p
intf = f
for d in range(0,nsd):
p = coords[d]
limit += p
intf = integrate(intf, (p, 0, limit))
return intf
def bernstein_space(order, nsd):
if nsd > 3:
raise RuntimeError("Bernstein only implemented in 1D, 2D, and 3D")
sum = 0
basis = []
coeff = []
if nsd == 1:
b1, b2 = x, 1-x
for o1 in range(0,order+1):
for o2 in range(0,order+1):
if o1 + o2 == order:
aij = Symbol("a_%d_%d" % (o1,o2))
sum += aij*binomial(order,o1)*pow(b1, o1)*pow(b2, o2)
basis.append(binomial(order,o1)*pow(b1, o1)*pow(b2, o2))
coeff.append(aij)
if nsd == 2:
b1, b2, b3 = x, y, 1-x-y
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
if o1 + o2 + o3 == order:
aij = Symbol("a_%d_%d_%d" % (o1,o2,o3))
fac = factorial(order) / (factorial(o1)*factorial(o2)*factorial(o3))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3))
coeff.append(aij)
if nsd == 3:
b1, b2, b3, b4 = x, y, z, 1-x-y-z
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
for o4 in range(0,order+1):
if o1 + o2 + o3 + o4 == order:
aij = Symbol("a_%d_%d_%d_%d" % (o1,o2,o3,o4))
fac = factorial(order)/ (factorial(o1)*factorial(o2)*factorial(o3)*factorial(o4))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4))
coeff.append(aij)
return sum, coeff, basis
def create_point_set(order, nsd):
h = Rational(1,order)
set = []
if nsd == 1:
for i in range(0, order+1):
x = i*h
if x <= 1:
set.append((x,))  # 1D point set only needs the x coordinate
if nsd == 2:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
if x + y <= 1:
set.append((x,y))
if nsd == 3:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
for k in range(0, order+1):
z = k*h
if x + y + z <= 1:
set.append((x,y,z))
return set
def create_matrix(equations, coeffs):
A = zeros(len(equations))
i = 0; j = 0
for j in range(0, len(coeffs)):
c = coeffs[j]
for i in range(0, len(equations)):
e = equations[i]
d, _ = reduced(e, [c])
A[i,j] = d[0]
return A
class Lagrange:
def __init__(self,nsd, order):
self.nsd = nsd
self.order = order
self.compute_basis()
def nbf(self):
return len(self.N)
def compute_basis(self):
order = self.order
nsd = self.nsd
N = []
pol, coeffs, basis = bernstein_space(order, nsd)
points = create_point_set(order, nsd)
equations = []
for p in points:
ex = pol.subs(x, p[0])
if nsd > 1:
ex = ex.subs(y, p[1])
if nsd > 2:
ex = ex.subs(z, p[2])
equations.append(ex )
A = create_matrix(equations, coeffs)
Ainv = A.inv()
b = eye(len(equations))
xx = Ainv*b
for i in range(0,len(equations)):
Ni = pol
for j in range(0,len(coeffs)):
Ni = Ni.subs(coeffs[j], xx[j,i])
N.append(Ni)
self.N = N
def main():
t = ReferenceSimplex(2)
fe = Lagrange(2,2)
u = 0
#compute u = sum_i u_i N_i
us = []
for i in range(0, fe.nbf()):
ui = Symbol("u_%d" % i)
us.append(ui)
u += ui*fe.N[i]
J = zeros(fe.nbf())
for i in range(0, fe.nbf()):
Fi = u*fe.N[i]
print Fi
for j in range(0, fe.nbf()):
uj = us[j]
integrands = diff(Fi, uj)
print integrands
J[j,i] = t.integrate(integrands)
pprint(J)
if __name__ == "__main__":
main()
|
yoonkiss/fMBT
|
utils/fmbtx11.py
|
Python
|
lgpl-2.1
| 26,132
| 0.002334
|
# fMBT, free Model Based Testing tool
# Copyright (c) 2013-2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
"""
This library implements fmbt GUITestInterface for X.
Using Screen.refreshView() requires the pyatspi library and enabling
accessilibity. For example:
gsettings set org.gnome.desktop.interface toolkit-accessibility true
"""
import fmbt_config
import fmbtgti
fmbtgti._OCRPREPROCESS = [
"",
"-sharpen 5 -level 90%%,100%%,3.0 -sharpen 5"
]
import ctypes
import os
import subprocess
import zlib
import fmbtx11_conn
def _run(command):
exit_status = subprocess.call(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
close_fds=(os.name != "nt"))
return exit_status
sortItems = fmbtgti.sortItems
class ViewItem(fmbtgti.GUIItem):
def __init__(self, view, itemId, parentId, className, text, bbox,
dumpFilename, rawProperties=None):
self._view = view
self._itemId = itemId
self._parentId = parentId
self._className = className
self._text = text
if rawProperties:
self._properties = rawProperties
else:
self._properties = {}
fmbtgti.GUIItem.__init__(self, self._className, bbox, dumpFilename)
def branch(self):
"""Returns list of view items from the root down to this item"""
rv = []
itemId = self._itemId
while itemId:
rv.append(self._view._viewItems[itemId])
if itemId in self._view._viewItems:
itemId = self._view._viewItems[itemId]._parentId
else:
itemId = None
rv.reverse()
return rv
def children(self):
items = self._view._viewItems
return [items[itemId]
for itemId in items
if items[itemId]._parentId == self._itemId]
def parent(self):
return self._parentId
def parentItem(self):
try:
return self._view._viewItems[self._parentId]
except KeyError:
return None
def id(self):
return self._itemId
def properties(self):
return self._properties
def text(self):
return self._text
def dumpProperties(self):
rv = []
if self._properties:
for key in sorted(self._properties.keys()):
rv.append("%s=%s" % (key, self._properties[key]))
return "\n".join(rv)
def __str__(self):
return "ViewItem(%s)" % (self._view._dumpItem(self),)
class View(object):
def __init__(self, dumpFilename, itemTree, itemOnScreen=None):
self._dumpFilename = dumpFilename
self._itemTree = itemTree
self._rootItem = None
self._viewItems = {}
if itemOnScreen == None:
self._itemOnScreen = lambda item: True
else:
self._itemOnScreen = itemOnScreen
self._viewSource = "atspi"
for item in itemTree:
className = item.get("class", "")
text = item.get("text", "")
if text == "" or text == None:
text = item.get("name", "")
if text == "":
text = className
vi = ViewItem(
self, item["id"], item["parent"],
className,
text,
item["bbox"],
dumpFilename,
item)
self._viewItems[item["id"]] = vi
if vi.parent() == None:
self._rootItem = vi
if not self._rootItem:
raise ValueError("no root item in view data")
def _intCoords(self, *args):
# TODO: relative coordinates like (0.5, 0.9)
return [int(c) for c in args[0]]
def filename(self):
return self._dumpFilename
def rootItem(self):
return self._rootItem
def _dumpItem(self, viewItem):
return "id=%s cls=%s text=%s bbox=%s" % (
viewItem._itemId, repr(viewItem._className), repr(viewItem._text),
viewItem._bbox)
def _dumpTree(self, rootItem, depth=0):
l = ["%s%s" % (" " * (depth * 4), self._dumpItem(rootItem))]
for child in rootItem.children():
l.extend(self._dumpTree(child, depth+1))
return l
def dumpTree(self, rootItem=None):
"""
Returns item tree as a string
"""
if rootItem == None:
rootItem = self.rootItem()
return "\n".join(self._dumpTree(rootItem))
def __str__(self):
return "View(%s, %s items)" % (repr(self._dumpFilename), len(self._viewItems))
def findItems(self, comparator, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
foundItems = []
if count == 0: return foundItems
if searchRootItem != None:
if comparator(searchRootItem) and (
not onScreen or (self._itemOnScreen(searchRootItem))):
foundItems.append(searchRootItem)
for c in searchRootItem.children():
foundItems.extend(self.findItems(comparator, count=count-len(foundItems), searchRootItem=c, onScreen=onScreen))
else:
if searchItems:
domain = iter(searchItems)
else:
domain = self._viewItems.itervalues
for i in domain():
if comparator(i) and (not onScreen or (self._itemOnScreen(i))):
foundItems.append(i)
if count > 0 and len(foundItems) >= count:
break
return foundItems
def findItemsByText(self, text, partial=False, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
if partial:
c = lambda item: (text in item._text or text in item.properties()["name"])
else:
c = lambda item: (text == item._text)
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsByClass(self, className, partial=False, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
if partial:
c = lambda item: (className in item._className)
else:
c = lambda item: (className == item._className)
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsById(self, itemId, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
c = lambda item: (itemId == item._itemId or itemId == item.properties().get("AutomationId", None))
return self.findItems(c, count=count, searchRootItem=searchRootItem, searchItems=searchItems, onScreen=onScreen)
def findItemsByProperties(self, properties, count=-1, searchRootItem=None, searchItems=None, onScreen=False):
"""
Returns ViewItems where every property matches given properties
Parameters:
properties (dictionary):
names and required values of properties
Example:
view.findItemsByProperties({"Value": "HELLO", "Name": "File name:"})
See also:
viewitem.dumpProperties()
"""
c = lambda item: 0 == len([key for key in properties
if properties[key] != item.properties().get(key, No
|
Shatki/PyIMU
|
gost4401_81.py
|
Python
|
gpl-3.0
| 4,655
| 0.002365
|
# -*- coding: utf-8 -*-
"""
* Partial implementation of standard atmospheric model as described in
* GOST 4401-81 useful for processing of data from meteorological balloon
* sensors.
*
* Supported modelling of temperature and pressure over the altitude span from
* 0 up to 51km.
*
* algorithm by Oleg Kochetov <ok@noiselab.ru>
"""
from math import log10
class GOST4401(object):
G = 9.80665
R = 287.05287
E = 6356766
MIN_PRESSURE = 6.69384
MAX_PRESSURE = 101325.00
MIN_GP_ALT = 0.00
MAX_GP_ALT = 51000.00
# Lookup table with averaged empirical parameters for
# lower layers of atmosphere in accordance with ГОСТ 4401-81
LUT_RECORDS = 6
tab = {
'altitude' : 0, # Geopotentional altitude
'temperature' : 1, # degrees K
'temp gradient' : 2, # degrees K per meter
'pressure' : 3, # pascals
}
ag_table = [
[0, 288.15, -0.0065, 101325.00],
[11000, 216.65, 0.0, 22632.04],
[20000, 216.65, 0.0010, 5474.87],
[32000, 228.65, 0.0028, 868.0146],
[47000, 270.65, 0.0, 110.9056],
[51000, 270.65, -0.0028, 6.69384]
]
@staticmethod
def geopotential_to_geometric(self, altitude):
return altitude * self.E / (self.E - altitude)
@staticmethod
def geometric_to_geopotential(self, altitude):
return altitude * self.E / (self.E + altitude)
def get_altitude(self, pressure):
"""
Returns geometric altitude value for the given pressure.
:param pressure: float pressure - pressure in pascals
:return: float geometric altitude in meters
"""
# Pressure in Pascals
if (pressure <= self.MIN_PRESSURE) or (pressure > self.MAX_PRESSURE):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((pressure <= self.ag_table[idx][self.tab['pressure']]) and
(pressure > self.ag_table[idx + 1][self.tab['pressure']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
geopot_H = ((Tm * pow(Ps / pressure, Bm * self.R / self.G) - Tm) / Bm)
else:
geopot_H = log10(Ps / pressure) * (self.R * Tm) / self.G * 0.434292
return self.geopotential_to_geometric(self, Hb + geopot_H)
def get_pressure(self, altitude):
"""
Returns pressure in pascals for the given geometric altitude
:param altitude: float altitude - geometric altitude in meters
:return: float - pressure in pascals
"""
geopot_H = self.geometric_to_geopotential(self, altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
lP = log10(Ps) - (self.G / (Bm * self.R)) * log10((Tm + Bm * (geopot_H - Hb)) / Tm)
else:
lP = log10(Ps) - 0.434294 * (self.G * (geopot_H - Hb)) / (self.R * Tm)
return pow(10, lP)
def get_temperature(self, altitude):
"""
Returns temperature value in K for the given geometric altitude.
:param altitude: float altitude - geometric altitude in meters
:return: float - temperature in degrees K
"""
geopot_H = self.geometric_to_geopotential(self, altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
temp = Tm
if Bm != 0:
temp += Bm * (geopot_H - Hb)
return temp
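# --- Illustrative usage sketch (not part of the original gost4401_81.py) ---
# A minimal round trip through the model defined above; the altitude value and
# the printed format are placeholders, not numbers taken from the source.
if __name__ == '__main__':
    model = GOST4401()
    p = model.get_pressure(5000.0)       # pressure in Pa at 5 km geometric altitude
    t = model.get_temperature(5000.0)    # temperature in K at the same altitude
    h = model.get_altitude(p)            # should recover roughly 5000 m
    print('p = %.1f Pa, T = %.2f K, h = %.1f m' % (p, t, h))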
|
qewerty/moto.old
|
tools/scons/engine/SCons/Tool/sunf95.py
|
Python
|
gpl-2.0
| 2,167
| 0.00323
|
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 4720 2010/03/24 03:14:11 jars"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
"""Add Builders and construction variables for sunf95 to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f95'
env['FORTRAN'] = fcomp
env['F95'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF95'] = '$F95'
|
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
sistason/kinksorter2
|
src/kinksorter/settings.py
|
Python
|
gpl-3.0
| 4,818
| 0.001038
|
"""
Django settings for untitled project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^8wdj$q^6mp6g7z1s7nwip_ffhof4r6g)nl88dy0-u(r)(o=_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG_SFW = False
# Use to blank most frontend NSFW-stuff for developing in public spaces
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_q',
'kinksorter_app',
]
USE_ASYNC = False
Q_CLUSTER = {
'name': 'kinksorter-cluster',
'recycle': 10, # big tasks -> often recycle workers
'save_limit': 10, # try to minimize database_size
'catch_up': False, # try to minimize database_size
'orm': 'default',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kinksorter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kinksorter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'kinksorter.db'),
'OPTIONS': {'timeout': 20000},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
|
USE_TZ = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%H:%M:%S',
}
},
'filters': {
'ignore_get_current_task': {
'()': 'django.utils.log.CallbackFilter',
'callback': lambda r: not (len(r.args) > 2 and r.args[1] == '200' and r.args[0] == 'GET /get_current_task HTTP/1.1'),
}
},
'handlers': {
'console': {
'filters': ['ignore_get_current_task'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django.server': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'kinksorter', 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'kinksorter', 'static_collected')
# User content (to get the videos via static/, as it needs to be under the root
STATIC_LINKED_DIRECTORIES = os.path.join(STATIC_URL, 'directory_links')
DIRECTORY_LINKS = os.path.join(STATIC_ROOT, 'directory_links')
os.makedirs(DIRECTORY_LINKS, exist_ok=True)
|
TurtleRockStudios/renderdoc_public
|
util/test/tests/Vulkan/VK_Indirect.py
|
Python
|
mit
| 14,258
| 0.004699
|
import rdtest
import struct
import renderdoc as rd
class VK_Indirect(rdtest.TestCase):
demos_test_name = 'VK_Indirect'
def check_overlay(self, eventId: int, out: rd.ReplayOutput, tex: rd.TextureDisplay, save_data: rd.TextureSave):
pipe: rd.PipeState = self.controller.GetPipelineState()
# Check that the highlight draw overlay is empty
tex.resourceId = pipe.GetOutputTargets()[0].resourceId
out.SetTextureDisplay(tex)
overlay_path = rdtest.get_tmp_path(str(eventId) + '_draw.png')
ref_path = self.get_ref_path(str(eventId) + '_draw.png')
save_data.resourceId = out.GetDebugOverlayTexID()
self.controller.SaveTexture(save_data, overlay_path)
if not rdtest.png_compare(overlay_path, ref_path):
raise rdtest.TestFailureException("Reference and output image differ @ EID {}".format(str(eventId)),
ref_path, overlay_path)
def check_capture(self):
self.check_final_backbuffer()
for level in ["Primary", "Secondary"]:
rdtest.log.print("Checking {} indirect calls".format(level))
dispatches = self.find_draw("{}: Dispatches".format(level))
# Set up a ReplayOutput and TextureSave for quickly testing the drawcall highlight overlay
out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100),
rd.ReplayOutputType.Texture)
self.check(out is not None)
tex = rd.TextureDisplay()
tex.overlay = rd.DebugOverlay.Drawcall
save_data = rd.TextureSave()
save_data.destType = rd.FileType.PNG
# Rewind to the start of the capture
draw: rd.DrawcallDescription = dispatches.children[0]
while draw.previous is not None:
draw = draw.previous
# Ensure we can select all draws
while draw is not None:
self.controller.SetFrameEvent(draw.eventId, False)
draw = draw.next
rdtest.log.success("Selected all {} draws".format(level))
self.check(dispatches and len(dispatches.children) == 3)
self.check(dispatches.children[0].dispatchDimension == [0,0,0])
self.check(dispatches.children[1].dispatchDimension == [1,1,1])
self.check(dispatches.children[2].dispatchDimension == [3,4,5])
rdtest.log.success("{} Indirect dispatches are the correct dimensions".format(level))
self.controller.SetFrameEvent(dispatches.children[2].eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
ssbo: rd.BoundResource = pipe.GetReadWriteResources(rd.ShaderStage.Compute)[0].resources[0]
data: bytes = self.controller.GetBufferData(ssbo.resourceId, 0, 0)
rdtest.log.print("Got {} bytes of uints".format(len(data)))
uints = [struct.unpack_from('=4L', data, offs) for offs in range(0, len(data), 16)]
for x in range(0, 6): # 3 groups of 2 threads each
for y in range(0, 8): # 3 groups of 2 threads each
for z in range(0, 5): # 5 groups of 1 thread each
idx = 100 + z*8*6 + y*6 + x
if not rdtest.value_compare(uints[idx], [x, y, z, 12345]):
raise rdtest.TestFailureException(
'expected thread index data @ {},{},{}: {} is not as expected: {}'
.format(x, y, z, uints[idx], [x, y, z, 12345]))
rdtest.log.success("Dispatched buffer contents are as expected for {}".format(level))
empties = self.find_draw("{}: Empty draws".format(level))
self.check(empties and len(empties.children) == 2)
draw: rd.DrawcallDescription
for draw in empties.children:
self.check(draw.numIndices == 0)
self.check(draw.numInstances == 0)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have empty PostVS
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut, 0, 1)
self.check(len(postvs_data) == 0)
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} empty draws are empty".format(level))
indirects = self.find_draw("{}: Indirect draws".format(level))
self.check('vkCmdDrawIndirect' in indirects.children[0].name)
self.check('vkCmdDrawIndexedIndirect' in indirects.children[1].name)
self.check(len(indirects.children[1].children) == 2)
rdtest.log.success("Correct number of {} indirect draws".format(level))
# vkCmdDrawIndirect(...)
draw = indirects.children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.8, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.7, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.6, -0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirect[0](...)
draw = indirects.children[1].children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 3)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.6, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.5, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.4, -0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirect[1](...)
draw = indirects.children[1].children[1]
self.check(draw.numIndices == 6)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.4, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.3, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.2, -0.8, 0.0, 1.0]},
3: {'vtx': 3, 'idx': 3, 'gl_PerVertex.gl_Position': [-0.1, -0.5, 0.0, 1.0]},
4: {'vtx': 4, 'idx': 4, 'gl_PerVertex.gl_Position': [ 0.0, -0.8, 0.0, 1.0]},
5: {'vtx': 5, 'idx': 5, 'gl_PerVertex.gl_Position': [ 0.1, -0.8, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(dra
|
shimpe/frescobaldi
|
frescobaldi_app/debuginfo.py
|
Python
|
gpl-2.0
| 2,835
| 0.003175
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Provides version information of important supporting modules.
"""
from __future__ import unicode_literals
import functools
import appinfo
def _catch_unknown(f):
"""Decorate a function, returning "unknown" on import/attribute error."""
@functools.wraps(f)
def wrapper():
try:
return f()
except (ImportError, AttributeError):
return "unknown"
return wrapper
@_catch_unknown
def app_version():
import appinfo
return appinfo.version
@_catch_unknown
def sip_version():
import sip
return sip.SIP_VERSION_STR
@_catch_unknown
def pyqt_version():
import PyQt4.QtCore
return PyQt4.QtCore.PYQT_VERSION_STR
@_catch_unknown
def qt_version():
import PyQt4.QtCore
return PyQt4.QtCore.QT_VERSION_STR
@_catch_unknown
def python_version():
import platform
return platform.python_version()
@_catch_unknown
def operating_system():
import platform
return platform.platform()
@_catch_unknown
def ly_version():
import ly.pkginfo
return ly.pkginfo.version
@_catch_unknown
def poppler_version():
import popplerqt4
return '.'.join(format(n) for n in popplerqt4.poppler_version())
@_catch_unknown
def python_poppler_version():
import popplerqt4
return '.'.join(format(n) for n in popplerqt4.version())
def version_info_named():
"""Yield all the relevant names and their version string."""
yield appinfo.appname, appinfo.version
yield "Python", python_version()
yield "python-ly", ly_version()
yield "Qt", qt_version()
yield "PyQt", pyqt_version()
yield "sip", sip_version()
yield "poppler", poppler_version()
yield "python-poppler-qt", python_poppler_version()
yield "OS", operating_system()
def version_info_string(separator='\n'):
"""Return all version names as a string, joint with separator."""
return separator.join(map("{0[0]}: {0[1]}".format, version_info_named()))
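# --- Illustrative usage sketch (not part of the original debuginfo.py) ---
# Every helper above falls back to "unknown" via _catch_unknown instead of
# raising, so the report can be printed even when optional modules are missing.
if __name__ == '__main__':
    print(version_info_string())        # one "name: version" pair per line
    print(version_info_string(', '))    # or joined on a single line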
|
CurryBoy/ProtoML-Deprecated
|
protoml/extras/tsne/_bhtsne/__init__.py
|
Python
|
bsd-3-clause
| 62
| 0
|
from .bhtsne import bh_tsne
__all__ = ["_bhtsne", "bh_tsne"]
|
unclejed613/gnuradio-projects-rtlsdr
|
scanner/frqedit.py
|
Python
|
gpl-2.0
| 2,267
| 0.013674
|
#!/usr/bin/env python
from array import *
import os
import struct
stats = os.stat('freqtest.dat')
file_size = stats.st_size
#print('file size ', +file_size, ' bytes')
entries = file_size/4
#print('file has ', +entries, +' entries')
freq_array = array('f', []) #create an array to hold the entries
for a in range(0, file_size, 4): #read the entries sequentially from the file
with open('freqtest.dat', 'rb') as f:
f.seek(a)
bytes = f.read(4)
freq = struct.unpack('<f', bytes)
b = (a/4) +1
# frq(b) = str(freq[0])
print('Frequency: ' + str((a/4)+1) + ' ' + str(freq[0])) #print the entries as they are read
freq_array.append(freq[0]) #and add them to the array
f.close()
x = raw_input('continue? (y to modify freqs in the list, n to go to adding freqs)')
while x != "n":
# print(x)
fm = int(input('freq to modify: ')) #we want to modify a particular frequency
current_freq = freq_array[fm-1]
print('current freq is: ', + current_freq) #we want to replace it with a new value
new_freq = input('new frequency: ')
freq_array[fm-1] = new_freq
for indx in range(len(freq_array)): #print the modified list
print(indx+1, +freq_array[indx])
x = raw_input("do you want to change another frequency? ")
x = raw_input('continue? (y to add freqs to the list, n to save the list and exit)') #second part... we may want to add new frequencies to the list
while x != "n": #similar to the modify loop
new_freq = input('new frequency: ')
freq_array.append(new_freq) #except we append the frequency at the end
for indx in range(len(freq_array)): #and as before print the modified list
print(indx+1, +freq_array[indx])
x = raw_input("do you want to add another frequency? ")
print freq_array #this is here as a troubleshooting tool
f = open('freqtest.dat', 'wb') #everything done? dump the array to the file (overwrites
f.write(freq_array) #the old one)
f.close()
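# --- Illustrative helper (not part of the original frqedit.py) ---
# freqtest.dat is read above as consecutive 4-byte floats (struct '<f') and
# written back with array('f'), so on a little-endian machine the file is just
# a flat sequence of float32 values. A starting file could be seeded with:
#
#   from array import array
#   with open('freqtest.dat', 'wb') as seed:
#       array('f', [96.9e6, 101.1e6, 162.4e6]).tofile(seed)
#
# The example frequencies are placeholders, not values from the original script.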
|
maurozucchelli/dipy
|
doc/examples/probabilistic_tracking_odfs.py
|
Python
|
bsd-3-clause
| 2,094
| 0.001433
|
"""
====================================
Probabilistic Tracking on ODF fields
====================================
In this example we perform probabilistic fiber tracking on fields of ODF peaks.
This example requires importing example `reconst_csa.py`.
"""
import numpy as np
from reconst_csa import *
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.tracking.markov import (BoundaryStepper,
FixedSizeStepper,
ProbabilisticOdfWeightedTracker)
from dipy.tracking.utils import seeds_from_mask
stepper = FixedSizeStepper(1)
"""
Read the voxel size from the image header:
"""
zooms = img.get_header().get_zooms()[:3]
"""
Randomly select some seed points from the mask:
"""
seeds = seeds_from_mask(mask, [1, 1, 1], zooms)
seeds = seeds[:2000]
interpolator = NearestNeighborInterpolator(data, zooms)
pwt = ProbabilisticOdfWeightedTracker(csamodel, interpolator, mask,
stepper, 20, seeds, sphere)
csa_streamlines = list(pwt)
"""
Now that we have our streamlines in memory we can save the results to disk.
For this purpose we can use the TrackVis format (``*.trk``). First, we need to
create a header.
"""
import nibabel as nib
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = (2., 2., 2.)
hdr['voxel_order'] = 'LAS'
hdr['dim'] = csapeaks.gfa.shape[:3]
"""
Save the streamlines.
"""
csa_streamlines_trk = ((sl, None, None) for sl in csa_streamlines)
csa_sl_fname = 'csa_prob_streamline.trk'
nib.trackvis.write(csa_sl_fname, csa_streamlines_trk, hdr)
"""
Visualize the streamlines with fvtk (python vtk is required).
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
r = fvtk.ren()
fvtk.add(r, fvtk.line(csa_streamlines, line_colors(csa_streamlines)))
print('Saving illustration as csa_prob_tracks.png')
fvtk.record(r, n_frames=1, out_path='csa_prob_tracks.png', size=(600, 600))
"""
.. figure:: csa_prob_tracks.png
:align: center
**Probabilistic streamlines applied on an ODF field modulated by GFA**.
"""
|
firebase/firebase-android-sdk
|
ci/fireci/fireci/commands.py
|
Python
|
apache-2.0
| 946
| 0.002114
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from . import gradle
from . import ci_command
@click.argument('task', required=True, nargs=-1)
@click.option(
'--gradle-opts',
default='',
help='GRADLE_OPTS passed to the gradle invocation.')
@ci_command('gradle')
def gradle_command(task, gradle_opts):
"""Runs the specified gradle commands."""
gradle.run(*task, gradle_opts=gradle_opts)
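# --- Illustrative invocation sketch (not part of the original commands.py) ---
# Assuming `fireci` is the click entry point that collects @ci_command functions,
# the command defined above would be driven roughly like:
#   fireci gradle :firebase-common:check
#   fireci gradle --gradle-opts="-Xmx4g" :firebase-common:assemble :firebase-common:test
# Both forms are guesses from the click declarations, not documented CLI usage.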
|
rchurch4/georgetown-data-science-fall-2015
|
analysis/graph/graph_creation.py
|
Python
|
mit
| 1,784
| 0.008408
|
# Creates graph of restaurant reviews for yelp or trip advisor.
# writes graph to gml file for use in gephi
#
# Rob Churchill
#
# NOTE: I learned to do this in my data science class last semester. If you are looking for plagiarism things, you will almost certainly find similar clustering code.
# I did not copy it, I learned this specific way of doing it, and referred to my previous assignments when doing it for this project. If you would like to see my previous
# assignments, I will provide them on request. Otherwise, I don't think that it's worth adding a lot of extra files for the sole sake of showing that I haven't plagiarized.
import networkx as nx
import numpy as np
import scipy as sp
import csv
folder = 'data/'
file_names = ['yelp_data.csv', 'trip_advisor_data.csv']
# EDIT this line to change which website you make the graph for. True=yelp, False=TripAdvisor
yelp = False
yelp_dataset = list()
file_name = file_names[1]
if yelp == True:
file_name = file_names[0]
# reads in appropriate file given yelp boolean variable
with open(folder+file_name, 'r') as f:
reader = csv.reader(f)
for line in reader:
yelp_dataset.append(line)
# removes headers
yelp_dataset.remove(yelp_dataset[0])
print len(yelp_dataset)
# create the graph
G = nx.Graph()
for y in yelp_dataset:
# add the nodes if they don't already exist
G.add_node(y[4], type='restaurant')
G.add_node(y[13], type='reviewer')
# add the edge between the reviewer and restaurant, weight is in different position in each file.
if yelp == True:
G.add_edge(y[13], y[4], weight=float(y[2]))
else:
G.add_edge(y[13], y[4], weight=float(y[1]))
print nx.number_of_nodes(G)
print nx.number_of_edges(G)
# write graph to gml file.
nx.write_gml(G, 'ta_graph.gml')
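# --- Illustrative follow-up (not part of the original script) ---
# The exported GML can be reloaded with networkx before handing it to Gephi,
# e.g. to confirm the node/edge counts printed above:
#   G2 = nx.read_gml('ta_graph.gml')
#   print nx.number_of_nodes(G2)   # Python 2 print statement, matching this script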
|
dupuy/ulm
|
ulm/urls.py
|
Python
|
bsd-3-clause
| 911
| 0
|
# -*- coding: utf-8 -*-
"""Django URLconf file for ulm"""
from __future__ import unicode_literals
from django.conf import settings
try:
# pylint: disable=E0611
from django.conf.urls import patterns, include, url
except (ImportError): # Django 1.3 compatibility
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from ulm.views import laptop, batteries, wifi
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = \
patterns('',
url(r'^$', laptop),
url(r'^batter(?:y|ies)/$', batteries),
url(r'^(?:wifi|wlan)/$', wifi),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
sebbASF/infrastructure-puppet
|
modules/mail_archives/files/scripts/site-sitemap.py
|
Python
|
apache-2.0
| 2,114
| 0.008515
|
#!/usr/bin/env python
import os
from os.path import join as pjoin
import sys
import subprocess
def get_output(cmd):
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out = s.communicate()[0]
s.wait()
return out.strip()
# you could use os.path.walk to calculate this... or you could use du(1).
def duhack(path):
cmd = ['du', '-k', path]
out = get_output(cmd).split()
return int(out[0]) * 1024
BASEPATH=sys.argv[1]
ROOT="/x1/mail-archives/mod_mbox"
HOSTNAME="http://mail-archives.apache.org/mod_mbox/"
PARITION_SIZE=100 * 1024 * 1024
tlps={}
for files in os.listdir(ROOT):
path = files
tlp = path[0:path.find('-')]
list = path[path.find('-')+1:]
# print "%s - %s %s" % (tlp, list, path)
if not os.access("%s/%s/listinfo.db" % (ROOT, path), os.F_OK):
continue
if tlp == "www":
tlp = "asf"
if not tlps.has_key(tlp):
tlps[tlp] = {}
tlps[tlp][list] = [path, duhack(pjoin(ROOT, path))]
keys = tlps.keys()
keys.sort()
count = 0
fcount = 0
def write_sitemap_header(fp):
fp.write("""<?xml version="1.0" encoding="UTF-8"?>\n<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
def write_sitemap_footer(fp):
fp.write("</sitemapindex>\n")
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)
for tlp in keys:
klist = tlps[tlp].keys()
klist.sort()
for list in klist:
name = tlps[tlp][list][0]
size = tlps[tlp][list][1]
if size < PARITION_SIZE:
count += 1
fp.write("<sitemap><loc>%s%s/?format=sitemap</loc></sitemap>\n" % (HOSTNAME, name))
else:
part = (size / PARITION_SIZE) + 1
for i in range(0, part):
count += 1
fp.write("<sitemap><loc>%s%s/?format=sitemap&pmax=%d&
|
;part=%d</loc></sitemap>\n" % (HOSTNAME, name, part, i))
if count > 500:
write_sitemap_footer(fp)
fp.close()
count = 0
fcount += 1
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)
write_sitemap_footer(fp)
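# --- Illustrative invocation (not part of the original script) ---
# BASEPATH comes from sys.argv[1] and is formatted with a running file counter
# (open(BASEPATH % (fcount), 'w')), so the argument must contain a %d placeholder:
#   python site-sitemap.py /var/www/sitemaps/sitemap-%d.xml
# The output path here is a placeholder, not a path used by the original setup.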
|
mellanox-senior-design/docker-volume-rdma
|
benchmarking/book_store/bench.py
|
Python
|
apache-2.0
| 5,515
| 0.008704
|
import os
import logging
import MySQLdb
import time
import sys
import Queue
import threading
import json
createUserSQL = "INSERT IGNORE INTO users (name) VALUES (%s);"
getUserByUsernameSQL = "SELECT * FROM users WHERE name=%s;"
getAuthorByNameSQL = "SELECT * FROM authors WHERE name=%s;"
createAuthorSQL = "INSERT IGNORE INTO authors (userid, name) VALUES (%s, %s);"
createBookSQL = "INSERT IGNORE INTO books (name, author, price) VALUES (%s, %s, %s);"
firstNames = sorted(["Kenia ", "Randal", "Shawnna ", "Rey ", "Cordia", "Kendal",
"Alina", "Dianna", "Misti", "Chelsie", "Gracia", "Teena", "Ronny", "Willy",
"Betsy", "Kenisha", "Elsy", "Cheryle", "Lurline ", "Karina", "Luba", "Vita",
"Lu", "Frances", "Lavenia", "Nereida", "Zetta", "Melony", "Eloise",
"Nickolas", "Ericka", "Cecilia", "Jenni", "Sofia", "Nobuko", "Trudy",
"Petronila", "Donnette", "Santos", "Viola", "Jessika", "Chere", "Azalee",
"Meggan", "Floyd", "Liberty", "Tabitha", "Juliana", "Pamila", "Blondell"])
lastNames = sorted(["Watterson", "Lawler", "Walt", "Birch", "Bryd", "Speight",
"Monroy", "Milledge", "Davilla", "Behrendt", "Mustain", "Blythe", "Gandhi",
"Brady", "Gooden", "Jellison", "Hager", "Selders", "Seaton", "Wind",
"Jelinek", "Reiser", "Lacour", "Maginnis", "Baggs", "Crossno", "Shadley",
"Bramer", "Mento", "Manigault", "Jacobi", "Deckman", "Spikes", "Duncan",
"Ackman", "Hornick", "Bourbeau", "Riehl", "Sena", "Rolon", "Pereira",
"Mikula", "Luk", "Albaugh", "Akin", "Bradburn", "Houlihan", "Frisina",
"Funnell", "Keister"])
def connect():
return MySQLdb.connect(host="mysql", # your host, usually localhost
user="root", # your username
passwd="password", # your password
db="bench") # name of the data base
createUserThreads = []
def createUsers(name):
logging.debug("Creating... "+name)
sys.stdout.flush()
db = connect();
cur = db.cursor()
for j in lastNames:
for k in range(0, 10):
myname = name + " " + j + "(" + str(k) + ")"
sys.stdout.flush()
cur.execute(createUserSQL, (myname,))
cur.execute(getUserByUsernameSQL, (myname, ))
row = cur.fetchone()
if not row == None:
cur.execute(createAuthorSQL, [str(row[0]), ("Author "+myname)])
else:
print "Could not create ", myname
db.commit()
db.close()
logging.debug("Created! "+name)
sys.stdout.flush()
createBookThreads = []
def createBook(username):
logging.debug("Creating books... "+username)
sys.stdout.flush()
db = connect()
cur = db.cursor()
for j in lastNames:
for k in range(0, 3):
myname = "Author " + username + " " + j + "(" + str(k) + ")"
cur.execute(getAuthorByNameSQL, (myname, ))
row = cur.fetchone()
if not row == None:
for i in range(0,2):
bookname = myname+"'s book "+str(i)
cur.execute(createBookSQL, [bookname, str(row[0]), i * 5])
else:
print "Could not find ", myname
db.commit()
db.close()
logging.debug("Created books! "+username)
sys.stdout.flush()
def initilizeUsers():
logging.debug("Initilizing users...")
start = time.time();
for i in firstNames:
name = i + " " + hostname
t = threading.Thread(target=createUsers, args = (name, ))
t.daemon = True
createUserThreads.append(t)
# Start all the threads
for x in createUserThreads:
x.start()
# Wait for them to complete
for x in createUserThreads:
x.join()
# Return the time it took to run
logging.debug("Creating users took: "+str(time.time() - start))
return time.time() - start;
def initilizeBooks():
logging.debug("Initilizing books...")
start = time.time();
for i in firstNames:
name = i + " " + hostname
t = threading.Thread(target=createBook, args = (name, ))
t.daemon = True
createBookThreads.append(t)
# Start all the threads
for x in createBookThreads:
x.start()
# Wait for them to complete
for x in createBookThreads:
x.join()
# Return the time it took to run
logging.debug("Creating books took: "+str(time.time() - start))
return time.time() - start;
def main():
logging.debug("Starting...")
db = connect();
intUserTime = initilizeUsers();
intBookTime = initilizeBooks();
# cur.execute("SELECT * FROM users")
# # print all the first cell of all the rows
# for row in cur.fetchall():
# logging.debug(row[1])
#
# cur.execute("SELECT * FROM authors")
# # print all the first cell of all the rows
# for row in cur.fetchall():
# logging.debug(row[2])
# db.close()
logging.info("Starting result save.")
with open('/tmp/bench_results/result.json', 'w') as fp:
results = {
"hostname": hostname,
"results": {
"Create": {
"Users": intUserTime,
"Books": intBookTime
}
}
}
logging.info(json.dumps(results))
json.dump(results, fp)
if __name__ == '__main__':
hostname = os.uname()[1]
logging.basicConfig(format=hostname + ' %(asctime)s %(levelname)s: %(message)s', level=logging.DEBUG)
main()
|
artoonie/RedStatesBlueStates
|
redblue/viewsenators/views.py
|
Python
|
gpl-3.0
| 6,895
| 0.004206
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from colour import Color
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.db.models import Sum
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.template import loader
from .models import Party, City, Senator, ContactList
from .forms import ChooseForm
from .getPopulations import getCityStatePopulations
import viewsenators.initialization as initialization
# This seems to be the most that Facebook will allow, though it varies over time
NUM_CITIES_PER_QUERY = 50
def index(request):
def colorToD3(color):
return "rgb(%d,%d,%d)" % (color.red*255, color.green*255, color.blue*255)
def substituteDesc(moc, desc):
if "{{number}}" not in desc:
desc += "\n\n%s's phone number is {{number}}" % moc.lastName
if moc.phoneNumber:
text = moc.phoneNumber
else:
|
text ="(unknown number)"
desc = desc.replace("{{name}}", moc.firstName + " " + moc.lastName)
return desc.replace("{{number}}", text)
template = loader.get_template('halcyonic/index.html')
if 'list' in request.GET:
|
clId = str(request.GET['list'])
contactList = get_object_or_404(ContactList, slug=clId)
else:
try:
contactList = ContactList.objects.get(slug='keep-children-with-their-families')
except ContactList.DoesNotExist:
contactList = ContactList.objects.get(title="Republican")
stateColor = colorToD3(Color(rgb=(125/255.0, 0/255.0, 16/255.0)))
senatorToURLsPopsAndDesc = {}
for senator in contactList.senators.all():
senatorToURLsPopsAndDesc[senator] = _stateToFbCode(senator.state)
senatorToURLsPopsAndDesc[senator]['callScript'] = substituteDesc(senator, contactList.description)
sortedDict = sorted(senatorToURLsPopsAndDesc.items(),
key = lambda x: x[0].state.name)
context = {
"stateColor": stateColor, # TODO eventually have meaningful colors?
"title": contactList.title,
"senatorToURLsPopsAndDesc": sortedDict
}
return HttpResponse(template.render(context, request))
def combineContactList(request):
template = loader.get_template('viewsenators/combine.html')
context = {'contactLists': ContactList.objects.all()}
return HttpResponse(template.render(context, request))
def createContactList(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ChooseForm(request.POST)
# check whether it's valid:
if form.is_valid():
data = form.cleaned_data
title = data['title']
description = data['description']
senators = data['senators']
public = data['public']
cl = _makeContactList(title, description, senators, public)
return HttpResponseRedirect(reverse('index')+'?list=' + cl.slug)
# if a GET (or any other method) we'll create a blank form
else:
form = ChooseForm()
so = Senator.objects
ids = {}
for party in Party.objects.all():
idList = ["input[value=\""+str(s.id)+"\"]"
for s in so.filter(party=party)]
idsSet = set(idList)
idsStr = ', '.join(idsSet)
ids[party.name] = idsStr
template = loader.get_template('viewsenators/choose.html')
context = {'form': form,
'ids': ids}
return HttpResponse(template.render(context, request))
def debugWriteAnything(text):
response = HttpResponse()
response.write(text)
return response
def _stateToFbCode(state):
""" :return: the URL and the percentage of the population of the
desired states which will be found via that URL """
# While there are many better URL constructions that ideally start with
# your friends, rather than start with all FB users in each city then
# intersect that with your friends list, this is the only way I could get it
# to work.
# In particular, facebook seems to limit the number of unions to six,
# whereas the number of intersections can be ten times that.
setOfCities = City.objects.filter(state=state).order_by('-population')[:NUM_CITIES_PER_QUERY]
url = "https://www.facebook.com/search/"
for city in setOfCities:
url += city.facebookId + "/residents/present/"
url += "union/me/friends/intersect/"
# % of population in this search
cityPop = setOfCities.aggregate(Sum('population'))['population__sum']
if cityPop is None: cityPop = 0 # TODO hack if a state has no cities
statePop = state.population
percentPopIncludedInURL = float(cityPop) / float(statePop)
percentPopIncludedInURL = int(100*percentPopIncludedInURL+0.5)
return {'url': url,
'percentPopIncludedInURL': percentPopIncludedInURL}
def _makeContactList(title, description, senatorList, public):
cl = ContactList.objects.create(
title = title,
description = description,
public = public)
cl.senators.set(senatorList)
cl.save()
return cl
@user_passes_test(lambda u: u.is_superuser)
def populateSenators(request):
def _createInitialLists():
if ContactList.objects.count() != 0:
return
assert Senator.objects.count() == 100
for party in Party.objects.all():
title = party.name
description = "Call {{name}} at {{number}}"
senators = Senator.objects.filter(party=party)
_makeContactList(title, description, senators, public=True)
initialization.populateAllData()
_createInitialLists()
senators = Senator.objects.all()
def s2t(s): return "%s: %s, %s" % (s.state.abbrev, s.firstName, s.lastName)
senText = '<br>'.join(sorted([s2t(s) for s in senators]))
return debugWriteAnything("The list of senators: <br>" + senText)
@user_passes_test(lambda u: u.is_superuser)
def clearDataForNewCongress(request):
initialization.clearDataForNewCongress()
return populateSenators(request)
@user_passes_test(lambda u: u.is_superuser)
def updateCitiesAndStatesWithLatestData(request):
# This can take more than 30 seconds, so we need a streaming response
# for Heroku to not shut it down
# This is only run once by the admin, so the decreased performance
# shouldn't matter.
def runner():
cityPopulations, statePopulations = getCityStatePopulations()
for x in initialization.updateCitiesWithCurrentData(cityPopulations):
yield x
yield initialization.addPopulationToStates(statePopulations)
return StreamingHttpResponse(runner())
|
TAMU-CPT/galaxy-tools
|
tools/gff3/gff3_rebase.py
|
Python
|
gpl-3.0
| 4,407
| 0.000908
|
#!/usr/bin/env python
import sys
import logging
import argparse
from gff3 import feature_lambda, feature_test_qual_value
from CPT_GFFParser import gffParse, gffWrite
from Bio.SeqFeature import FeatureLocation
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def __get_features(child, interpro=False):
child_features = {}
for rec in gffParse(child):
log.info("Parsing %s", rec.id)
# Only top level
for feature in rec.features:
# Get the record id as parent_feature_id (since this is how it will be during remapping)
parent_feature_id = rec.id
# If it's an interpro specific gff3 file
if interpro:
# Then we ignore polypeptide features as they're useless
if feature.type == "polypeptide":
continue
try:
child_features[parent_feature_id].append(feature)
except KeyError:
child_features[parent_feature_id] = [feature]
# Keep a list of feature objects keyed by parent record id
return child_features
def __update_feature_location(feature, parent, protein2dna):
start = feature.location.start
end = feature.location.end
if protein2dna:
start *= 3
end *= 3
if parent.location.strand >= 0:
ns = parent.location.start + start
ne = parent.location.start + end
st = +1
else:
ns = parent.location.end - end
ne = parent.location.end - start
st = -1
# Don't let start/stops be less than zero.
#
# Instead, we'll replace with %3 to try and keep it in the same reading
# frame that it should be in.
if ns < 0:
ns %= 3
if ne < 0:
ne %= 3
feature.location = FeatureLocation(ns, ne, strand=st)
if hasattr(feature, "sub_features"):
for subfeature in feature.sub_features:
__update_feature_location(subfeature, parent, protein2dna)
def rebase(parent, child, interpro=False, protein2dna=False, map_by="ID"):
# get all of the features we will be re-mapping in a dictionary, keyed by parent feature ID
child_features = __get_features(child, interpro=interpro)
for rec in gffParse(parent):
replacement_features = []
# Horrifically slow I believe
for feature in feature_lambda(
rec.features,
# Filter features in the parent genome by those that are
# "interesting", i.e. have results in child_features array.
# Probably an unnecessary optimisation.
feature_test_qual_value,
{"qualifier": map_by, "attribute_list": child_features.keys()},
subfeatures=False,
):
# Features which will be re-mapped
to_remap = child_features[feature.id]
fixed_features = []
for x in to_remap:
# Then update the location of the actual feature
__update_feature_location(x, feature, protein2dna)
if interpro:
for y in ("status", "Target"):
try:
del x.qualifiers[y]
except:
pass
fixed_features.append(x)
replacement_features.extend(fixed_features)
# We do this so we don't include the original set of features that we
# were rebasing against in our result.
rec.features = replacement_features
rec.annotations = {}
gffWrite([rec], sys.stdout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"parent", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument(
"child",
type=argparse.FileType("r"),
help="Child GFF3 annotations to rebase against parent",
)
parser.add_argument(
"--interpro", action="store_true", help="Interpro specific modifications"
)
parser.add_argument(
"--protein2dna",
action="store_true",
help="Map protein translated results to original DNA data",
)
parser.add_argument("--map_by", help="Map by key", default="ID")
args = parser.parse_args()
rebase(**vars(args))
|
illicitonion/pymacaroons
|
pymacaroons/caveat_delegates/first_party.py
|
Python
|
mit
| 1,498
| 0
|
from __future__ import unicode_literals
import binascii
from pymacaroons import Caveat
from pymacaroons.utils import (
convert_to_bytes,
sign_first_party_caveat
)
from .base_first_party import (
BaseFirstPartyCaveatDelegate,
BaseFirstPartyCaveatVerifierDelegate
)
class FirstPartyCaveatDelegate(BaseFirstPartyCaveatDelegate):
def __init__(self, *args, **kwargs):
super(FirstPartyCaveatDelegate, self).__init__(*args, **kwargs)
def add_first_party_caveat(self, macaroon, predicate, **kwargs):
predicate = convert_to_bytes(predicate)
caveat = Caveat(caveat_id=convert_to_bytes(predicate))
macaroon.caveats.append(caveat)
encode_key = binascii.unhexlify(macaroon.signature_bytes)
        macaroon.signature = sign_first_party_caveat(encode_key, predicate)
return macaroon
class FirstPartyCaveatVerifierDelegate(BaseFirstPartyCaveatVerifierDelegate):
def __init__(self, *args, **kwargs):
super(FirstPartyCaveatVerifierDelegate, self).__init__(*args, **kwargs)
def verify_first_party_caveat(self, verifier, caveat, signature):
predicate = caveat.caveat_id
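        # A caveat counts as met if at least one registered callback accepts the
        # predicate: sum() over the callbacks' boolean results is truthy as soon
        # as any of them returns True.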
        caveat_met = sum(callback(predicate)
for callback in verifier.callbacks)
return caveat_met
def update_signature(self, signature, caveat):
return binascii.unhexlify(
sign_first_party_caveat(
signature,
caveat._caveat_id
)
)
|
willsirius/DualTreeRRTStartMotionPlanning
|
pythonVision2/HW3_testUpdateFunction.py
|
Python
|
mit
| 3,774
| 0.023317
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#HW3 for EECS 598 Motion Planning
import time
import openravepy
import userdefined as us
import kdtree
import transformationFunction as tf
from random import randrange
#### YOUR IMPORTS GO HERE ####
handles = [];
#### END OF YOUR IMPORTS ####
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def waitrobot(robot):
"""busy wait for robot completion"""
while not robot.GetController().IsDone():
time.sleep(0.01)
def tuckarms(env,robot):
with env:
jointnames = ['l_shoulder_lift_joint','l_elbow_flex_joint','l_wrist_flex_joint','r_shoulder_lift_joint','r_elbow_flex_joint','r_wrist_flex_joint']
robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
robot.SetActiveDOFValues([1.29023451,-2.32099996,-0.69800004,1.27843491,-2.32100002,-0.69799996]);
        robot.GetController().SetDesired(robot.GetDOFValues());
waitrobot(robot)
def stringToFloatList(path):
path = path.split('\n')
for line in xrange(len(path)):
path[line] = path[line].split(',')
for i in xrange(len(path[line])):
path[line][i]=float(path[line][i])
return path
def drawPath(path,robot,color,size):
if type(path) is str: path = stringToFloatList(path)
for i in path:
robot.SetActiveDOFValues(i)
handles.append(env.plot3(points=robot.GetTransform()[0:3,3],pointsize=size,colors=color,drawstyle=1))
if __name__ == "__main__":
env = Environment()
env.SetViewer('qtcoin')
collisionChecker = RaveCreateCollisionChecker(env,'ode')
env.SetCollisionChecker(collisionChecker)
env.Reset()
# load a scene from ProjectRoom environment XML file
env.Load('env/bitreequad.env.xml')
time.sleep(0.1)
# 1) get the 1st robot that is inside the loaded scene
# 2) assign it to the variable named 'robot'
robot = env.GetRobots()[0]
robot.SetActiveDOFs([],DOFAffine.X|DOFAffine.Y|DOFAffine.Z|DOFAffine.RotationQuat)
# print robot.GetActiveDOFValues()
# raw_input("Press enter to move robot...")
# qt = tf.quaternion_from_euler(0.5,0.5,0.75,'rzxz')
# startconfig = [4.0,-1.5 ,0.2] + list(qt)
# print startconfig
startconfig = [ 4.0,-1.5 ,0.2 ,0.0, 0.0, 0.0 ];
robot.SetActiveDOFValues(us.E2Q(startconfig));
# robot.GetController().SetDesired(robot.GetDOFValues());
# waitrobot(robot);
waitrobot(robot)
print "test update state"
# s1 = [1,1,1,1,0,0,0,0.2,0.2,0.2,0.1,0.1,-0.1]
avf = 1.85*9.8/4
u = [-0.5*avf,2*avf,-0.5*avf,3*avf]
ts = 0.02
t = range(0,100)
while 1:
s2 = [0,0,0,0,0,0,1,0,0,0,0,0,0]
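        # State layout: position (3), velocity (3), quaternion (4, element 6 set
        # to 1, presumably the identity rotation), body angular rates (3) --
        # these slices are pulled apart below.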
for tt in t:
s2 = us.updateState(s2,u,ts)
x1 = array(s2[0:3])
v1 = array(s2[3:6])
Q1 = array(s2[6:10])
W1 = array(s2[10:13])
E1 = tf.euler_from_quaternion(Q1)
C = list(x1)+list(Q1)
robot.SetActiveDOFValues(C);
time.sleep(0.02)
# traj = RaveCreateTrajectory(env,'');
# config = robot.GetActiveConfigurationSpecification('linear');
# config.AddDeltaTimeGroup();
# traj.Init(config);
# # myPath = [ [point.x, point.y,point.theta,i*0.01] for i,point in enumerate(path) ];
# num = 0
# for pathNode in path:
# num += 1
# traj.Insert(num,pathNode,config,True)
# # for i ,wayPoint in enumerate(myPath):
# # traj.Insert(i,wayPoint,config,True);
# robot.GetController().SetPath(traj);
# # robot.GetController().SetPath(traj)
### END OF YOUR CODE ###
raw_input("Press enter to exit...")
|
jsantoso91/smartlighting
|
characterizeRSSI.py
|
Python
|
mit
| 1,721
| 0.012783
|
import sys
import os
import errno
import struct
import binascii
from time import sleep
from ctypes import (CDLL, get_errno)
from ctypes.util import find_library
from socket import (socket, AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI, SOL_HCI, HCI_FILTER,)
os.system("hciconfig hci0 down")
os.system("hciconfig hci0 up")
if not os.geteuid() == 0:
sys.exit("script only works as root")
btlib = find_library("bluetooth")
if not btlib:
raise Exception(
"Can't find required bluetooth libraries"
" (need to install bluez)"
)
bluez = CDLL(btlib, use_errno=True)
dev_id = bluez.hci_get_route(None)
sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
sock.bind((dev_id,))
err = bluez.hci_le_set_scan_parameters(sock.fileno(), 0, 0x10, 0x10, 0, 0, 1000);
if err < 0:
raise Exception("Set scan parameters failed")
# occurs when scanning is still enabled from previous call
# allows LE advertising events
hci_filter = struct.pack(
"<IQH",
0x00000010,
0x4000000000000000,
0
)
sock.setsockopt(SOL_HCI, HCI_FILTER, hci_filter)
err = bluez.hci_le_set_scan_enable(
sock.fileno(),
1, # 1 - turn on; 0 - turn off
0, # 0-filtering disabled, 1-filter out duplicates
1000 # timeout
)
if err < 0:
errnum = get_errno()
raise Exception("{} {}".format(
errno.errorcode[errnum],
os.strerror(errnum)
))
distanceAway = 1 # distance away from the estimote beacon in meter
with open("RSSI_data" + str(distanceAway) + ".csv","w") as out_file:
for x in range (1,100):
data = sock.recv(1024)
RSSI = int(binascii.b2a_hex(data[-1]),16)-255
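        # The advertising report's last byte carries the RSSI; the unsigned byte
        # is shifted below zero here so it is logged as a negative dBm-style value.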
out_string = ""
out_string += str(RSSI)
out_string += "\n"
out_file.write(out_string)
sock.close()
sys.exit()
|
QKaiser/pynessus
|
pynessus/models/scanner.py
|
Python
|
apache-2.0
| 2,984
| 0.001005
|
"""
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nessusobject import NessusObject
class Scanner(NessusObject):
"""
A Nessus Scan Template instance.
Attributes:
_Google Python Style Guide:
http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
"""
def __init__(self, server):
"""Constructor"""
super(Scanner, self).__init__(server)
self._id = None
self._uuid = None
self._name = None
        self._type = None
self._status = None
self._scan_count = 0
self._engine_version = None
self._platform = None
self._loaded_plugin_set = None
self._registration_code = None
self._owner = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
        self._id = int(value)
@property
def uuid(self):
return self._uuid
@uuid.setter
def uuid(self, value):
self._uuid = str(value)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = str(value)
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = str(value)
@property
def scan_count(self):
return self._scan_count
@scan_count.setter
def scan_count(self, value):
self._scan_count = int(value)
@property
def engine_version(self):
return self._engine_version
@engine_version.setter
def engine_version(self, value):
self._engine_version = str(value)
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
self._platform = str(value)
@property
def loaded_plugin_set(self):
return self._loaded_plugin_set
@loaded_plugin_set.setter
def loaded_plugin_set(self, value):
self._loaded_plugin_set = str(value)
@property
def registration_code(self):
return self._registration_code
@registration_code.setter
def registration_code(self, value):
self._registration_code = str(value)
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
self._owner = str(value)
|
davinwang/caffe2
|
caffe2/python/workspace.py
|
Python
|
apache-2.0
| 19,553
| 0.001074
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package workspace
# Module caffe2.python.workspace
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
from google.protobuf.message import Message
from multiprocessing import Process
import os
from collections import defaultdict
import logging
import numpy as np
from past.builtins import basestring
import shutil
import socket
import tempfile
from caffe2.proto import caffe2_pb2
from caffe2.python import scope, utils
import caffe2.python._import_c_extension as C
logger = logging.getLogger(__name__)
Blobs = C.blobs
CreateBlob = C.create_blob
CurrentWorkspace = C.current_workspace
DeserializeBlob = C.deserialize_blob
GlobalInit = C.global_init
HasBlob = C.has_blob
RegisteredOperators = C.registered_operators
SerializeBlob = C.serialize_blob
SwitchWorkspace = C.switch_workspace
RootFolder = C.root_folder
Workspaces = C.workspaces
BenchmarkNet = C.benchmark_net
GetStats = C.get_stats
operator_tracebacks = defaultdict(dict)
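# Maps net name -> operator index -> a stored traceback; CallWithExceptionIntercept
# below prints the recorded traceback when that operator fails at run time.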
is_asan = C.is_asan
has_gpu_support = C.has_gpu_support
if has_gpu_support:
NumCudaDevices = C.num_cuda_devices
SetDefaultGPUID = C.set_default_gpu_id
GetDefaultGPUID = C.get_default_gpu_id
GetCUDAVersion = C.get_cuda_version
GetCuDNNVersion = C.get_cudnn_version
def GetCudaPeerAccessPattern():
return np.asarray(C.get_cuda_peer_access_pattern())
GetDeviceProperties = C.get_device_properties
else:
NumCudaDevices = lambda: 0 # noqa
SetDefaultGPUID = lambda x: None # noqa
GetDefaultGPUID = lambda: 0 # noqa
    GetCUDAVersion = lambda: 0 # noqa
    GetCuDNNVersion = lambda: 0 # noqa
GetCudaPeerAccessPattern = lambda: np.array([]) # noqa
GetDeviceProperties = lambda x: None # noqa
def _GetFreeFlaskPort():
"""Get a free flask port."""
# We will prefer to use 5000. If not, we will then pick a random port.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', 5000))
if result == 0:
return 5000
else:
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
# Race condition: between the interval we close the socket and actually
# start a mint process, another process might have occupied the port. We
# don't do much here as this is mostly for convenience in research
# rather than 24x7 service.
return port
def StartMint(root_folder=None, port=None):
"""Start a mint instance.
TODO(Yangqing): this does not work well under ipython yet. According to
https://github.com/ipython/ipython/issues/5862
writing up some fix is a todo item.
"""
from caffe2.python.mint import app
if root_folder is None:
# Get the root folder from the current workspace
root_folder = C.root_folder()
if port is None:
port = _GetFreeFlaskPort()
process = Process(
target=app.main,
args=(
['-p', str(port), '-r', root_folder],
)
)
process.start()
print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))
return process
def StringifyProto(obj):
"""Stringify a protocol buffer object.
Inputs:
obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()
function.
Outputs:
string: the output protobuf string.
Raises:
AttributeError: if the passed in object does not have the right attribute.
"""
if isinstance(obj, basestring):
return obj
else:
if isinstance(obj, Message):
# First, see if this object is a protocol buffer, which we can
# simply serialize with the SerializeToString() call.
return obj.SerializeToString()
elif hasattr(obj, 'Proto'):
return obj.Proto().SerializeToString()
else:
raise ValueError("Unexpected argument to StringifyProto of type " +
type(obj).__name__)
def ResetWorkspace(root_folder=None):
if root_folder is None:
# Reset the workspace, but keep the current root folder setting.
return C.reset_workspace(C.root_folder())
else:
if not os.path.exists(root_folder):
os.makedirs(root_folder)
return C.reset_workspace(root_folder)
def CreateNet(net, overwrite=False, input_blobs=None):
if input_blobs is None:
input_blobs = []
for input_blob in input_blobs:
C.create_blob(input_blob)
return CallWithExceptionIntercept(
C.create_net,
        C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
def Predictor(init_net, predict_net):
return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net))
def GetOperatorCost(operator, blobs):
return C.get_operator_cost(StringifyProto(operator), blobs)
def RunOperatorOnce(operator):
    return C.run_operator_once(StringifyProto(operator))
def RunOperatorsOnce(operators):
for op in operators:
success = RunOperatorOnce(op)
if not success:
return False
return True
def CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
op_id = op_id_fetcher()
net_tracebacks = operator_tracebacks.get(net_name, None)
print("Traceback for operator {} in network {}".format(op_id, net_name))
if net_tracebacks and op_id in net_tracebacks:
tb = net_tracebacks[op_id]
for line in tb:
print(':'.join(map(str, line)))
raise
def RunNetOnce(net):
return CallWithExceptionIntercept(
C.run_net_once,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net),
)
def RunNet(name, num_iter=1, allow_fail=False):
"""Runs a given net.
Inputs:
name: the name of the net, or a reference to the net.
num_iter: number of iterations to run
allow_fail: if True, does not assert on net exec failure but returns False
Returns:
True or an exception.
"""
return CallWithExceptionIntercept(
C.run_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(name),
StringifyNetName(name), num_iter, allow_fail,
)
def RunPlan(plan_or_step):
# TODO(jiayq): refactor core.py/workspace.py to avoid circular deps
import caffe2.python.core as core
if isinstance(plan_or_step, core.ExecutionStep):
plan_or_step = core.Plan(plan_or_step)
return C.run_plan(StringifyProto(plan_or_step))
def InferShapesAndTypes(nets, blob_dimensions=None):
"""Infers the shapes and types for the specified nets.
Inputs:
nets: the list of nets
blob_dimensions (optional): a dictionary of blobs and their dimensions.
If not specified, the workspace blobs are used.
Returns:
A tuple of (shapes, types) dictionaries keyed by blob name.
"""
net_protos = [StringifyProto(n.Proto()) for n in nets]
if blob_dimensions is None:
blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)
else:
blobdesc_prototxt = C.infer_shapes_and_types_from_map(
net_protos, blob_dimensions
)
blobdesc_proto = caffe2_pb2.TensorShapes()
    blobdesc_proto.ParseFromString(blobdesc_prototxt)
|
smmribeiro/intellij-community
|
python/testData/psi/PatternMatchingAnnotatedAssignmentLooksLikeIncompleteMatchStatement.py
|
Python
|
apache-2.0
| 17
| 0
|
match: case = 42
|
CongLi/avocado-vt
|
setup.py
|
Python
|
gpl-2.0
| 3,742
| 0
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os
import glob
# pylint: disable=E0611
from setuptools import setup
VERSION = open('VERSION', 'r').read().strip()
VIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ
def get_dir(system_path=None, virtual_path=None):
"""
Retrieve VIRTUAL_ENV friendly path
:param system_path: Relative system path
:param virtual_path: Overrides system_path for virtual_env only
:return: VIRTUAL_ENV friendly path
"""
if virtual_path is None:
virtual_path = system_path
if VIRTUAL_ENV:
if virtual_path is None:
virtual_path = []
return os.path.join(*virtual_path)
else:
if system_path is None:
system_path = []
return os.path.join(*(['/'] + system_path))
def get_data_files():
def add_files(level=[]):
installed_location = ['usr', 'share', 'avocado-plugins-vt']
installed_location += level
level_str = '/'.join(level)
if level_str:
level_str += '/'
file_glob = '%s*' % level_str
files_found = [path for path in glob.glob(file_glob) if
os.path.isfile(path)]
return [((get_dir(installed_location, level)), files_found)]
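    # add_files maps a source sub-directory onto its install location under
    # usr/share/avocado-plugins-vt, pairing it with the plain files found there.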
data_files = [(get_dir(['etc', 'avocado', 'conf.d']),
['etc/avocado/conf.d/vt.conf'])]
data_files += [(get_dir(['usr', 'share', 'avocado-plugins-vt',
'test-providers.d']),
glob.glob('test-providers.d/*'))]
data_files_dirs = ['backends', 'shared']
for data_file_dir in data_files_dirs:
for root, dirs, files in os.walk(data_file_dir):
for subdir in dirs:
rt = root.split('/')
rt.append(subdir)
data_files += add_files(rt)
return data_files
setup(name='avocado-plugins-vt',
version=VERSION,
description='Avocado Virt Test Compatibility Layer plugin',
author='Avocado Developers',
author_email='avocado-devel@redhat.com',
url='http://github.com/avocado-framework/avocado-vt',
packages=['avocado_vt',
'avocado_vt.plugins',
'virttest',
'virttest.libvirt_xml',
'virttest.libvirt_xml.devices',
'virttest.libvirt_xml.nwfilter_protocols',
'virttest.qemu_devices',
'virttest.remote_commander',
'virttest.staging',
'virttest.staging.backports',
'virttest.tests',
'virttest.unittest_utils',
'virttest.utils_test',
'virttest.utils_test.qemu'],
package_data={"virttest": ["*.*"]},
data_files=get_data_files(),
entry_points={
'avocado.plugins.cli': [
'vt-list = avocado_vt.plugins.vt_list:VTLister',
'vt = avocado_vt.plugins.vt:VTRun',
],
'avocado.plugins.cli.cmd': [
'vt-bootstrap = avocado_vt.plugins.vt_bootstrap:VTBootstrap',
],
'avocado.plugins.job.prepost': [
'vt-joblock = avocado_vt.plugins.vt_joblock:VTJobLock'
],
},
)
|
codelieche/codelieche.com
|
apps/article/urls/api/post.py
|
Python
|
mit
| 382
| 0.00266
|
# -*- coding:utf-8 -*-
from django.urls import path
from article.views.post import PostListApiView, PostCreateApiView, PostDetailApiView
urlpatterns = [
    # Prefix: /api/v1/article/post/
path('create', PostCreateApiView.as_view(), name="create"),
path('list', PostListApiView.as_view(), name="list"),
path('<int:pk>', PostDetailApiView.as_view(), name="detail"),
]
|
dirtycold/git-cola
|
cola/fsmonitor.py
|
Python
|
gpl-2.0
| 19,637
| 0.000662
|
# Copyright (c) 2008 David Aguilar
# Copyright (c) 2015 Daniel Harding
"""Provides an filesystem monitoring for Linux (via inotify) and for Windows
(via pywin32 and the ReadDirectoryChanges function)"""
from __future__ import division, absolute_import, unicode_literals
import errno
import os
import os.path
import select
from threading import Lock
from . import utils
from . import version
from .decorators import memoize
AVAILABLE = None
if utils.is_win32():
try:
import pywintypes
import win32con
import win32event
import win32file
except ImportError:
pass
else:
AVAILABLE = 'pywin32'
elif utils.is_linux():
try:
from . import inotify
except ImportError:
pass
else:
AVAILABLE = 'inotify'
from qtpy import QtCore
from qtpy.QtCore import Signal
from . import core
from . import gitcfg
from . import gitcmds
from .compat import bchr
from .git import git
from .i18n import N_
from .interaction import Interaction
class _Monitor(QtCore.QObject):
files_changed = Signal()
    def __init__(self, thread_class):
QtCore.QObject.__init__(self)
self._thread_class = thread_class
self._thread = None
def start(self):
if self._thread_class is not None:
assert self._thread is None
self._thread = self._thread_class(self)
self._thread.start()
def stop(self):
if self._thread_class is not None:
assert self._thread is not None
self._thread.stop()
self._thread.wait()
self._thread = None
def refresh(self):
if self._thread is not None:
self._thread.refresh()
class _BaseThread(QtCore.QThread):
#: The delay, in milliseconds, between detecting file system modification
#: and triggering the 'files_changed' signal, to coalesce multiple
#: modifications into a single signal.
_NOTIFICATION_DELAY = 888
def __init__(self, monitor):
QtCore.QThread.__init__(self)
self._monitor = monitor
self._running = True
self._use_check_ignore = version.check('check-ignore',
version.git_version())
self._force_notify = False
self._file_paths = set()
@property
def _pending(self):
return self._force_notify or self._file_paths
def refresh(self):
"""Do any housekeeping necessary in response to repository changes."""
pass
def notify(self):
"""Notifies all observers"""
do_notify = False
if self._force_notify:
do_notify = True
elif self._file_paths:
proc = core.start_command(['git', 'check-ignore', '--verbose',
'--non-matching', '-z', '--stdin'])
path_list = bchr(0).join(core.encode(path)
for path in self._file_paths)
out, err = proc.communicate(path_list)
if proc.returncode:
do_notify = True
else:
# Each output record is four fields separated by NULL
# characters (records are also separated by NULL characters):
# <source> <NULL> <linenum> <NULL> <pattern> <NULL> <pathname>
# For paths which are not ignored, all fields will be empty
# except for <pathname>. So to see if we have any non-ignored
# files, we simply check every fourth field to see if any of
# them are empty.
source_fields = out.split(bchr(0))[0:-1:4]
do_notify = not all(source_fields)
self._force_notify = False
self._file_paths = set()
if do_notify:
self._monitor.files_changed.emit()
@staticmethod
def _log_enabled_message():
msg = N_('File system change monitoring: enabled.\n')
Interaction.safe_log(msg)
if AVAILABLE == 'inotify':
class _InotifyThread(_BaseThread):
_TRIGGER_MASK = (
inotify.IN_ATTRIB |
inotify.IN_CLOSE_WRITE |
inotify.IN_CREATE |
inotify.IN_DELETE |
inotify.IN_MODIFY |
inotify.IN_MOVED_FROM |
inotify.IN_MOVED_TO
)
_ADD_MASK = (
_TRIGGER_MASK |
inotify.IN_EXCL_UNLINK |
inotify.IN_ONLYDIR
)
def __init__(self, monitor):
_BaseThread.__init__(self, monitor)
worktree = git.worktree()
if worktree is not None:
worktree = core.abspath(worktree)
self._worktree = worktree
self._git_dir = git.git_path()
self._lock = Lock()
self._inotify_fd = None
self._pipe_r = None
self._pipe_w = None
self._worktree_wd_to_path_map = {}
self._worktree_path_to_wd_map = {}
self._git_dir_wd_to_path_map = {}
self._git_dir_path_to_wd_map = {}
self._git_dir_wd = None
@staticmethod
def _log_out_of_wds_message():
msg = N_('File system change monitoring: disabled because the'
' limit on the total number of inotify watches was'
' reached. You may be able to increase the limit on'
' the number of watches by running:\n'
'\n'
' echo fs.inotify.max_user_watches=100000 |'
' sudo tee -a /etc/sysctl.conf &&'
' sudo sysctl -p\n')
Interaction.safe_log(msg)
def run(self):
try:
with self._lock:
self._inotify_fd = inotify.init()
self._pipe_r, self._pipe_w = os.pipe()
poll_obj = select.poll()
poll_obj.register(self._inotify_fd, select.POLLIN)
poll_obj.register(self._pipe_r, select.POLLIN)
self.refresh()
self._log_enabled_message()
while self._running:
if self._pending:
timeout = self._NOTIFICATION_DELAY
else:
timeout = None
try:
events = poll_obj.poll(timeout)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
except select.error:
continue
else:
if not self._running:
break
elif not events:
self.notify()
else:
for fd, event in events:
if fd == self._inotify_fd:
self._handle_events()
finally:
with self._lock:
if self._inotify_fd is not None:
os.close(self._inotify_fd)
self._inotify_fd = None
if self._pipe_r is not None:
os.close(self._pipe_r)
self._pipe_r = None
os.close(self._pipe_w)
self._pipe_w = None
def refresh(self):
with self._lock:
if self._inotify_fd is None:
return
try:
if self._worktree is not None:
tracked_dirs = set(
os.path.dirname(os.path.join(self._worktree,
path))
for path in gitcmds.tracked_files())
self._refresh_watches(tracked_dirs,
self._worktree_wd_to_path_map,
                                          self._worktree_path_to_wd_map)
|
benoitsteiner/tensorflow-xsmm
|
tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
|
Python
|
apache-2.0
| 21,832
| 0.0071
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.kernel_tests import benchmarking
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
block_lstm = lstm_ops._block_lstm  # pylint: disable=protected-access
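# blocks_match feeds identical random inputs through the canonical LSTMCell, the
# fused block_lstm op and LSTMBlockFusedCell, returning their outputs, final
# states and gradients so tests can assert the three implementations agree.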
def blocks_match(sess, use_peephole):
batch_size = 2
input_size = 3
cell_size = 4
sequence_length = 4
inputs = []
for _ in range(sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
  stacked_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=19890212)
with variable_scope.variable_scope("test", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
if use_peephole:
wci = variable_scope.get_variable(
"rnn/lstm_cell/w_i_diag", shape=[cell_size], dtype=dtypes.float32)
wcf = variable_scope.get_variable(
"rnn/lstm_cell/w_f_diag", shape=[cell_size], dtype=dtypes.float32)
wco = variable_scope.get_variable(
"rnn/lstm_cell/w_o_diag", shape=[cell_size], dtype=dtypes.float32)
w = variable_scope.get_variable(
"rnn/lstm_cell/kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
b = variable_scope.get_variable(
"rnn/lstm_cell/bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
basic_cell = rnn_cell.LSTMCell(
cell_size, use_peepholes=use_peephole, state_is_tuple=True, reuse=True)
basic_outputs_op, basic_state_op = rnn.static_rnn(
basic_cell, inputs, dtype=dtypes.float32)
if use_peephole:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
wci=wci,
wcf=wcf,
wco=wco,
cell_clip=0,
use_peephole=True)
else:
_, _, _, _, _, _, block_outputs_op = block_lstm(
ops.convert_to_tensor(sequence_length, dtype=dtypes.int64),
inputs,
w,
b,
cell_clip=0)
fused_cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=use_peephole, reuse=True,
name="rnn/lstm_cell")
fused_outputs_op, fused_state_op = fused_cell(
stacked_inputs, dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([basic_outputs_op, basic_state_op[0]])
basic_grads = sess.run(gradients_impl.gradients(basic_outputs_op, inputs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
basic_wgrads = sess.run(gradients_impl.gradients(basic_outputs_op, xs))
block_outputs = sess.run(block_outputs_op)
block_grads = sess.run(gradients_impl.gradients(block_outputs_op, inputs))
block_wgrads = sess.run(gradients_impl.gradients(block_outputs_op, xs))
xs = [w, b]
if use_peephole:
xs += [wci, wcf, wco]
fused_outputs, fused_state = sess.run([fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(fused_outputs_op, xs))
return (basic_state, fused_state, basic_outputs, block_outputs,
fused_outputs, basic_grads, block_grads, fused_grads, basic_wgrads,
block_wgrads, fused_wgrads)
class LSTMBlockCellTest(test.TestCase):
def testNoneDimsWithDynamicRNN(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
num_steps = 5
input_dim = 6
cell_size = 7
cell = lstm_ops.LSTMBlockCell(cell_size)
x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_dim))
output, _ = rnn.dynamic_rnn(
cell, x, time_major=True, dtype=dtypes.float32)
sess.run(variables.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_dim)
sess.run(output, feed)
def testLSTMBlockCell(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2)
for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 2]),
m1.name: 0.1 * np.ones([1, 2]),
m2.name: 0.1 * np.ones([1, 2]),
m3.name: 0.1 * np.ones([1, 2])
})
self.assertEqual(len(res), 5)
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
# These numbers are from testBasicLSTMCell and only test c/h.
self.assertAllClose(res[1], [[0.68967271, 0.68967271]])
self.assertAllClose(res[2], [[0.44848421, 0.44848421]])
self.assertAllClose(res[3], [[0.39897051, 0.39897051]])
self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
def testCompatibleNames(self):
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = rnn_cell.LSTMCell(10)
pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
basic_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockCell(10)
pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
inputs = [array_ops.zeros([4, 5])] * 6
rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope="basic")
rnn.static_rnn(pcell, inputs, dtype=dtypes.float32, scope="peephole")
block_names = {
v.name: v.get_shape()
for v in variables.trainable_variables()
}
with self.test_session(use_gpu=True, graph=ops.Graph()):
cell = lstm_ops.LSTMBlockFusedCell(10)
pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
inputs = array_ops.stack([array_ops.zeros([4, 5])] * 6)
cell(inputs, dtype=dtypes.float32, scope="basic/lstm_cell")
      pcell(inputs, dtype=dtypes.float32, scope="peephole/lstm_cell")
|
emCOMP/twinkle
|
twinkle/feature_extraction/pipelines.py
|
Python
|
mit
| 2,688
| 0.034226
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core import FeatureExtractorRegistry
from twinkle.connectors.core import ConnectorRegistry
class FeatureExtractorPipelineFactory(object):
"""
Factory object for creating a pipeline from a file
"""
def __init__(self):
"""
"""
pass
def buildInput(self, config_data):
"""
builds an input from the ConnectorRegistry
"""
input_name = config_data["name"]
input_config = config_data["config"]
return ConnectorRegistry.buildConnector(input_name, input_config)
def buildOutput(self, config_data):
"""
		builds an output from the ConnectorRegistry
"""
output_name = config_data["name"]
output_config = config_data["config"]
return ConnectorRegistry.buildConnector(output_name, output_config)
def buildExtractor(self, config_data):
"""
"""
extractor_name = config_data["name"]
extractor_config = config_data["config"]
return FeatureExtractorRegistry.buildExtractor(extractor_name, extractor_config)
def buildFromDictionary(self,config_data):
"""
"""
if "input" not in config_data:
raise Exception("No input source was specified in the configuration data")
if "output" not in config_data:
raise Exception("No output source was specified in the configuration data")
#build input
input_data = config_data["input"]
input = self.buildInput(input_data)
# build output
output_data = config_data["output"]
output = self.buildOutput(output_data)
# create the pipeline
pipeline = FeatureExtractorPipeline(input, output)
        # get feature extractors
extractors = config_data["extractors"]
# add each extractor
for extractor_config in extractors:
extractor = self.buildExtractor(extractor_config)
pipeline.addExtractor(extractor)
return pipeline
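# Illustrative configuration for buildFromDictionary (names here are hypothetical
# and must match connectors/extractors actually registered with ConnectorRegistry
# and FeatureExtractorRegistry):
#
# {
#     "input": {"name": "csv", "config": {"path": "tweets.csv"}},
#     "output": {"name": "json", "config": {"path": "features.json"}},
#     "extractors": [{"name": "hashtags", "config": {}}]
# }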
class FeatureExtractorPipeline(object):
"""
Simple feature extractor pipeline.
Needs a lot of features in the future such as dependency graphs to resolve some of the intermediates
    and the ability to do second passes for items which need to be normalized.
"""
def __init__(self, input, output):
self.feature_extractors = []
self.input = input
self.output = output
def addExtractor(self, extractor):
"""
add Extractor to the pipeline
"""
self.feature_extractors.append(extractor)
def run(self):
"""
runs the pipeline
"""
processed_items = []
# iterate through each item
for item in self.input:
item_cookie = { "tweet": item, "text": item.text}
output = {}
# first do preprossing
for extractor in self.feature_extractors:
extractor.extract(item, item_cookie, output)
print output
# write output
self.output.write(output)
|
flipagram/elasticity
|
setup.py
|
Python
|
mit
| 752
| 0.00133
|
from setuptools import setup
setup(
# general meta
name='elasticity',
version='0.7',
author='Brian C. Dilley - Flipagram',
author_email='brian@flipagram.com',
description='Python based command line tool for managing ElasticSearch clusters.',
platforms='any',
url='https://github.com/Cheers-Dev/elasticity',
download_url='https://github.com/Cheers-Dev/elasticity',
# packages
packages=[
'elasticity'
],
# dependencies
install_requires=[
'elasticsearch>=1.4.0',
'pyyaml>=3.10'
],
# additional files to include
    include_package_data=True,
# the scripts
scripts=['scripts/elasticity'],
# wut?
    classifiers=['Intended Audience :: Developers']
)
|
ahmadiga/min_edx
|
common/lib/xmodule/xmodule/crowdsource_hinter.py
|
Python
|
agpl-3.0
| 17,456
| 0.00338
|
"""
Adds crowdsourced hinting functionality to lon-capa numerical response problems.
Currently experimental - not for instructor use, yet.
"""
import logging
import json
import random
import copy
from pkg_resources import resource_string
from lxml import etree
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.raw_module import RawDescriptor
from xblock.fields import Scope, String, Integer, Boolean, Dict, List
from capa.responsetypes import FormulaResponse
from django.utils.html import escape
log = logging.getLogger(__name__)
class CrowdsourceHinterFields(object):
"""Defines fields
|
for the crowdsource hinter module."""
has_children = True
moderate = String(help='String "True"/"False" - activates moderation', scope=Scope.content,
default='False')
debug = String(help='String "True"/"False" - allows multiple voting', scope=Scope.content,
default='False')
# Usage: hints[answer] = {str(pk): [hint_text, #votes]}
    # hints is a dictionary that takes answer keys.
# Each value is itself a dictionary, accepting hint_pk strings as keys,
# and returning [hint text, #votes] pairs as values
hints = Dict(help='A dictionary containing all the active hints.', scope=Scope.content, default={})
mod_queue = Dict(help='A dictionary containing hints still awaiting approval', scope=Scope.content,
default={})
hint_pk = Integer(help='Used to index hints.', scope=Scope.content, default=0)
# A list of previous hints that a student viewed.
# Of the form [answer, [hint_pk_1, ...]] for each problem.
# Sorry about the variable name - I know it's confusing.
previous_answers = List(help='A list of hints viewed.', scope=Scope.user_state, default=[])
# user_submissions actually contains a list of previous answers submitted.
    # (Originally, previous_answers did this job, hence the name confusion.)
user_submissions = List(help='A list of previous submissions', scope=Scope.user_state, default=[])
user_voted = Boolean(help='Specifies if the user has voted on this problem or not.',
scope=Scope.user_state, default=False)
class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
"""
An Xmodule that makes crowdsourced hints.
Currently, only works on capa problems with exactly one numerical response,
and no other parts.
Example usage:
<crowdsource_hinter>
<problem blah blah />
</crowdsource_hinter>
XML attributes:
-moderate="True" will not display hints until staff approve them in the hint manager.
-debug="True" will let users vote as often as they want.
"""
icon_class = 'crowdsource_hinter'
css = {'scss': [resource_string(__name__, 'css/crowdsource_hinter/display.scss')]}
js = {'coffee': [resource_string(__name__, 'js/src/crowdsource_hinter/display.coffee')],
'js': []}
js_module_name = "Hinter"
def __init__(self, *args, **kwargs):
super(CrowdsourceHinterModule, self).__init__(*args, **kwargs)
# We need to know whether we are working with a FormulaResponse problem.
try:
responder = self.get_display_items()[0].lcp.responders.values()[0]
except (IndexError, AttributeError):
log.exception('Unable to find a capa problem child.')
return
        self.is_formula = isinstance(responder, FormulaResponse)
if self.is_formula:
self.answer_to_str = self.formula_answer_to_str
else:
self.answer_to_str = self.numerical_answer_to_str
# compare_answer is expected to return whether its two inputs are close enough
# to be equal, or raise a StudentInputError if one of the inputs is malformatted.
if hasattr(responder, 'compare_answer') and hasattr(responder, 'validate_answer'):
self.compare_answer = responder.compare_answer
self.validate_answer = responder.validate_answer
else:
# This response type is not supported!
log.exception('Response type not supported for hinting: ' + str(responder))
def get_html(self):
"""
Puts a wrapper around the problem html. This wrapper includes ajax urls of the
hinter and of the problem.
- Dependent on lon-capa problem.
"""
if self.debug == 'True':
# Reset the user vote, for debugging only!
self.user_voted = False
if self.hints == {}:
# Force self.hints to be written into the database. (When an xmodule is initialized,
# fields are not added to the db until explicitly changed at least once.)
self.hints = {}
try:
child = self.get_display_items()[0]
out = child.render(STUDENT_VIEW).content
# The event listener uses the ajax url to find the child.
child_id = child.id
except IndexError:
out = u"Error in loading crowdsourced hinter - can't find child problem."
child_id = ''
# Wrap the module in a <section>. This lets us pass data attributes to the javascript.
out += u'<section class="crowdsource-wrapper" data-url="{ajax_url}" data-child-id="{child_id}"> </section>'.format(
ajax_url=self.runtime.ajax_url,
child_id=child_id
)
return out
def numerical_answer_to_str(self, answer):
"""
Converts capa numerical answer format to a string representation
of the answer.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
return str(answer.values()[0])
def formula_answer_to_str(self, answer):
"""
Converts capa formula answer into a string.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
return str(answer.values()[0])
def get_matching_answers(self, answer):
"""
Look in self.hints, and find all answer keys that are "equal with tolerance"
to the input answer.
"""
return [key for key in self.hints if self.compare_answer(key, answer)]
def handle_ajax(self, dispatch, data):
"""
This is the landing method for AJAX calls.
"""
if dispatch == 'get_hint':
out = self.get_hint(data)
elif dispatch == 'get_feedback':
out = self.get_feedback(data)
elif dispatch == 'vote':
out = self.tally_vote(data)
elif dispatch == 'submit_hint':
out = self.submit_hint(data)
else:
return json.dumps({'contents': 'Error - invalid operation.'})
if out is None:
out = {'op': 'empty'}
elif 'error' in out:
# Error in processing.
out.update({'op': 'error'})
else:
out.update({'op': dispatch})
return json.dumps({'contents': self.runtime.render_template('hinter_display.html', out)})
def get_hint(self, data):
"""
The student got the incorrect answer found in data. Give him a hint.
Called by hinter javascript after a problem is graded as incorrect.
Args:
`data` -- must be interpretable by answer_to_str.
Output keys:
- 'hints' is a list of hint strings to show to the user.
- 'answer' is the parsed answer that was submitted.
Will record the user's wrong answer in user_submissions, and the hints shown
in previous_answers.
"""
# First, validate our inputs.
try:
answer = self.answer_to_str(data)
except (ValueError, AttributeError):
# Sometimes, we get an answer that's just not parsable. Do nothing.
log.exception('Answer not parsable: ' + str(data))
return
if not self.validate_answer(answer):
# Answer is not in the right form.
log.exception('Answer not valid: ' + str(answer))
return
if answer not in self.user_submissions:
self.user_submissions += [answer]
# For
|
csdms/wmt-exe
|
wmtexe/cmi/make.py
|
Python
|
mit
| 673
| 0
|
from __future__ import print_function
import argparse
import yaml
from .bocca import make_project, ProjectExistsError
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=argparse.FileType('r'),
                        help='Project description file')
parser.add_argument('--clobber', action='store_true',
help='Clobber an existing project')
args = parser.parse_args()
try:
        make_project(yaml.load(args.file), clobber=args.clobber)
except ProjectExistsError as error:
print('The specified project (%s) already exists. Exiting.' % error)
if __name__ == '__main__':
main()
|
qk4l/Flexget
|
flexget/api/plugins/tvmaze_lookup.py
|
Python
|
mit
| 6,102
| 0.001475
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flask import jsonify
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.api.app import NotFoundError, BadRequest, etag
from flexget.plugins.internal.api_tvmaze import APITVMaze as tvm
tvmaze_api = api.namespace('tvmaze', description='TVMaze Shows')
class ObjectsContainer(object):
actor_object = {
'type': 'object',
'properties': {
"last_update": {'type': 'string', 'format': 'date-time'},
"medium_image": {'type': 'string'},
"name": {'type': 'string'},
"original_image": {'type': 'string'},
"tvmaze_id": {'type': 'integer'},
"url": {'type': 'string'}
}
}
schedule_object = {
'type': 'object',
'properties': {
"days": {'type': 'array', 'items': {'type': 'string'}},
"time": {'type': 'string'}
}
}
tvmaze_series_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'status': {'type': 'string'},
'rating': {'type': 'number'},
'genres': {'type': 'array', 'items': {'type': 'string'}},
'weight': {'type': 'integer'},
'updated': {'type': 'string', 'format': 'date-time'},
'name': {'type': 'string'},
'language': {'type': 'string'},
'schedule': schedule_object,
'url': {'type': 'string', 'format': 'url'},
'original_image': {'type': 'string'},
'medium_image': {'type': 'string'},
'tvdb_id': {'type': 'integer'},
'tvrage_id': {'type': 'integer'},
'premiered': {'type': 'string', 'format': 'date-time'},
'year': {'type': 'integer'},
'summary': {'type': 'string'},
'webchannel': {'type': ['string', 'null']},
'runtime': {'type': 'integer'},
'show_type': {'type': 'string'},
'network': {'type': ['string', 'null']},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'status', 'rating', 'genres', 'weight', 'updated', 'name', 'language',
'schedule', 'url', 'original_image', 'medium_image', 'tvdb_id', 'tvrage_id', 'premiered', 'year',
'summary', 'webchannel', 'runtime', 'show_type', 'network', 'last_update'],
'additionalProperties': False
}
tvmaze_episode_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'series_id': {'type': 'integer'},
'number': {'type': 'integer'},
'season_number': {'type': 'integer'},
'title': {'type': 'string'},
'airdate': {'type': 'string', 'format': 'date-time'},
'url': {'type': 'string'},
'original_image': {'type': ['string', 'null']},
'medium_image': {'type': ['string', 'null']},
'airstamp': {'type': 'string', 'format': 'date-time'},
'runtime': {'type': 'integer'},
'summary': {'type': 'string'},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'series_id', 'number', 'season_number', 'title', 'airdate', 'url', 'original_image',
'medium_image', 'airstamp', 'runtime', 'summary', 'last_update'],
'additionalProperties': False
}
tvmaze_series_schema = api.schema_model('tvmaze_series_schema', ObjectsContainer.tvmaze_series_object)
tvmaze_episode_schema = api.schema_model('tvmaze_episode_schema', ObjectsContainer.tvmaze_episode_object)
@tvmaze_api.route('/series/<string:title>/')
@api.doc(params={'title': 'TV Show name or TVMaze ID'})
class TVDBSeriesSearchApi(APIResource):
@etag
@api.response(200, 'Successfully found show', model=tvmaze_series_schema)
@api.response(NotFoundError)
def get(self, title, session=None):
"""TVMaze series lookup"""
try:
tvmaze_id = int(title)
except ValueError:
tvmaze_id = None
try:
if tvmaze_id:
                series = tvm.series_lookup(tvmaze_id=tvmaze_id, session=session)
else:
series = tvm.series_lookup(series_name=title, session=session)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(series.to_dict())
episode_parser = api.parser()
episode_parser.add_argument('season_num', type=int, help='Season number')
episode_parser.add_argument('ep_num', type=int, help='Episode number')
episode_parser.add_argument('air_date', type=inputs.date_from_iso8601, help="Air date in the format of '2012-01-01'")
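# An episode lookup needs either air_date or both season_num and ep_num; requests
# providing less than that are rejected with BadRequest in the resource below.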
@tvmaze_api.route('/episode/<int:tvmaze_id>/')
@api.doc(params={'tvmaze_id': 'TVMaze ID of show'})
@api.doc(parser=episode_parser)
class TVDBEpisodeSearchAPI(APIResource):
@etag
@api.response(200, 'Successfully found episode', tvmaze_episode_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
def get(self, tvmaze_id, session=None):
"""TVMaze episode lookup"""
args = episode_parser.parse_args()
air_date = args.get('air_date')
season_num = args.get('season_num')
ep_num = args.get('ep_num')
kwargs = {'tvmaze_id': tvmaze_id,
'session': session}
if air_date:
kwargs['series_id_type'] = 'date'
kwargs['series_date'] = air_date
elif season_num and ep_num:
kwargs['series_id_type'] = 'ep'
kwargs['series_season'] = season_num
kwargs['series_episode'] = ep_num
else:
raise BadRequest('not enough parameters sent for lookup')
try:
episode = tvm.episode_lookup(**kwargs)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(episode.to_dict())
|
BBN-Q/Qlab
|
common/@PulseCalibration/PhaseEstimationSequence.py
|
Python
|
apache-2.0
| 1,148
| 0.004355
|
import argparse
import sys, os
import numpy as np
from copy import copy
parser = argparse.ArgumentParser()
parser.add_argument('qubit', help='qubit name')
parser.add_argument('direction', help='direction (X or Y)')
parser.add_argument('numPulses', type=int, help='log2(n) of the longest sequence n')
parser.add_argument('amplitude', type=float, help='pulse amplitude')
args = parser.parse_args()
from QGL import *
q = QubitFactory(args.qubit)
if args.direction == 'X':
pPulse = Xtheta(q, amp=args.amplitude)
mPulse = X90m(q)
else:
pPulse = Ytheta(q, amp=args.amplitude)
mPulse = Y90m(q)
# Exponentially growing repetitions of the target pulse, e.g.
# (1, 2, 4, 8, 16, 32, 64, 128, ...) x X90
seqs = [[pPulse]*n for n in 2**np.arange(args.numPulses+1)]
# measure each along Z or X/Y
seqs = [s + m for s in seqs for m in [ [MEAS(q)], [mPulse, MEAS(q)] ]]
# tack on calibrations to the beginning
seqs = [[Id(q), MEAS(q)], [X(q), MEAS(q)]] + seqs
# repeat each
repeated_seqs = [copy(s) for s in seqs for _ in range(2)]
fileNames = compile_to_hardware(repeated_seqs, fileName='RepeatCal/RepeatCal')
# plot_pulse_files(fileNames)
|
dhamaniasad/magnetor
|
magnetor.py
|
Python
|
unlicense
| 1,885
| 0.002122
|
import requests
import urllib2
import argparse
from bs4 import BeautifulSoup
def get_best_torrent(query):
query = urllib2.quote(query)
r = requests.get('http://kat.cr/usearch/{}/'.format(query))
soup = BeautifulSoup(r.content)
torrents = soup.find('table', class_='data').find_all(has_class_odd_or_even, limit=5)
for torrent in torrents:
name = torrent.find('a', class_='cellMainLink').text.encode('utf-8')
print "Name: {}".format(name)
size = torrent.find(class_='nobr center').text
print "Size: {}".format(size)
verified = bool(torrent.find('i', class_='ka ka-verify'))
if verified:
print "Verified Uploader: True"
else:
print "Verified: False"
seeds = torrent.find(class_='green center').text
print "Seeds: {}".format(seeds)
leeches = torrent.find(class_='red lasttd center').text
print "Leeches: {}".format(leeches)
try:
seed_to_leech = float(seeds) / float(leeches)
except ZeroDivisionError:
seed_to_leech = int(seeds)
print "Seed to leech ratio: {}".format(seed_to_leech)
magnet = torrent.find(class_='iaconbox').find('a', class_='imagnet')['href']
print "Magnet: \n{}\n".format(magnet)
def has_class_odd_or_even(tag):
if tag.has_attr('class'):
if 'odd' in tag.attrs['class'] or 'even' in tag.attrs['class']:
return True
return False
def command_line_runner():
    parser = argparse.ArgumentParser(description='Get magnet links for torrents from the CLI')
parser.add_argument('name', type=str, nargs='*', help='Name of the torrent you are looking for')
args = parser.parse_args()
if not args.name:
parser.print_help()
else:
get_best_torrent(' '.join(args.name))
if __name__ == '__main__':
command_line_runner()
|
joshk105/daw-translator
|
hindenburg.py
|
Python
|
mit
| 5,531
| 0.004701
|
from xml.dom import minidom
from object_classes import *
from helpers import timeToSeconds
class HindenburgInt(object):
def __init__(self, project_file, version="Hindenburg Journalist 1.26.1936", version_num="1.26.1936"):
self.projectFile = project_file
self.version = version
self.version_num = version_num
def get_session_name(self):
for i in self.projectFile.split("/"):
name = i
name = name.split(".")
return name[0]
def read(self):
projectXML = minidom.parse(self.projectFile)
projectObj = Session(self.get_session_name())
projectXML = projectXML.getElementsByTagName("Session")
project = projectXML[0]
projectObj.samplerate = project.getAttribute('Samplerate')
fileSourceInfo = project.getElementsByTagName("AudioPool")[0]
fileSourcePath = fileSourceInfo.getAttribute("Location") + "/" + fileSourceInfo.getAttribute("Path")
projectObj.audio_folder = fileSourceInfo.getAttribute('Path')
projectObj.folder_path = fileSourceInfo.getAttribute('Location')
audioFiles = project.getElementsByTagName("File")
for file in audioFiles:
projectObj.addFile(fileSourcePath + "/" + file.getAttribute("Name"), int(file.getAttribute('Id')))
markers = project.getElementsByTagName("Marker")
for marker in markers:
projectObj.addMarker(marker.getAttribute('Id'), marker.getAttribute('Name'), float(marker.getAttribute('Time')))
tracks = project.getElementsByTagName("Track")
for track in tracks:
current_track = projectObj.addTrack(track.getAttribute('Name'))
try:
current_track.pan = self.interpretPan(track.getAttribute('Pan'))
except:
current_track.pan = 0
try:
current_track.volume = track.getAttribute('Volume')
except:
current_track.volume = 0
try:
if track.getAttribute('Solo') == "1":
current_track.solo = True
except:
current_track.solo = False
try:
if track.getAttribute('Mute') == "1":
current_track.mute = False
except:
current_track.mute = False
try:
if track.getAttribute('Rec') == "1":
current_track.rec = True
except:
current_track.rec = False
trackItems = track.getElementsByTagName("Region")
for item in trackItems:
new_item = current_track.addItem(projectObj.getFileByID(int(item.getAttribute('Ref'))))
try:
start = float(item.getAttribute('Start'))
except:
start = 0
new_item.startTime = start
try:
startAt = float(item.getAttribute('Offset'))
except:
startAt = 0
new_item.startAt = startAt
length = timeToSeconds(item.getAttribute('Length'))
new_item.length = length
try:
gain = float(item.getAttribute('Gain'))
except:
gain = 0
new_item.gain = gain
new_item.name = item.getAttribute('Name')
fades = item.getElementsByTagName('Fade')
if fades:
autoEnv = current_track.getEnvelope('Volume')
if autoEnv == "Envelope Not Found":
autoEnv = current_track.addEnvelope('Volume')
firstFade = True
for fade in fades:
startTime = new_item.startTime + float(fade.getAttribute('Start'))
if firstFade:
startValue = new_item.gain
else:
startValue = autoEnv.points[-1].value
firstFade = False
endTime = startTime + float(fade.getAttribute('Length'))
try:
                            endValue = float(fade.getAttribute('Gain'))
except:
endValue = 0
autoEnv.addPoint(startTime, startValue)
autoEnv.addPoint(endTime, endValue)
plugins = track.getElementsByTagName("Plugin")
for plugin in plugins:
if plugin.getAttribute('Name') == 'Compressor':
pluginType = "Native"
else:
pluginType = "Plugin"
new_plugin = current_track.addFX(plugin.getAttribute('Name'), pluginType, int(plugin.getAttribute('Id')))
if pluginType == "Native":
if plugin.getAttribute('Name') == 'Compressor':
new_plugin.addProperty('UID', plugin.getAttribute('UID'))
new_plugin.addProperty('Comp', plugin.getAttribute('Comp'))
return projectObj
#Notes: Need to develop the section that reads the plugins...include support for external plugins, and the native EQ plugin
def write(self, destinationFile):
print('This function still needs to be written')
def interpretPan(self, amount):
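        # Map Hindenburg's pan value (assumed -1.0..1.0) onto a +/-90 scale,
        # flipping the sign for the target convention.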
num = -float(amount)
num = num*90
return num
|
yejingxin/kaggle-ndsb
|
configurations/featharalick_sharding_blend_pl_blend4_convroll4_doublescale_fs5_no_dropout_33_66.py
|
Python
|
mit
| 2,560
| 0.007813
|
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
features = [
# "hu",
# "tutorial",
"haralick",
# "aaronmoments",
# "lbp",
# "pftas",
# "zernike_moments",
# "image_size",
]
batch_size = 128
chunk_size = 32768
num_chunks_train = 240
momentum = 0.9
learning_rate_schedule = {
0: 0.001,
100: 0.0001,
200: 0.00001,
}
validate_every = 40
save_every = 40
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+""
valid_pred_file = sdir+""
test_pred_file = sdir+"test--sharding_blend_pl_blend4_convroll4_doublescale_fs5_no_dropout_33_66.npy"
data_loader = load.PredictionsWithFeaturesDataLoader(
features = features,
train_pred_file=train_pred_file,
valid_pred_file=valid_pred_file,
test_pred_file=test_pred_file,
num_chunks_train=num_chunks_train,
chunk_size=chunk_size)
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
l0 = nn.layers.InputLayer((batch_size, data.num_classes))
l0_size = nn.layers.InputLayer((batch_size, 52))
l1_size = nn.layers.DenseLayer(l0_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l2_size = nn.layers.DenseLayer(l1_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l3_size = nn.layers.DenseLayer(l2_size, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)
l1 = nn_plankton.NonlinLayer(l0, T.log)
ltot = nn.layers.ElemwiseSumLayer([l1, l3_size])
# norm_by_sum = lambda x: x / x.sum(1).dimshuffle(0, "x")
lout = nn_plankton.NonlinLayer(ltot, nonlinearity=T.nnet.softmax)
return [l0, l0_size], lout
def build_objective(l_ins, l_out):
reg_param = 0.0002
alpha = 0. # 0 -> L2 1-> L1
print "regu", reg_param, alpha
# lambda_reg = 0.005
params = nn.layers.get_all_non_bias_params(l_out)
# reg_term = sum(T.sum(p**2) for p in params)
L2 = sum(T.sum(p**2) for p in params)
L1 = sum(T.sum(T.abs_(p)) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + reg_param*(alpha * L1 + (1-alpha) * L2)
return nn.objectives.Objective(l_out, loss_function=loss)
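# Added note (not part of the original config): the penalty above blends the two
# norms as reg_param * (alpha * L1 + (1 - alpha) * L2) over all non-bias params,
# so with alpha = 0 it reduces to plain L2 weight decay with coefficient 0.0002.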
|
nwjs/chromium.src
|
third_party/blink/tools/blinkpy/web_tests/stale_expectation_removal/queries.py
|
Python
|
bsd-3-clause
| 7,303
| 0.000685
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Web test-specific impl of the unexpected passes' queries module."""
import os
import posixpath
from blinkpy.web_tests.stale_expectation_removal import constants
from unexpected_passes_common import queries as queries_module
# The target number of results/rows per query when running in large query mode.
# Higher values = longer individual query times and higher chances of running
# out of memory in BigQuery. Lower values = more parallelization overhead and
# more issues with rate limit errors.
TARGET_RESULTS_PER_QUERY = 20000
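# Added illustration of the trade-off described above (numbers are hypothetical):
# _GetQueryGeneratorForBuilder below aims for roughly
# TARGET_RESULTS_PER_QUERY / num_samples test ids per split query, e.g.
# 20000 / 100 samples -> ~200 ids per query, so a 2000-test filter list would
# fan out into about 10 parallel queries.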
# This query gets us all results for tests that have had results with a
# Failure, Timeout, or Crash expectation in the past |@num_samples| builds on
# |@builder_name|. Whether these are CI or try results depends on whether
# |builder_type| is "ci" or "try".
BQ_QUERY_TEMPLATE = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM `chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
WHERE
exported.realm = "chromium:{builder_type}"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT @num_builds
),
results AS (
SELECT
exported.id,
test_id,
status,
duration,
(
SELECT value
FROM tr.tags
WHERE key = "step_name") as step_name,
(
SELECT value
FROM tr.tags
WHERE key = "web_tests_base_timeout") as timeout,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "web_tests_used_expectations_file") as expectation_files
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
{test_filter_clause}
)
SELECT *
FROM results
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
# Very similar to above, but used to get the names of tests that are of
# interest for use as a filter.
TEST_FILTER_QUERY_TEMPLATE = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
WHERE
exported.realm = "chromium:{builder_type}"
    AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT 50
),
results AS (
SELECT
exported.id,
test_id,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations
FROM
    `chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
)
SELECT DISTINCT r.test_id
FROM results r
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
ACTIVE_BUILDER_QUERY_TEMPLATE = """\
WITH
builders AS (
SELECT
(
SELECT value
FROM tr.variant
WHERE key = "builder") as builder_name
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
)
SELECT DISTINCT builder_name
FROM builders
"""
KNOWN_TEST_ID_PREFIXES = [
'ninja://:blink_web_tests/',
'ninja://:webgpu_blink_web_tests',
]
# The default timeout of most web tests is 6 seconds, so use that if we happen
# to get a result that doesn't report its own timeout.
DEFAULT_TIMEOUT = 6
class WebTestBigQueryQuerier(queries_module.BigQueryQuerier):
def _ConvertJsonResultToResultObject(self, json_result):
result = super(WebTestBigQueryQuerier,
self)._ConvertJsonResultToResultObject(json_result)
result.SetDuration(json_result['duration'], json_result['timeout']
or DEFAULT_TIMEOUT)
return result
def _GetRelevantExpectationFilesForQueryResult(self, query_result):
# Files in the query are either relative to the web tests directory or
# are an absolute path. The paths are always POSIX-style. We don't
# handle absolute paths since those typically point to temporary files
# which will not exist locally.
filepaths = []
for f in query_result.get('expectation_files', []):
if posixpath.isabs(f):
continue
f = f.replace('/', os.sep)
f = os.path.join(constants.WEB_TEST_ROOT_DIR, f)
filepaths.append(f)
return filepaths
def _ShouldSkipOverResult(self, result):
# WebGPU web tests are currently unsupported for various reasons.
return 'webgpu/cts.html' in result['test_id']
def _GetQueryGeneratorForBuilder(self, builder, builder_type):
# Look for all tests.
if not self._large_query_mode:
return WebTestFixedQueryGenerator(builder_type, '')
query = TEST_FILTER_QUERY_TEMPLATE.format(builder_type=builder_type)
query_results = self._RunBigQueryCommandsForJsonOutput(
query, {'': {
'builder_name': builder
}})
test_ids = ['"%s"' % r['test_id'] for r in query_results]
if not test_ids:
return None
# Only consider specific test cases that were found to have active
# expectations in the above query. Also perform any initial query
# splitting.
target_num_ids = TARGET_RESULTS_PER_QUERY / self._num_samples
return WebTestSplitQueryGenerator(builder_type, test_ids,
target_num_ids)
def _StripPrefixFromTestId(self, test_id):
# Web test IDs provided by ResultDB are the test name known by the test
# runner prefixed by one of the following:
# "ninja://:blink_web_tests/"
# "ninja://:webgpu_blink_web_tests/"
for prefix in KNOWN_TEST_ID_PREFIXES:
if test_id.startswith(prefix):
return test_id.replace(prefix, '')
raise RuntimeError('Unable to strip prefix from test ID %s' % test_id)
def _GetActiveBuilderQuery(self, builder_type):
return ACTIVE_BUILDER_QUERY_TEMPLATE.format(builder_type=builder_type)
class WebTestFixedQueryGenerator(queries_module.FixedQueryGenerator):
def GetQueries(self):
return QueryGeneratorImpl(self.GetClauses(), self._builder_type)
class WebTestSplitQueryGenerator(queries_module.SplitQueryGenerator):
def GetQueries(self):
return QueryGeneratorImpl(self.GetClauses(), self._builder_type)
def QueryGeneratorImpl(test_filter_clauses, builder_type):
queries = []
for tfc in test_filter_clauses:
queries.append(
BQ_QUERY_TEMPLATE.format(builder_type=builder_type,
test_filter_clause=tfc))
return queries
|
airbnb/streamalert
|
streamalert/scheduled_queries/main.py
|
Python
|
apache-2.0
| 760
| 0
|
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file is the entry point for AWS Lambda.
"""
from streamalert.scheduled_queries.command.application import ScheduledQueries
def handler(event, _):
return ScheduledQueries().run(event)
|
e-mission/e-mission-server
|
emission/core/wrapper/filter_modules.py
|
Python
|
bsd-3-clause
| 7,166
| 0.016467
|
""" Query modules mapping functions to their query strings
structured:
module_name { query_string: function_for_query }
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
import sys
import os
import math
import datetime
import logging
# logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
import random
from uuid import UUID
# Our imports
from emission.core.get_database import get_section_db, get_trip_db, get_routeCluster_db, get_alternatives_db
from . import trip_old as trip
# 0763de67-f61e-3f5d-90e7-518e69793954
# 0763de67-f61e-3f5d-90e7-518e69793954_20150421T230304-0700_0
# helper for getCanonicalTrips
def get_clusters_info(uid):
c_db = get_routeCluster_db()
s_db = get_section_db()
clusterJson = c_db.find_one({"clusters":{"$exists":True}, "user": uid})
if clusterJson is None:
return []
c_info = []
clusterSectionLists= list(clusterJson["clusters"].values())
logging.debug( "Number of section lists for user %s is %s" % (uid, len(clusterSectionLists)))
for sectionList in clusterSectionLists:
first = True
logging.debug( "Number of sections in sectionList for user %s is %s" % (uid, len(sectionList)))
if (len(sectionList) == 0):
# There's no point in returning this cluster, let's move on
continue
distributionArrays = [[] for _ in range(5)]
for section in sectionList:
section_json = s_db.find_one({"_id":section})
if first:
representative_trip = section_json
first = False
appendIfPresent(distributionArrays[0], section_json, "section_start_datetime")
appendIfPresent(distributionArrays[1], section_json, "section_end_datetime")
appendIfPresent(distributionArrays[2], section_json, "section_start_point")
appendIfPresent(distributionArrays[3], section_json, "section_end_point")
appendIfPresent(distributionArrays[4], section_json, "confirmed_mode")
c_info.append((distributionArrays, representative_trip))
return c_info
def appendIfPresent(list,element,key):
if element is not None and key in element:
list.append(element[key])
else:
logging.debug("not appending element %s with key %s" % (element, key))
class AlternativesNotFound(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#returns the top trips for the user, defaulting to the top 10 trips
def getCanonicalTrips(uid, get_representative=False): # number returned isnt used
"""
uid is a UUID object, not a string
"""
# canonical_trip_list = []
# x = 0
# if route clusters return nothing, then get common routes for user
#clusters = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
# c = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
logging.debug('UUID for canonical %s' % uid)
info = get_clusters_info(uid)
cluster_json_list = []
for (cluster, rt) in info:
json_dict = dict()
json_dict["representative_trip"] = rt
json_dict["start_point_distr"] = cluster[2]
json_dict["end_point_distr"] = cluster[3]
json_dict["start_time_distr"] = cluster[0]
json_dict["end_time_distr"] = cluster[1]
json_dict["confirmed_mode_list"] = cluster[4]
cluster_json_list.append(json_dict)
toRet = cluster_json_list
return toRet.__iter__()
#returns all trips to the user
def getAllTrips(uid):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
query = {'user_id':uid, 'type':'move'}
return get_trip_db().find(query)
def getAllTrips_Date(uid, dys):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
return get_trip_db().find(query)
#returns all trips with no alternatives to the user
def getNoAlternatives(uid):
# If pipelineFlags exists then we have started alternatives, and so have
# already scheduled the query. No need to reschedule unless the query fails.
# TODO: If the query fails, then remove the pipelineFlags so that we will
# reschedule.
query = {'user_id':uid, 'type':'move', 'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
def getNoAlternativesPastMonth(uid):
d = datetime.datetime.now() - datetime.timedelta(days=30)
query = {'user_id':uid, 'type':'move',
'trip_start_datetime':{"$gt":d},
'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
# Returns the trips that are suitable for training
# Currently this is:
# - trips that have alternatives, and
# - have not yet been included in a training set
def getTrainingTrips(uid):
return getTrainingTrips_Date(uid, 30)
query = {'user_id':uid, 'type':'move'}
return get_trip_db().find(query)
def getTrainingTrips_Date(uid, dys):
d = datetime.datetime.now() - datetime.timedelta(days=dys)
    query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}, "pipelineFlags":{"$exists":True}}
    #query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
#print get_trip_db().count_documents(query)
return get_trip_db().find(query)
def getAlternativeTrips(trip_id):
#TODO: clean up datetime, and queries here
#d = datetime.datetime.now() - datetime.timedelta(days=6)
#query = {'trip_id':trip_id, 'trip_start_datetime':{"$gt":d}}
query = {'trip_id':trip_id}
alternatives = get_alternatives_db().find(query)
if alternatives.estimated_document_count() > 0:
logging.debug("Number of alternatives for trip %s is %d" % (trip_id, alternatives.estimated_document_count()))
return alternatives
raise AlternativesNotFound("No Alternatives Found")
def getRecentTrips(uid):
raise NotImplementedError()
def getTripsThroughMode(uid):
raise NotImplementedError()
modules = {
# Trip Module
'trips': {
'get_canonical': getCanonicalTrips,
'get_all': getAllTrips,
'get_no_alternatives': getNoAlternatives,
'get_no_alternatives_past_month': getNoAlternativesPastMonth,
'get_most_recent': getRecentTrips,
'get_trips_by_mode': getTripsThroughMode},
# Utility Module
'utility': {
'get_training': getTrainingTrips
},
# Recommender Module
'recommender': {
'get_improve': getCanonicalTrips
},
#Perturbation Module
'perturbation': {},
#Alternatives Module
# note: uses a different collection than section_db
'alternatives': {
'get_alternatives': getAlternativeTrips
}
}
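# Example lookup into the mapping above (added for illustration; the UUID is the
# sample id already quoted in the comments near the top of this module):
#   uid = UUID("0763de67-f61e-3f5d-90e7-518e69793954")   # UUID is imported above
#   recent = modules['trips']['get_no_alternatives_past_month'](uid)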
|
fritzfrancisco/flumeview
|
FlumeView1.2.py
|
Python
|
apache-2.0
| 5,412
| 0.035292
|
import argparse
import datetime
import imutils
import numpy as np
import time
import csv
import cv2
import os.path
#define variable
click_frame = False
divide_x = 0
divide_y = 0
channel_A = 0
channel_B = 0
area_A = 0
area_B = 0
#division function (divide_frame)
def divide_frame(event,x,y,flags,param):
global click_frame
global divide_x,divide_y
global shape
if click_frame == False and event == cv2.EVENT_LBUTTONDOWN:
click_frame = True
divide_x = x
divide_y = y
print("First frame selected")
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=100, help="minimum area size")
#ap.add_argument("-s","--shape",type=str,default="rectangle",help="shape of test arena")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
else:
camera = cv2.VideoCapture(args.get("video", None))
fps = camera.get(cv2.cv.CV_CAP_PROP_FPS)
frame_count = 0
firstFrame = None
#Creating window and initializing mouse callback for division
cv2.namedWindow("Security Feed")
cv2.setMouseCallback("Security Feed",divide_frame)
# After selecting firstFrame no tracking should occur for 5s
#def relay(event,flags,param)
# while (frame_count/fps) < 5:
# break
while True:
    # grab the current frame and initialize the occupied/unoccupied "rectangle"
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    (height, width) = frame.shape[:2]  # frame dimensions, used to position the overlay text below
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
cv2.imshow("Security Feed", frame)
while click_frame == False:
print("Selected Image")
cv2.waitKey(25)
continue
frame_count += 1
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
        (x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
fish_x = x+w/2
fish_y = y+h/2
if fish_x < divide_x and fish_y < divide_y:
channel_A += 1
if fish_x > divide_x and fish_y < divide_y:
area_A += 1
if fish_x < divide_x and fish_y > divide_y:
channel_B += 1
if fish_x > divide_x and fish_y > divide_y:
area_B += 1
#division lines
#tags
fontsize = 1
thickness = 1
    cv2.putText(frame,"{0:.2f}".format(fps)+" fps",(25,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,255)
cv2.putText(frame,"{0:.2f}".format(channel_A/fps),(divide_x-width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(channel_B/fps),(divide_x-width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(area_A/fps),(divide_x+width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(area_B/fps),(divide_x+width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(frame_count/fps)+" time (s)",(divide_x+width/4,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0))
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
# cv2.imshow("Thresh", thresh)
# cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
#print data
print("Total Time [s]: "+"{0:.2f}".format(frame_count/fps))
print("Channel_A [s]: "+"{0:.2f}".format(channel_A/fps))
print("Channel_B [s]: "+"{0:.2f}".format(channel_B/fps))
print("Area_A [s]: "+"{0:.2f}".format(area_A/fps))
print("Area_B [s]: "+"{0:.2f}".format(area_B/fps))
# Print data to file (data.csv)
# Write file and header if file does not already exist
# If file exists data is inserted in a new row and no header is added
# lineterminator = '\n' to remove blank line between rows when program is restarted
file_exists=os.path.isfile("data.csv")
with open('data.csv','a') as csvfile:
dw=csv.DictWriter(csvfile,delimiter=',',fieldnames=["File","Total Time","Channel_A","Channel_B","Area_A","Area_B"],lineterminator='\n')
writer=csv.writer(csvfile)
if file_exists == True:
writer.writerow([args.get("video"),frame_count/fps,channel_A/fps,channel_B/fps,area_A/fps,area_B/fps])
else:
dw.writeheader()
writer.writerow([args.get("video"),frame_count/fps,channel_A/fps,channel_B/fps,area_A/fps,area_B/fps])
|
weblabdeusto/weblabdeusto
|
server/src/experiments/vm/user_manager/manager.py
|
Python
|
bsd-2-clause
| 2,115
| 0.008511
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Luis Rodriguez <luis.rodriguez@opendeusto.es>
#
class ConfigureError(Exception):
""" Configure error of any kind. """
pass
class PermanentConfigureError(ConfigureError):
""" Configure error that would most likely occur again should we retry """
def __str__(self):
return "PermanentConfigureError()"
class TemporaryConfigureError(ConfigureError):
""" Configure error that is likely to not be permanent. Server will retry whenever this is received. """
def __str__(self):
return "TemporaryConfigureError()"
class UserManager(object):
def __init__(self, cfg_manager):
"""
Creates the UserManager.
@param cfg_manager Config Manager which will be used to read configuration parameters
"""
self.cfg = cfg_manager
self.cancelled = False
def configure(self, sid):
"""
Configures the Virtual Machine for use.
@note This method may block for a long time. It might hence be advisable to account for this delay
and to call it from a worker thread.
@note Implementations might require additional information, which should generally be provided
through the configuration script and accessed through the UserManager's config reader.
@param sid Unique session id of the user.
@return None
@raise ConfigureError If the configure attempt failed. Failure and the ConfigureError should be either
a PermanentConfigureError or a TemporaryConfigureError. Should a different kind of exception be
raised however, it would be considered permanent.
"""
pass
def cancel(self):
self.cancelled = True
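# Minimal sketch of a concrete subclass (added illustration only; the class name
# and the failure conditions below are hypothetical, not part of this module):
#
# class DummyUserManager(UserManager):
#     def configure(self, sid):
#         if self.cancelled:
#             # transient condition: the server retries on TemporaryConfigureError
#             raise TemporaryConfigureError()
#         if not sid:
#             # unusable input: retrying will not help, so fail permanently
#             raise PermanentConfigureError()
#         # ... push the session id into the virtual machine here ...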
|
davidhrbac/spacewalk
|
client/rhel/rhn-client-tools/src/up2date_client/rhnserver.py
|
Python
|
gpl-2.0
| 8,608
| 0.001859
|
# rhn-client-tools
#
# Copyright (c) 2006--2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
import rpcServer
import up2dateErrors
import capabilities
import sys
import xmlrpclib
import OpenSSL
class _DoCallWrapper(object):
"""
A callable object that will handle multiple levels of attributes,
and catch exceptions.
"""
def __init__(self, server, method_name):
self._server = server
self._method_name = method_name
def __getattr__(self, method_name):
""" Recursively build up the method name to pass to the server. """
return _DoCallWrapper(self._server,
"%s.%s" % (self._method_name, method_name))
def __call__(self, *args, **kwargs):
""" Call the method. Catch faults and translate them. """
        method = getattr(self._server, self._method_name)
try:
return rpcServer.doCall(method, *args, **kwargs)
except xmlrpclib.Fault:
raise (self.__exception_from_fault(sys.exc_info()[1]), None, sys.exc_info()[2])
except OpenSSL.SSL.Error:
# TODO This should probably be moved to rhnlib and raise an
# exception that subclasses OpenSSL.SSL.Error
# TODO Is there a better way to detect cert failures?
error = str(sys.exc_info()[1])
error = error.strip("[()]")
pieces = error.split(',')
message = ""
if len(pieces) > 2:
message = pieces[2]
elif len(pieces) == 2:
message = pieces[1]
message = message.strip(" '")
if message == 'certificate verify failed':
raise (up2dateErrors.SSLCertificateVerifyFailedError(), None, sys.exc_info()[2])
else:
raise (up2dateErrors.NetworkError(message), None, sys.exc_info()[2])
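    # Added illustration: given w = _DoCallWrapper(server, "registration"),
    # accessing w.welcome_message returns a new wrapper whose _method_name is
    # "registration.welcome_message"; calling that wrapper dispatches the dotted
    # name through rpcServer.doCall in __call__ above.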
def __exception_from_fault(self, fault):
if fault.faultCode == -3:
# This username is already taken, or the password is incorrect.
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -2:
# Invalid username and password combination.
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -110:
# Account is disabled
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -1:
exception = up2dateErrors.UnknownMethodException(fault.faultString)
elif fault.faultCode == -13:
# Username is too short.
exception = up2dateErrors.LoginMinLengthError(fault.faultString)
elif fault.faultCode == -14:
# too short password
exception = up2dateErrors.PasswordMinLengthError(
fault.faultString)
elif fault.faultCode == -15:
# bad chars in username
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -16:
# Invalid product registration code.
# TODO Should this really be a validation error?
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -19:
# invalid
exception = up2dateErrors.NoBaseChannelError(fault.faultString)
elif fault.faultCode == -31:
# No entitlement
exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
elif fault.faultCode == -36:
# rhnException.py says this means "Invalid action."
# TODO find out which is right
exception = up2dateErrors.PasswordError(fault.faultString)
elif abs(fault.faultCode) == 49:
exception = up2dateErrors.AbuseError(fault.faultString)
elif abs(fault.faultCode) == 60:
exception = up2dateErrors.AuthenticationTicketError(fault.faultString)
elif abs(fault.faultCode) == 74:
exception = up2dateErrors.RegistrationDeniedError()
elif abs(fault.faultCode) == 105:
exception = up2dateErrors.RhnUuidUniquenessError(fault.faultString)
elif fault.faultCode == 99:
exception = up2dateErrors.DelayError(fault.faultString)
elif abs(fault.faultCode) == 91:
exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
elif fault.faultCode == -106:
# Invalid username.
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -600:
# Invalid username.
exception = up2dateErrors.InvalidRegistrationNumberError(fault.faultString)
elif fault.faultCode == -601:
# No entitlements associated with given hardware info
exception = up2dateErrors.NotEntitlingError(fault.faultString)
elif fault.faultCode == -602:
# No entitlements associated with reg num
exception = up2dateErrors.NotEntitlingError(fault.faultString)
elif fault.faultCode == -2001 or fault.faultCode == -700:
exception = up2dateErrors.AuthenticationOrAccountCreationError(
fault.faultString)
elif fault.faultCode == -701:
exception = up2dateErrors.PasswordMaxLengthError(
fault.faultString)
elif fault.faultCode == -61:
exception = up2dateErrors.ActivationKeyUsageLimitError(
fault.faultString)
elif fault.faultCode == -5:
exception = up2dateErrors.UnableToCreateUser(
fault.faultString)
else:
exception = up2dateErrors.CommunicationError(fault.faultString)
return exception
class RhnServer(object):
"""
An rpc server object that calls doCall for you, and catches lower
level exceptions
"""
def __init__(self, serverOverride=None, timeout=None):
self._server = rpcServer.getServer(serverOverride=serverOverride,
timeout=timeout)
self._capabilities = None
def __get_capabilities(self):
if self._capabilities is None:
headers = self._server.get_response_headers()
if headers is None:
self.registration.welcome_message()
headers = self._server.get_response_headers()
self.
|
jtauber/sgf
|
setup.py
|
Python
|
mit
| 552
| 0
|
from setuptools import setup
setup(
name="sgf",
version="0.5",
description="Python library for reading and writing Smart Game Format",
license="MIT",
url="http://github.com/jtauber/sgf",
    author="James Tauber",
author_email="jtauber@jtauber.com",
py_modules=["sgf"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Topic :: Games/Entertainment :: Board Games",
"Topic :: Utilities",
],
)
|
ruibarreira/linuxtrail
|
usr/lib/virtualbox/vboxshell.py
|
Python
|
gpl-3.0
| 120,819
| 0.00591
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# $Id: vboxshell.py $
"""
VirtualBox Python Shell.
This program is a simple interactive shell for VirtualBox. You can query
information and issue commands from a simple command line.
It also provides you with examples on how to use VirtualBox's Python API.
This shell is even somewhat documented, supports TAB-completion and
history if you have Python readline installed.
Finally, shell allows arbitrary custom extensions, just create
.VirtualBox/shexts/ and drop your extensions there.
Enjoy.
P.S. Our apologies for the code quality.
"""
__copyright__ = \
"""
Copyright (C) 2009-2013 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision: 92173 $"
import os, sys
import traceback
import shlex
import time
import re
import platform
from optparse import OptionParser
from pprint import pprint
#
# Global Variables
#
g_fBatchMode = False
g_sScriptFile = None
g_sCmd = None
g_fHasReadline = True
try:
import readline
import rlcompleter
except ImportError:
g_fHasReadline = False
g_sPrompt = "vbox> "
g_fHasColors = True
g_dTermColors = {
'red': '\033[31m',
'blue': '\033[94m',
'green': '\033[92m',
'yellow': '\033[93m',
'magenta': '\033[35m',
'cyan': '\033[36m'
}
def colored(strg, color):
"""
Translates a string to one including coloring settings, if enabled.
"""
if not g_fHasColors:
return strg
col = g_dTermColors.get(color, None)
if col:
return col+str(strg)+'\033[0m'
return strg
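# Worked example (added): with g_fHasColors left at its default of True,
#   colored("error", "red")  ->  "\033[31merror\033[0m"
# while an unknown color name just returns the string unchanged.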
if g_fHasReadline:
class CompleterNG(rlcompleter.Completer):
def __init__(self, dic, ctx):
self.ctx = ctx
rlcompleter.Completer.__init__(self, dic)
def complete(self, text, state):
"""
taken from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496812
"""
if False and text == "":
return ['\t', None][state]
else:
return rlcompleter.Completer.complete(self, text, state)
def canBePath(self, _phrase, word):
return word.startswith('/')
def canBeCommand(self, phrase, _word):
spaceIdx = phrase.find(" ")
begIdx = readline.get_begidx()
firstWord = (spaceIdx == -1 or begIdx < spaceIdx)
if firstWord:
return True
if phrase.startswith('help'):
return True
return False
def canBeMachine(self, phrase, word):
return not self.canBePath(phrase, word) and not self.canBeCommand(phrase, word)
def global_matches(self, text):
"""
Compute matches when text is a simple name.
Return a list of all names currently defined
in self.namespace that match.
"""
matches = []
phrase = readline.get_line_buffer()
try:
if self.canBePath(phrase, text):
(directory, rest) = os.path.split(text)
c = len(rest)
for word in os.listdir(directory):
if c == 0 or word[:c] == rest:
matches.append(os.path.join(directory, word))
if self.canBeCommand(phrase, text):
c = len(text)
for lst in [ self.namespace ]:
for word in lst:
if word[:c] == text:
matches.append(word)
if self.canBeMachine(phrase, text):
c = len(text)
for mach in getMachines(self.ctx, False, True):
# although it has autoconversion, we need to cast
# explicitly for subscripts to work
word = re.sub("(?<!\\\\) ", "\\ ", str(mach.name))
if word[:c] == text:
matches.append(word)
word = str(mach.id)
if word[:c] == text:
matches.append(word)
except Exception, e:
printErr(self.ctx, e)
if g_fVerbose:
traceback.print_exc()
return matches
def autoCompletion(cmds, ctx):
if not g_fHasReadline:
return
comps = {}
for (key, _value) in cmds.items():
comps[key] = None
completer = CompleterNG(comps, ctx)
readline.set_completer(completer.complete)
delims = readline.get_completer_delims()
readline.set_completer_delims(re.sub("[\\./-]", "", delims)) # remove some of the delimiters
readline.parse_and_bind("set editing-mode emacs")
# OSX need it
if platform.system() == 'Darwin':
# see http://www.certif.com/spec_help/readline.html
readline.parse_and_bind ("bind ^I rl_complete")
readline.parse_and_bind ("bind ^W ed-delete-prev-word")
# Doesn't work well
# readline.parse_and_bind ("bind ^R em-inc-search-prev")
readline.parse_and_bind("tab: complete")
g_fVerbose = False
def split_no_quotes(s):
return shlex.split(s)
def progressBar(ctx, progress, wait=1000):
try:
while not progress.completed:
print "%s %%\r" % (colored(str(progress.percent), 'red')),
sys.stdout.flush()
progress.waitForCompletion(wait)
ctx['global'].waitForEvents(0)
if int(progress.resultCode) != 0:
reportError(ctx, progress)
return 1
except KeyboardInterrupt:
print "Interrupted."
ctx['interrupt'] = True
if progress.cancelable:
print "Canceling task..."
progress.cancel()
return 0
def printErr(_ctx, e):
oVBoxMgr = _ctx['global'];
if oVBoxMgr.errIsOurXcptKind(e):
print colored('%s: %s' % (oVBoxMgr.xcptToString(e), oVBoxMgr.xcptGetMessage(e)), 'red');
else:
print colored(str(e), 'red')
def reportError(_ctx, progress):
errorinfo = progress.errorInfo
if errorinfo:
print colored("Error in module '%s': %s" % (errorinfo.component, errorinfo.text), 'red')
def colCat(_ctx, strg):
return colored(strg, 'magenta')
def colVm(_ctx, vmname):
return colored(vmname, 'blue')
def colPath(_ctx, path):
return colored(path, 'green')
def colSize(_ctx, byte):
return colored(byte, 'red')
def colPci(_ctx, pcidev):
return colored(pcidev, 'green')
def colDev(_ctx, pcidev):
return colored(pcidev, 'cyan')
def colSizeM(_ctx, mbyte):
return colored(str(mbyte)+'M', 'red')
def createVm(ctx, name, kind):
vbox = ctx['vb']
mach = vbox.createMachine("", name, [], kind, "")
mach.saveSettings()
print "created machine with UUID", mach.id
vbox.registerMachine(mach)
# update cache
getMachines(ctx, True)
def removeVm(ctx, mach):
uuid = mach.id
print "removing machine ", mach.name, "with UUID", uuid
cmdClosedVm(ctx, mach, detachVmDevice, ["ALL"])
mach = mach.unregister(ctx['global'].constants.CleanupMode_Full)
if mach:
mach.deleteSettings()
# update cache
getMachines(ctx, True)
def startVm(ctx, mach, vmtype):
vbox = ctx['vb']
perf = ctx['perf']
session = ctx['global'].getSessionObject(vbox)
progress = mach.launchVMProcess(session, vmtype, "")
if progressBar(ctx, progress, 100) and int(progress.resultCode) == 0:
# we ignore exceptions to allow starting VM even if
# perf collector cannot be started
if perf:
try:
perf.setup(['*'], [mach], 10, 15)
|
great-expectations/great_expectations
|
tests/build_index_page.py
|
Python
|
apache-2.0
| 628
| 0
|
import glob
json_files = glob.glob("tests/**/output/**/*.json", recursive=True)
html_files = glob.glob("tests/**/output/**/*.html", recursive=True)
html_list = ""
for f_ in html_files:
    html_list += '\t<li><a href="{}">{}</li>\n'.format(
f_[6:],
f_.split(".")[-2],
)
json_list = ""
for f_ in json_files:
json_list += '\t<li><a href="{}">{}</li>\n'.format(
f_[6:],
f_.split(".")[-2],
)
html_file = """
<html>
<body>
<h3>HTML</h3>
<ul>
{}
</ul>
<br/><br/>
<h3>JSON</h3>
<ul>
{}
</ul>
</body>
</html>
""".format(
html_list, json_list
)
print(html_file)
|
shriyanka/daemo-forum
|
spirit/comment/serializers.py
|
Python
|
mit
| 715
| 0.051748
|
from models import Comment
from ..user.serializers import UserProfileSerializer
from rest_framework import serializers
class CommentSerializer(serializers.ModelSerializer):
username = serializers.SerializerMethodField()
class Meta:
model = Comment
fields = ("id","user","username", "topic","comment","comment_html", "action", "date","is_removed","is_modified","ip_address",
"modified_count","likes_count")
        read_only_fields = ("user","comment_html","action","date","is_removed","is_modified","modified_count","likes_count")
def get_username(self,obj):
return obj.user.username
def create(self,**kwargs):
comment = Comment.objects.create(user = kwargs['user'],**self.validated_data)
return comment
|
blueman-project/blueman
|
test/main/test_imports.py
|
Python
|
gpl-3.0
| 1,075
| 0.002791
|
import os.path
import pkgutil
from unittest import TestCase, TestSuite
class TestImports(TestCase):
def __init__(self, mod_name, import_error):
name = f"test_{mod_name.replace('.', '_')}_import"
def run():
try:
__import__(mod_name)
except ImportError as e:
self.assertIsNotNone(import_error)
self.assertEqual(e.msg, import_error)
setattr(self, name, run)
super().__init__(name)
def load_tests(*_args):
    expected_exceptions = {
        "blueman.main.NetworkManager": "NM python bindings not found.",
"blueman.main.PulseAudioUtils": "Could not load pulseaudio shared library",
}
test_cases = TestSuite()
home, subpath = os.path.dirname(__file__).rsplit("/test/", 1)
    for package in pkgutil.iter_modules([f"{home}/blueman/{subpath}"], f"blueman.{subpath.replace('/', '.')}."):
test_cases.addTest(TestImports(package.name, expected_exceptions.get(package.name)))
assert test_cases.countTestCases() > 0
return test_cases
|
F5Networks/f5-common-python
|
f5/bigiq/cm/device/licensing/__init__.py
|
Python
|
apache-2.0
| 1,033
| 0
|
# coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
REST URI
    ``http://localhost/mgmt/cm/device/licensing/pool/regkey``
REST Kind
N/A -- HTTP GET returns an error
"""
from f5.bigiq.cm.device.licensing.pool import Pool
from f5.bigiq.resource import OrganizingCollection
class Licensing(OrganizingCollection):
def __init__(self, device):
super(Licensing, self).__init__(device)
self._meta_data['allowed_lazy_attributes'] = [
Pool
]
|
unho/translate
|
translate/convert/flatxml2po.py
|
Python
|
gpl-2.0
| 3,817
| 0.000262
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 BhaaL
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert flat XML files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/flatxml2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import flatxml, po
class flatxml2po:
"""Convert a single XML file to a single PO file."""
SourceStoreClass = flatxml.FlatXMLFile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Initialize the converter."""
self.inputfile = inputfile
self.outputfile = outputfile
self.source_store = self.SourceStoreClass(inputfile,
root_name=root,
value_name=value,
key_name=key,
namespace=ns)
self.target_store = self.TargetStoreClass()
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass.buildfromunit(unit)
return target_unit
    def convert_store(self):
"""Convert a single source file to a target format file."""
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def run(self):
"""Run the converter."""
self.convert_store()
if self.target_store.isempty():
return 0
self.target_store.serialize(self.outputfile)
return 1
def run_converter(inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Wrapper around the converter."""
return flatxml2po(inputfile, outputfile, templatefile,
root, value, key, ns).run()
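# Hypothetical usage of the wrapper above (added for illustration; the file names
# and the element/attribute names are invented, not taken from this module):
#   with open("strings.xml", "rb") as inp, open("strings.po", "wb") as outp:
#       run_converter(inp, outp, root="resources", value="string", key="name")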
formats = {
"xml": ("po", run_converter),
}
def main(argv=None):
parser = convert.ConvertOptionParser(formats,
description=__doc__)
parser.add_option("-r", "--root", action="store", dest="root",
default="root",
help='name of the XML root element (default: "root")')
parser.add_option("-v", "--value", action="store", dest="value",
default="str",
help='name of the XML value element (default: "str")')
parser.add_option("-k", "--key", action="store", dest="key",
default="key",
help='name of the XML key attribute (default: "key")')
parser.add_option("-n", "--namespace", action="store", dest="ns",
default=None,
help="XML namespace uri (default: None)")
parser.passthrough.append("root")
parser.passthrough.append("value")
parser.passthrough.append("key")
parser.passthrough.append("ns")
parser.run(argv)
if __name__ == "__main__":
main()
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/345_MethodToRaiseTheDead/__init__.py
|
Python
|
gpl-3.0
| 5,090
| 0.0389
|
# Made by mtrix
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "345_MethodToRaiseTheDead"
ADENA = 57
VICTIMS_ARM_BONE = 4274
VICTIMS_THIGH_BONE = 4275
VICTIMS_SKULL = 4276
VICTIMS_RIB_BONE = 4277
VICTIMS_SPINE = 4278
USELESS_BONE_PIECES = 4280
POWDER_TO_SUMMON_DEAD_SOULS = 4281
BILL_OF_IASON_HEINE = 4310
CHANCE = 15
CHANCE2 = 50
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "1" :
st.set("cond","1")
st.setState(STARTED)
      htmltext = "30970-02.htm"
st.playSound("ItemSound.quest_accept")
elif event == "2" :
st.set("cond","2")
htmltext = "30970-06.htm"
elif event == "3" :
if st.getQuestItemsCount(ADENA)>=1000 :
st.takeItems(ADENA,1000)
st.giveItems(POWDER_TO_SUMMON_DEAD_SOULS,1)
st.set("cond","3")
htmltext = "30912-03.htm"
st.playSound("ItemSound.quest_itemget")
else :
htmltext = "<html><body>You dont have enough adena!</body></html>"
elif event == "4" :
htmltext = "30973-02.htm"
st.takeItems(POWDER_TO_SUMMON_DEAD_SOULS,-1)
st.takeItems(VICTIMS_ARM_BONE,-1)
st.takeItems(VICTIMS_THIGH_BONE,-1)
st.takeItems(VICTIMS_SKULL,-1)
st.takeItems(VICTIMS_RIB_BONE,-1)
st.takeItems(VICTIMS_SPINE,-1)
st.set("cond","6")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != 30970 and id != STARTED : return htmltext
level = player.getLevel()
cond = st.getInt("cond")
amount = st.getQuestItemsCount(USELESS_BONE_PIECES)
if npcId==30970 :
if id == CREATED :
if level>=35 :
htmltext = "30970-01.htm"
else :
htmltext = "<html><body>(This is a quest that can only be performed by players of level 35 and above.)</body></html>"
st.exitQuest(1)
elif cond==1 and st.getQuestItemsCount(VICTIMS_ARM_BONE) and st.getQuestItemsCount(VICTIMS_THIGH_BONE) and st.getQuestItemsCount(VICTIMS_SKULL) and st.getQuestItemsCount(VICTIMS_RIB_BONE) and st.getQuestItemsCount(VICTIMS_SPINE) :
htmltext = "30970-05.htm"
elif cond==1 and (st.getQuestItemsCount(VICTIMS_ARM_BONE)+st.getQuestItemsCount(VICTIMS_THIGH_BONE)+st.getQuestItemsCount(VICTIMS_SKULL)+st.getQuestItemsCount(VICTIMS_RIB_BONE)+st.getQuestItemsCount(VICTIMS_SPINE)<5) :
htmltext = "30970-04.htm"
elif cond==7 :
htmltext = "30970-07.htm"
st.set("cond","1")
st.giveItems(ADENA,amount*238)
st.giveItems(BILL_OF_IASON_HEINE,st.getRandom(7)+1)
st.takeItems(USELESS_BONE_PIECES,-1)
if npcId==30912 :
if cond == 2 :
htmltext = "30912-01.htm"
st.playSound("ItemSound.quest_middle")
elif cond == 3 :
htmltext = "<html><body>What did the urn say?</body></html>"
elif cond == 6 :
htmltext = "30912-04.htm"
st.set("cond","7")
if npcId==30973 :
if cond==3 :
htmltext = "30973-01.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
random = st.getRandom(100)
if random<=CHANCE :
if not st.getQuestItemsCount(VICTIMS_ARM_BONE) :
st.giveItems(VICTIMS_ARM_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_THIGH_BONE) :
st.giveItems(VICTIMS_THIGH_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_SKULL) :
st.giveItems(VICTIMS_SKULL,1)
elif not st.getQuestItemsCount(VICTIMS_RIB_BONE) :
st.giveItems(VICTIMS_RIB_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_SPINE) :
st.giveItems(VICTIMS_SPINE,1)
if random<=CHANCE2 :
st.giveItems(USELESS_BONE_PIECES,st.getRandom(8)+1)
return
QUEST = Quest(345,qn,"Method To Raise The Dead")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30970)
QUEST.addTalkId(30970)
QUEST.addTalkId(30912)
QUEST.addTalkId(30973)
STARTED.addQuestDrop(30970,VICTIMS_ARM_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_THIGH_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_SKULL,1)
STARTED.addQuestDrop(30970,VICTIMS_RIB_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_SPINE,1)
STARTED.addQuestDrop(30912,POWDER_TO_SUMMON_DEAD_SOULS,1)
QUEST.addKillId(20789)
QUEST.addKillId(20791)
|
City-of-Bloomington/green-rental
|
listing/models.py
|
Python
|
agpl-3.0
| 2,921
| 0.015063
|
from django.db import models
from django.contrib.auth.models import User
from building.models import Building, Unit
# Create your models here.
class Listing(models.Model):
"""
An option to lease, rent, or sublease a specific Unit
"""
CYCLE_CHOICES = (
('year', 'Year'),
('month', 'Month'),
('week', 'Week'),
('day', 'Day'),
)
#who is listing the unit:
#pe
|
rson = models.ForeignKey(Person)
|
#might be better to just use a User account
#this should be required (setting blank and null to assist with migrations)
user = models.ForeignKey(User, blank=True, null=True)
#even though the building is available by way of the Unit
#it may be easier to look at building
#especially when limiting search results on a map
#
#also, it may be better to schedule a nightly task to update/cache
#the number of listings that are available in a building
#otherwise that could be an expensive search
#
#this should be required (setting blank and null to assist with migrations)
building = models.ForeignKey(Building, related_name="listings", blank=True, null=True)
#the unit available
#unit = models.ForeignKey(Unit, related_name="listings", blank=True, null=True)
unit = models.ForeignKey(Unit, related_name="listings")
#sublease, standard?
lease_type = models.CharField(max_length=200, default="Standard")
lease_term = models.CharField(max_length=200, default="12 Months")
active = models.BooleanField(default=True)
#duplicating available_start and rent on unit with current listing
#that will make database lookups simpler
#but it will require coordination when adding a new listing.
#optional
available_start = models.DateTimeField()
#might be useful for subleases:
available_end = models.DateTimeField()
#these may be duplicated at the unit level:
#aka rent? (previously cost)
rent = models.FloatField()
rent_cycle = models.CharField(max_length=10, choices=CYCLE_CHOICES, default="month")
deposit = models.FloatField()
description = models.TextField()
#are pets allowed? if so what kind?
#pets = models.CharField(max_length=200)
#what utilities are included: (to help estimate total cost)
#
#this is set at the building level
#should be consistent within a building,
#and that makes things easier to read if it's not duplicated here:
#TODO:
#application (to apply for lease)
#link to a default one for manager if available
#otherwise allow one to be attached?
#application = models.ForeignKey(BuildingDocument)
#TODO:
#allow photos *(more than 1)* to be submitted for the listing
#but associate them with the unit
added = models.DateTimeField('date published', auto_now_add=True)
updated = models.DateTimeField('date updated', auto_now=True)
|
rabernat/satdatatools
|
satdatatools/aggregator.py
|
Python
|
mit
| 1,349
| 0.005189
|
import numpy as np
from scipy.io import netcdf_file
import bz2
import os
from fnmatch import fnmatch
from numba import jit
@jit
def binsum2D(data, i, j, Nx, Ny):
data_binned = np.zeros((Ny,Nx), dtype=data.dtype)
N = len(data)
for n in range(N):
data_binned[j[n],i[n]] += data[n]
return data_binned
class LatLonAggregator(object):
"""A class for aggregating L2 data into a gridded dataset."""
    def __init__(self, dlon=1., dlat=1., lonlim=(-180,180), latlim=(-90,90)):
self.dlon = dlon
self.dlat = dlat
self.lonmin = lonlim[0]
self.lonmax = lonlim[1]
self.latmin = latlim[0]
self.latmax = latlim[1]
# define grids
self.lon = np.arange(self.lonmin, self.lonmax, dlon)
        self.lat = np.arange(self.latmin, self.latmax, dlat)
self.Nx, self.Ny = len(self.lon), len(self.lat)
self.lonc = self.lon + self.dlon/2
self.latc = self.lat + self.dlat/2
def binsum(self, data, lon, lat):
"""Bin the data into the lat-lon grid.
Returns gridded dataset."""
i = np.digitize(lon.ravel(), self.lon)
j = np.digitize(lat.ravel(), self.lat)
return binsum2D(data.ravel(), i, j, self.Nx, self.Ny)
def zeros(self, dtype=np.dtype('f4')):
return np.zeros((self.Ny, self.Nx), dtype=dtype)
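# Minimal usage sketch (added; the coordinates below are synthetic, not real L2 data):
#   agg = LatLonAggregator(dlon=2., dlat=2.)
#   lon = np.array([10.3, 10.9, -45.0])
#   lat = np.array([51.2, 51.4, -12.8])
#   grid = agg.binsum(np.ones_like(lon), lon, lat)
#   # grid.shape == (agg.Ny, agg.Nx) == (90, 180), holding per-cell sums of the data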
|
bryanperris/winN64dev
|
mips64-elf/mips64-elf/lib/el/libstdc++.a-gdb.py
|
Python
|
gpl-2.0
| 2,328
| 0.006873
|
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/usr/mips64-elf/share/gcc-4.8.4/python'
libdir = '/usr/mips64-elf/mips64-elf/lib/el'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually.  In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
ENCODE-DCC/encoded
|
src/encoded/tests/test_upgrade_atac_alignment_enrichment_quality_metric.py
|
Python
|
mit
| 467
| 0.002141
|
def test_upgrade_atac_alignment_enrichment_quality_metric_1_2(
upgrader, atac_alignment_enrichment_quality_metric_1
):
value = upgrader.upgrade(
        'atac_alignment_enrichment_quality_metric',
atac_alignment_enrichment_quality_metric_1,
current_version='1',
target_version='2',
)
    assert value['schema_version'] == '2'
assert 'fri_blacklist' not in value
assert value['fri_exclusion_list'] == 0.0013046877081284722
|
epitron/youtube-dl
|
youtube_dl/extractor/pornovoisines.py
|
Python
|
unlicense
| 4,003
| 0.002001
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornovoisines\.com/videos/show/(?P<id>\d+)/(?P<display_id>[^/.]+)'
_TEST = {
'url': 'http://www.pornovoisines.com/videos/show/919/recherche-appartement.html',
'md5': '6f8aca6a058592ab49fe701c8ba8317b',
'info_dict': {
'id': '919',
'display_id': 'recherche-appartement',
'ext': 'mp4',
'title': 'Recherche appartement',
'description': 'md5:fe10cb92ae2dd3ed94bb4080d11ff493',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140925',
'duration': 120,
'view_count': int,
'average_rating': float,
'categories': ['Débutante', 'Débutantes', 'Scénario', 'Sodomie'],
'age_limit': 18,
'subtitles': {
'fr': [{
'ext': 'vtt',
}]
},
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
settings_url = self._download_json(
'http://www.pornovoisines.com/api/video/%s/getsettingsurl/' % video_id,
video_id, note='Getting settings URL')['video_settings_url']
settings = self._download_json(settings_url, video_id)['data']
formats = []
for kind, data in settings['variants'].items():
if kind == 'HLS':
formats.extend(self._extract_m3u8_formats(
data, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
elif kind == 'MP4':
for item in data:
formats.append({
'url': item['url'],
'height': item.get('height'),
'bitrate': item.get('bitrate'),
})
self._sort_formats(formats)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
# The webpage has a bug - there's no space between "thumb" and src=
thumbnail = self._html_search_regex(
r'<img[^>]+class=([\'"])thumb\1[^>]*src=([\'"])(?P<url>[^"]+)\2',
webpage, 'thumbnail', fatal=False, group='url')
upload_date = unified_strdate(self._search_regex(
r'Le\s*<b>([\d/]+)', webpage, 'upload date', fatal=False))
duration = settings.get('main', {}).get('duration')
view_count = int_or_none(self._search_regex(
r'(\d+) vues', webpage, 'view count', fatal=False))
average_rating = self._search_regex(
r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
if average_rating:
average_rating = float_or_none(average_rating.replace(',', '.'))
categories = self._html_search_regex(
r'(?s)Catégories\s*:\s*<b>(.+?)</b>', webpage, 'categories', fatal=False)
if categories:
            categories = [category.strip() for category in categories.split(',')]
subtitles = {'fr': [{
'url': subtitle,
} for subtitle in settings.get('main', {}).get('vtt_tracks', {}).values()]}
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'average_rating': average_rating,
'categories': categories,
'age_limit': 18,
'subtitles': subtitles,
}
|
Eficent/purchase-workflow
|
procurement_purchase_no_grouping/models/purchase_order.py
|
Python
|
agpl-3.0
| 1,521
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015 AvanzOsc (http://www.avanzosc.es)
# Copyright 2015-2016 - Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import api, models
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
make_po_conditions = {
'partner_id', 'state', 'picking_type_id', 'company_id',
|
'dest_address_id',
}
# Restrict the empty return for these conditions
        if (self.env.context and
self.env.context.get('grouping', 'standard') == 'order' and
make_po_conditions.issubset(set(x[0] for x in args))):
return self.browse()
return super(PurchaseOrder, self).search(
args, offset=offset, limit=limit, order=order, count=count)
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
# Restrict the empty return for these conditions
if (self.env.context and
self.env.context.get('grouping', 'standard') == 'line' and
len(args) == 1 and args[0][0] == 'order_id' and
args[0][1] == 'in'):
return self.browse()
return super(PurchaseOrderLine, self).search(
args, offset=offset, limit=limit, order=order, count=count)
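# Editor's note (illustrative, hypothetical records): with grouping='order' in
# the context, a procurement search such as
#
#   env['purchase.order'].with_context(grouping='order').search([
#       ('partner_id', '=', partner.id), ('state', '=', 'draft'),
#       ('picking_type_id', '=', picking_type.id),
#       ('company_id', '=', company.id), ('dest_address_id', '=', False),
#   ])
#
# returns an empty recordset even when a matching draft order exists, so a new
# purchase order is created for every procurement instead of grouping lines.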
|
dbrattli/OSlash
|
oslash/typing/applicative.py
|
Python
|
apache-2.0
| 1,869
| 0.00214
|
from abc import abstractmethod
from typing import Callable, TypeVar, Protocol
from typing_extensions import runtime_checkable
TSource = TypeVar('TSource')
TResult = TypeVar('TResult')
@runtime_checkable
class Applicative(Protocol[TSource, TResult]):
"""Applicative.
Applicative functors are functors with some extra properties.
Most importantly, they allow you to apply functions inside the
functor (hence the name).
To learn more about Applicative functors:
* http://www.davesquared.net/2012/05/fp-newbie-learns-applicatives.html
"""
@abstractmethod
def apply(self, something):
"""Apply wrapped callable.
Python: apply(self: Applicative, something: Applicative[Callable[[A], B]]) -> Applicative
Haskell: (<*>) :: f (a -> b) -> f a -> f b.
        Apply (<*>) is a beefed up fmap. It takes a functor value that
has a function in it and another functor, and extracts that
        function from the first functor and then maps it over the second
one.
"""
raise NotImplementedError
#def __mul__(self, something):
# """(<*>) :: f (a -> b) -> f a -> f b.
# Provide the * as an infix version of apply() since we cannot
# represent the Haskell's <*> operator in Python.
# """
# return self.apply(something)
#def lift_a2(self, func, b):
# """liftA2 :: (Applicative f) => (a -> b -> c) -> f a -> f b -> f c."""
# return func % self * b
@classmethod
@abstractmethod
def pure(cls, fn: Callable[[TSource], TResult]) -> 'Applicative[TSource, TResult]':
"""Applicative functor constructor.
Use pure if you're dealing with values in an applicative context
(using them with <*>); otherwise, stick to the default class
constructor.
"""
raise NotImplementedError
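# Editor's sketch of a type satisfying this protocol (not part of OSlash;
# `Box` is a hypothetical single-value container). Following the convention of
# the commented-out __mul__ above, the left operand wraps the callable:
class Box:
    def __init__(self, value):
        self._value = value

    @classmethod
    def pure(cls, fn):
        # Put a callable (or any plain value) into the applicative context.
        return cls(fn)

    def apply(self, something):
        # Extract the wrapped callable and map it over the other container.
        return Box(self._value(something._value))

# Box.pure(f).apply(Box(x)) behaves like Box(f(x)):
assert Box.pure(lambda x: x + 1).apply(Box(41))._value == 42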
|
python-acoustics/python-acoustics
|
acoustics/standards/iso_1996_2_2007.py
|
Python
|
bsd-3-clause
| 25,331
| 0.003948
|
"""
ISO 1996-2:2007
ISO 1996-2:2007 describes how sound pressure levels can be determined by direct measurement,
by extrapolation of measurement results by means of calculation, or exclusively by calculation,
intended as a basis for assessing environmental noise.
"""
import numpy as np
import pandas as pd
from scipy.signal import welch
from scipy.stats import linregress
import matplotlib.pyplot as plt
from acoustics.decibel import dbsum
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
import weakref
from tabulate import tabulate
TONE_WITHIN_PAUSE_CRITERION_DB = 6.0
"""A tone may exist when the level of any line in the noise pause is 6 dB or more about...."""
TONE_BANDWIDTH_CRITERION_DB = 3.0
"""Bandwidth of the detected peak."""
TONE_LINES_CRITERION_DB = 6.0
"""All lines with levels within 6 dB of the maximum level are classified as tones."""
TONE_SEEK_CRITERION = 1.0
"""Tone seek criterion."""
REGRESSION_RANGE_FACTOR = 0.75
"""Range of regression is usually +/- 0.75 critical bandwidth."""
_WINDOW_CORRECTION = {
'hanning': -1.8,
}
def window_correction(window):
"""Correction to be applied to :math:`L_{pt}` due to use of window."""
try:
return _WINDOW_CORRECTION[window]
except KeyError:
raise ValueError("Window correction is not available for specified window.")
def critical_band(frequency):
"""Bandwidth of critical band of frequency.
:param frequency: Center frequency of tone.
    :returns: (center, lower, upper, bandwidth) of the band.
"""
if isinstance(frequency, np.ndarray):
center = frequency.copy()
center[frequency < 50.0] = 50.0
else:
center = 50.0 if frequency < 50 else frequency
bandwidth = (center > 500.0) * (center * 0.20) + (center <= 500.0) * 100.0
upper = center + bandwidth / 2.0
lower = center - bandwidth / 2.0
return center, lower, upper, bandwidth
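# Worked example (editor's illustration): above 500 Hz the bandwidth is 20 % of
# the centre frequency, otherwise a fixed 100 Hz band is used, e.g.
#
#   >>> critical_band(1000.0)
#   (1000.0, 900.0, 1100.0, 200.0)
#   >>> critical_band(200.0)
#   (200.0, 150.0, 250.0, 100.0)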
def tones_level(tone_levels):
"""Total sound pressure level of the tones in a critical band given the level of each of the tones.
    .. math:: L_{pt} = 10 \log_{10}{\sum 10^{L_{pti}/10}}
See equation C.1 in section C.2.3.1.
"""
return dbsum(tone_levels)
def masking_noise_level(noise_lines, frequency_resolution, effective_analysis_bandwidth):
"""Masking noise level :math:`L_{pn}`
:param noise_lines: Masking noise lines. See :func:`masking_noise_lines`.
:param frequency_resolution: Frequency resolution :math:`\Delta f`.
:param effective_analysis_bandwidth: Effective analysis bandwidth :math:`B`.
.. math:: L_{pn} = 10 \log_{10}{\sum 10^{L_n/10}} + 10 \log_{10}{\frac{\Delta f}{B}}
See equation C.11 in section C.4.4.
"""
return dbsum(noise_lines) + 10.0 * np.log10(frequency_resolution / effective_analysis_bandwidth)
def masking_noise_lines(levels, line_classifier, center, bandwidth, regression_range_factor):
"""Determine masking noise level lines using regression line. Returns array of :math:`L_n`.
    :param levels: Levels as function of frequency.
:type levels: :class:`pd.Series`.
    :param line_classifier: Categorical indicating what each line is.
:param center: Center frequency.
:param bandwidth: bandwidth of critical band.
:param regression_range_factor: Range factor.
    :returns: (Array with masking noise lines, slope, intercept).
"""
slicer = slice(center - bandwidth * regression_range_factor, center + bandwidth * regression_range_factor)
levels = levels[slicer]
frequencies = levels.index
regression_levels = levels[line_classifier == 'noise']
slope, intercept = linregress(x=regression_levels.index, y=regression_levels)[0:2]
levels_from_regression = slope * frequencies + intercept
return levels_from_regression, slope, intercept
def tonal_audibility(tones_level, masking_noise_level, center):
"""Tonal audibility.
    :param tones_level: Total sound pressure level of the tones in the critical band :math:`L_{pt}`.
    :param masking_noise_level: Total sound pressure level of the masking noise in the critical band :math:`L_{pn}`.
:param center: Center frequency of the critical band :math:`f_c`.
:returns: Tonal audibility :math:`\Delta L_{ta}`
.. math:: \Delta L_{ta} = L_{pt} - L_{pn} + 2 + \log_{10}{1 + \left(\frac{f_c}{502}\right)^{2.5}}
See equation C.3. in section C.2.4.
"""
return tones_level - masking_noise_level + 2.0 + np.log10(1.0 + (center / 502.0)**(2.5))
def tonal_adjustment(tonal_audibility):
"""Adjustment :math:`K`.
:param tonal_audibility: Tonal audibility :math:`L_{ta}`.
See equations C.4, C.5 and C.6 in section C.2.4.
"""
if tonal_audibility > 10.0:
return 6.0
elif tonal_audibility < 4.0:
return 0.0
else:
return tonal_audibility - 4.0
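# Worked example (editor's illustration with made-up levels): a tone level of
# L_pt = 60 dB against masking noise of L_pn = 45 dB in the critical band
# centred at 1 kHz gives
#
#   tonal_audibility(60.0, 45.0, 1000.0)   # ~17.8 dB
#   tonal_adjustment(17.8)                 # 6.0 dB (capped, audibility > 10 dB)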
class Tonality:
"""Perform assessment of audibility of tones in noise.
Objective method for assessing the audibility of tones in noise.
"""
def __init__( # pylint: disable=too-many-instance-attributes
self,
signal,
sample_frequency,
window='hanning',
reference_pressure=REFERENCE_PRESSURE,
tsc=TONE_SEEK_CRITERION,
regression_range_factor=REGRESSION_RANGE_FACTOR,
nbins=None,
force_tone_without_pause=False,
force_bandwidth_criterion=False,
):
self.signal = signal
"""Samples in time-domain."""
self.sample_frequency = sample_frequency
"""Sample frequency."""
self.window = window
"""Window to be used."""
self.reference_pressure = reference_pressure
"""Reference sound pressure."""
self.tsc = tsc
"""Tone seeking criterium."""
self.regression_range_factor = regression_range_factor
"""Regression range factor."""
self.nbins = nbins
"""Amount of frequency nbins to use. See attribute `nperseg` of :func:`scipy.signal.welch`."""
self._noise_pauses = list()
"""Private list of noise pauses that were determined or assigned."""
self._spectrum = None
"""Power spectrum as function of frequency."""
self.force_tone_without_pause = force_tone_without_pause
self.force_bandwidth_criterion = force_bandwidth_criterion
@property
def noise_pauses(self):
"""Noise pauses that were determined."""
for noise_pause in self._noise_pauses:
yield noise_pause
@property
def tones(self):
"""Tones that were determined."""
for noise_pause in self.noise_pauses:
if noise_pause.tone is not None:
yield noise_pause.tone
@property
def critical_bands(self):
"""Critical bands that were determined. A critical band is determined for each tone."""
for tone in self.tones:
yield tone.critical_band
@property
def spectrum(self):
"""Power spectrum of the input signal.
"""
if self._spectrum is None:
nbins = self.nbins
if nbins is None:
nbins = self.sample_frequency
nbins //= 1 # Fix because of bug in welch with uneven nbins
f, p = welch(self.signal, fs=self.sample_frequency, nperseg=nbins, window=self.window, detrend=False,
scaling='spectrum')
self._spectrum = pd.Series(10.0 * np.log10(p / self.reference_pressure**2.0), index=f)
return self._spectrum
@property
def frequency_resolution(self):
"""Frequency resolution.
"""
df = np.diff(np.array(self.spectrum.index)).mean()
return df
#return 1.0 / self.sample_frequency
@property
def effective_analysis_bandwidth(self):
"""Effective analysis bandwidth.
In the case of the Hanning window
.. math:: B_{eff} = 1.5 \Delta f
    with :math:`\Delta f` the :attr:`frequency_resolution`.
C.2.2: Note 1.
"""
if self.window == 'hanning':
return 1.5 * self.frequency_resolution
else:
raise ValueError()
def _set_noise_pauses(sel
|
Abhinav117/pymtl
|
pymtl/tools/translation/visitors.py
|
Python
|
bsd-3-clause
| 29,868
| 0.028994
|
#=========================================================================
# visitors.py
#=========================================================================
from __future__ import print_function
import ast, _ast
import re
import warnings
from ..ast_helpers import get_closure_dict, print_simple_ast
from ...model.signals import Wire, Signal, InPort, OutPort, _SignalSlice
from ...model.Model import Model
from ...model.PortBundle import PortBundle
from ...model.signal_lists import PortList, WireList
from ...datatypes.Bits import Bits
from exceptions import VerilogTranslationError
#-------------------------------------------------------------------------
# AnnotateWithObjects
#-------------------------------------------------------------------------
# Annotates AST Nodes with the live Python objects they reference.
# TODO: store objects in PyObj wrapper, or not?
class AnnotateWithObjects( ast.NodeTransformer ):
def __init__( self, model, func ):
self.model = model
self.func = func
self.closed_vars = get_closure_dict( func )
self.current_obj = None
def visit_Attribute( self, node ):
self.generic_visit( node )
# TODO: handle self.current_obj == None. These are temporary
# locals that we should check to ensure their types don't
# change!
if self.current_obj:
try :
x = self.current_obj.getattr( node.attr )
self.current_obj.update( node.attr, x )
except AttributeError:
        if node.attr not in ['next', 'value', 'n', 'v']:
raise Exception('Unknown attribute "{}" in model "{}"'
.format( node.attr, self.model.__class__ ))
node._object = self.current_obj.inst if self.current_obj else None
return node
def visit_Name( self, node ):
# Check if the name is a global constant
if node.id in self.func.func_globals:
      new_obj = PyObj( '', self.func.func_globals[ node.id ] )
# If the name is not in closed_vars or func_globals, it's a local temporary
elif node.id not in self.closed_vars:
new_obj = None
# If the name points to the model, this is a reference to self (or s)
elif self.closed_vars[ node.id ] is self.model:
new_obj = PyObj( '', self.closed_vars[ node.id ] )
# Otherwise, we have some other variable captured by the closure...
# TODO: should we allow this?
else:
new_node = node
new_obj = PyObj( node.id, self.closed_vars[ node.id ] )
# Store the new_obj
self.current_obj = new_obj
node._object = self.current_obj.inst if self.current_obj else None
# Return the new_node
return node
def visit_Subscript( self, node ):
# Visit the object being sliced
new_value = self.visit( node.value )
# Visit the index of the slice; stash and restore the current_obj
stash, self.current_obj = self.current_obj, None
new_slice = self.visit( node.slice )
self.current_obj = stash
# Update the current_obj
# TODO: check that type of all elements in item are identical
# TODO: won't work for lists that are initially empty
# TODO: what about lists that initially contain None?
# TODO: do we want the array, or do we want element 0 of the array...
node._object = self.current_obj.inst if self.current_obj else None
if self.current_obj:
self.current_obj.update( '[]', self.current_obj.inst[0] )
return node
def visit_List( self, node ):
node._object = []
for item in node.elts:
self.visit( item )
node._object.append( item._object )
return node
#-------------------------------------------------------------------------
# AnnotateAssignments
#-------------------------------------------------------------------------
class AnnotateAssignments( ast.NodeTransformer ):
'Annotate assign nodes with ._is_blocking attribute'
def visit_Assign( self, node ):
# catch untranslatable constructs
if len(node.targets) != 1:
raise VerilogTranslationError(
'Chained assignments are not supported!\n'
'Please modify "x = y = ..." to be two separate lines.',
node.lineno
)
# annotate the assignment with _is_blocking if not sequential update
lhs = node.targets[0]
seq = isinstance( lhs, ast.Attribute ) and lhs.attr in ['next','n']
node._is_blocking = not seq
self.generic_visit( node )
return node
def visit_AugAssign( self, node ):
# annotate the assignment with _is_blocking if not sequential update
lhs = node.target
seq = isinstance( lhs, ast.Attribute ) and lhs.attr in ['next','n']
node._is_blocking = not seq
self.generic_visit( node )
return node
#-------------------------------------------------------------------------
# RemoveValueNext
#-------------------------------------------------------------------------
# Remove .value and .next.
class RemoveValueNext( ast.NodeTransformer ):
def visit_Attribute( self, node ):
if node.attr in ['next', 'value', 'n', 'v']:
# Update the Load/Store information
node.value.ctx = node.ctx
return ast.copy_location( node.value, node )
return node
#-------------------------------------------------------------------------
# RemoveSelf
#-------------------------------------------------------------------------
# Remove references to self.
# TODO: make Attribute attached to self a Name node?
class RemoveSelf( ast.NodeTransformer ):
def __init__( self, model ):
self.model = model
def visit_Name( self, node ):
if node._object == self.model:
return None
return node
#-------------------------------------------------------------------------
# FlattenSubmodAttrs
#-------------------------------------------------------------------------
# Transform AST branches for submodule signals. A PyMTL signal referenced
# as 's.submodule.port' would appear in the AST as:
#
# Attribute(port)
# |- Attribute(submodule)
#
# This visitor transforms the AST and name to 's.submodule_port':
#
# Attribute(submodule$port)
#
class FlattenSubmodAttrs( ast.NodeTransformer ):
def __init__( self ):
self.submodule = None
def visit_Attribute( self, node ):
# Visit children
self.generic_visit( node )
# If the direct child of this attribute was a submodule then the node
# will be removed by the visitor. We must update our name to include
# submodule name for proper mangling.
if self.submodule:
new_node = _ast.Name( id = '{}${}'.format(self.submodule, node.attr ),
ctx = node.ctx )
new_node._object = node._object
node = new_node
# Attribute is a submodel remove the node, set the submodule name
if hasattr( node._object, 'class_name' ):
self.submodule = node._object.name
return None
# Otherwise, clear the submodule name, return node unmodified
self.submodule = None
return ast.copy_location( node, node )
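# Editor's standalone sketch (illustrative, not used by the translator): the
# same flattening idea on a plain AST, collapsing `s.sub.port` into a single
# Name node whose id is 'sub$port'.
class _FlattenDemo( ast.NodeTransformer ):
  def visit_Attribute( self, node ):
    self.generic_visit( node )
    inner = node.value
    if isinstance( inner, ast.Attribute ) and \
       isinstance( inner.value, ast.Name ) and inner.value.id == 's':
      new_node = _ast.Name( id = '{}${}'.format( inner.attr, node.attr ),
                            ctx = node.ctx )
      return ast.copy_location( new_node, node )
    return node
# Example: _FlattenDemo().visit( ast.parse( 's.sub.port', mode='eval' ) )
# turns the nested Attribute chain into Name(id='sub$port').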
#-------------------------------------------------------------------------
# FlattenPortBundles
#-------------------------------------------------------------------------
# Transform AST branches for PortBundle signals. A PyMTL signal referenced
# as 's.portbundle.port' would appear in the AST as:
#
# Attribute(port)
# |- Attribute(portbundle)
#
# This visitor transforms the AST and name to 's.submodule_port':
#
# Attribute(portbundle_port)
#
class FlattenPortBundles( ast.NodeTransformer ):
def __init__( self ):
self.portbundle = None
def visit_Attribute( self, node ):
# Visit children
self.generic_visit( node )
# If the direct child of this attribute was a portbundle then the node
# will be removed by the visitor. We must update our name to include
# portbundle name for proper mangling.
if self.portbundle:
new_node = _ast.Name( id = '{}_{}'.format(self.portbundle, node.attr ),
ctx = node.ctx )
new_node._object = node._object
node = new_node
# Attribute is a PortBundle, remove the node, set the submodule name
if isinstance( node._object, PortBundle ):
|
RuthAngus/chronometer
|
chronometer/test_MH.py
|
Python
|
mit
| 1,566
| 0.000639
|
"""
Test the metropolis hastings algorithm.
"""
import numpy as np
import chronometer as gc
import matplotlib.pyplot as plt
import corner
import emcee
def model(par, x):
return par[0] + par[1]*x
def lnlike(par, x, y, yerr, par_inds):
y_mod = model(par, x)
return sum(-.5*((y_mod - y)/yerr)**2)
def test_metropolis_hastings():
# Straight line model
x = np.arange(0, 10, .1)
err = 2.
    yerr = np.ones_like(x) * err
y = .7 + 2.5*x + np.random.randn(len(x))*err
# Plot the data.
plt.clf()
    plt.errorbar(x, y, yerr=yerr, fmt="k.")
plt.savefig("data")
print("Running Metropolis Hastings")
N = 1000000 # N samples
pars = np.array([.5, 2.5]) # initialisation
t = np.array([.01, .01])
par_inds = np.arange(len(pars))
args = [x, y, yerr, par_inds]
samples, par, probs = gc.MH(pars, lnlike, N, t, *args)
results = [np.percentile(samples[:, i], 50) for i in range(2)]
upper = [np.percentile(samples[:, i], 64) for i in range(2)]
lower = [np.percentile(samples[:, i], 15) for i in range(2)]
print(lower, "lower")
print(results, "results")
print(upper, "upper")
assert lower < results
assert results < upper
plt.clf()
plt.errorbar(x, y, yerr=yerr, fmt="k.")
plt.plot(x, results[0] + results[1]*x)
plt.savefig("test")
fig = corner.corner(samples, truths=[.7, 2.5], labels=["m", "c"])
fig.savefig("corner_MH_test")
plt.clf()
plt.plot(probs)
plt.savefig("prob_test")
if __name__ == "__main__":
test_metropolis_hastings()
|
sveetch/boussole
|
tests/002_finder/005_relativefrompaths.py
|
Python
|
mit
| 2,097
| 0
|
# -*- coding: utf-8 -*-
import pytest
from boussole.exceptions import FinderException
def test_001(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home/foo",
"/home/bar",
"/etc",
])
assert results == "plop"
def test_002(finder):
results = finder.get_relative_from_paths("/etc/plop.plip", [
"/home/foo",
"/home/bar",
"/etc",
])
assert results == "plop.p
|
lip"
def test_003(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/fo
|
o",
"/etc",
])
assert results == "plop"
def test_004(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/home/bar",
"/etc/ping",
])
assert results == "plop"
def test_005(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/home/bar/pika",
"/etc/ping",
])
assert results == "plop"
def test_006(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/home",
"/home/foo",
"/home/bar/pika",
"/home/bar",
])
assert results == "pika/plop"
def test_007(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/etc",
"/home/foo/pika",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_008(finder):
results = finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
"/etc",
"/home/foo/pika/bim/bam",
"/home/foo/pika/bim/bom",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_009(finder):
"""
Unable to find relative path raise an exception
"""
with pytest.raises(FinderException):
finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
"/etc",
"/home/foo/pika/bim/bom",
"/home/bar/pika",
"/home/bar",
])
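# Editor's sketch of the behaviour these tests pin down (not boussole's actual
# implementation): the relative path is resolved against the longest source
# directory containing the file, and a FinderException is raised when no
# candidate matches.
import os

def get_relative_from_paths_demo(filepath, paths):
    candidates = [p for p in paths if filepath.startswith(p.rstrip('/') + '/')]
    if not candidates:
        raise FinderException("'{}' is not relative to any given path".format(filepath))
    return os.path.relpath(filepath, max(candidates, key=len))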
|
huggingface/transformers
|
utils/check_repo.py
|
Python
|
apache-2.0
| 30,085
| 0.002393
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
import warnings
from collections import OrderedDict
from difflib import get_close_matches
from pathlib import Path
from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.file_utils import ENV_VARS_TRUE_VALUES
from transformers.models.auto import get_values
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source"
# Update this list with models that are supposed to be private.
PRIVATE_MODELS = [
"DPRSpanPredictor",
"RealmBertModel",
"T5Stack",
"TFDPRSpanPredictor",
]
# Update this list for models that are not tested with a comment explaining the reason it should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
# models to ignore for not tested
"SegformerDecodeHead", # Building part of bigger (tested) model.
"PLBartEncoder", # Building part of bigger (tested) model.
"PLBartDecoder", # Building part of bigger (tested) model.
"PLBartDecoderWrapper", # Building part of bigger (tested) model.
"BigBirdPegasusEncoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model.
"DetrEncoder", # Building part of bigger (tested) model.
"DetrDecoder", # Building part of bigger (tested) model.
"DetrDecoderWrapper", # Building part of bigger (tested) model.
"M2M100Encoder", # Building part of bigger (tested) model.
"M2M100Decoder", # Building part of bigger (tested) model.
"Speech2TextEncoder", # Building part of bigger (tested) model.
"Speech2TextDecoder", # Building part of bigger (tested) model.
"LEDEncoder", # Building part of bigger (tested) model.
"LEDDecoder", # Building part of bigger (tested) model.
"BartDecoderWrapper", # Building part of bigger (tested) model.
"BartEncoder", # Building part of bigger (tested) model.
"BertLMHeadModel", # Needs to be setup as decoder.
"BlenderbotSmallEncoder", # Building part of bigger (tested) model.
"BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model.
"BlenderbotEncoder", # Building part of bigger (tested) model.
"BlenderbotDecoderWrapper", # Building part of bigger (tested) model.
"MBartEncoder", # Building part of bigger (tested) model.
"MBartDecoderWrapper", # Building part of bigger (tested) model.
"MegatronBertLMHeadModel", # Building part of bigger (tested) model.
"MegatronBertEncoder", # Building part of bigger (tested) model.
"MegatronBertDecoder", # Building part of bigger (tested) model.
"MegatronBertDecoderWrapper", # Building part of bigger (tested) model.
"PegasusEncoder", # Building part of bigger (tested) model.
"PegasusDecoderWrapper", # Building part of bigger (tested) model.
"DPREncoder", # Building part of bigger (tested) model.
"ProphetNetDecoderWrapper", # Building part of bigger (tested) model.
"RealmBertModel", # Building part of bigger (tested) model.
"RealmReader", # Not regular model.
"RealmScorer", # Not regular model.
"RealmForOpenQA", # Not regular model.
"ReformerForMaskedLM", # Needs to be setup as decoder.
"Speech2Text2DecoderWrapper", # Building part of bigger (tested) model.
"TFDPREncoder", # Building part of bigger (tested) model.
"TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)
"TFRobertaForMultipleChoice", # TODO: fix
"TrOCRDecoderWrapper", # Building part of bigger (tested) model.
"SeparableConv1D", # Building part of bigger (tested) model.
"FlaxBartForCausalLM", # Building part of bigger (tested) model.
]
# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't
# trigger the common tests.
TEST_FILES_WITH_NO_COMMON_TESTS = [
"camembert/test_modeling_camembert.py",
"mt5/test_modeling_flax_mt5.py",
"mbart/test_modeling_mbart.py",
"mt5/test_modeling_mt5.py",
"pegasus/test_modeling_pegasus.py",
"camembert/test_modeling_tf_camembert.py",
"mt5/test_modeling_tf_mt5.py",
"xlm_roberta/test_modeling_tf_xlm_roberta.py",
"xlm_roberta/test_modeling_flax_xlm_roberta.py",
"xlm_prophetnet/test_modeling_xlm_prophetnet.py",
"xlm_roberta/test_modeling_xlm_roberta.py",
"vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py",
"vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py",
]
# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and
# should **not** be the rule.
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
# models to ignore for model xxx mapping
"ViltForQuestionAnswering",
"ViltForImagesAndTextClassification",
"ViltForImageAndTextRetrieval",
"ViltForMaskedLM",
"XGLMEncoder",
"XGLMDecoder",
"XGLMDecoderWrapper",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"SegformerDecodeHead",
"FlaxBeitForMaskedImageModeling",
"PLBartEncoder",
"PLBartDecoder",
"PLBartDecoderWrapper",
"BeitForMaskedImageModeling",
"CLIPTextModel",
"CLIPVisionModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
"FlaxCLIPTextModel",
"FlaxCLIPVisionModel",
"FlaxWav2Vec2ForCTC",
"DetrForSegmentation",
"DPRReader",
"FlaubertForQuestionAnswering",
"GPT2DoubleHeadsModel",
"LukeForMaskedLM",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"OpenAIGPTDoubleHeadsModel",
"RagModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"RealmEmbedder",
"RealmForOpenQA",
"RealmScorer",
"RealmReader",
"TFDPRReader",
"TFGPT2DoubleHeadsModel",
"TFOpenAIGPTDoubleHeadsModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"Wav2Vec2ForCTC",
"HubertForCTC",
"SEWForCTC",
"SEWDForCTC",
"XLMForQuestionAnswering",
"XLNetForQuestionAnswering",
"SeparableConv1D",
"VisualBertForRegionToPhraseAlignment",
|
"VisualBertForVisualReasoning",
"VisualBertForQuestionAnswering",
"VisualBertForMultipleChoice",
"TFWav2Vec2ForCTC",
"TFHubertForCTC",
"MaskFormerForInstanceSegmentation",
]
# Update this list for models that have multiple model types for the same
# model doc
MODEL_TYPE_TO_DOC_MAPPING = OrderedDict(
[
("data2vec-text", "data2vec"),
("data2vec-audio", "data2vec"),
]
)
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
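# Editor's note: on current Python versions the deprecated load_module() call
# above has an equivalent two-step form (sketch):
#
#   transformers = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(transformers)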
def check_model_list():
"""Check the model list inside the transformers library."""
# Get the models from the directory structure of `src/transformers/models/`
models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models")
_models = []
for model in os.listdir(models_dir):
model_dir = os.path.join(models_dir, model)
if o
|
ishandongol/voli-fix-vetnae
|
videocalling/views.py
|
Python
|
mit
| 150
| 0.013333
|
from django.http import HttpResponse
from django.shortcuts import render
def video_calling(request):
    return render(request, 'video_calling.html')
|
elimence/edx-platform
|
lms/djangoapps/simplewiki/views.py
|
Python
|
agpl-3.0
| 21,289
| 0.003852
|
# -*- coding: utf-8 -*-
from django.conf import settings as settings
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from mitxmako.shortcuts import render_to_response
from courseware.courses import get_opt_course_with_access
from courseware.access import has_access
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from .models import Revision, Article, Namespace, CreateArticleForm, RevisionFormWithTitle, RevisionForm
import wiki_settings
def wiki_reverse(wiki_page, article=None, course=None, namespace=None, args=[], kwargs={}):
kwargs = dict(kwargs) # TODO: Figure out why if I don't do this kwargs sometimes contains {'article_path'}
if not 'course_id' in kwargs and course:
kwargs['course_id'] = course.id
if not 'article_path' in kwargs and article:
kwargs['article_path'] = article.get_path()
if not 'namespace' in kwargs and namespace:
kwargs['namespace'] = namespace
return reverse(wiki_page, kwargs=kwargs, args=args)
def update_template_dictionary(dictionary, request=None, course=None, article=None, revision=None):
if article:
dictionary['wiki_article'] = article
dictionary['wiki_title'] = article.title # TODO: What is the title when viewing the article in a course?
if not course and 'namespace' not in dictionary:
dictionary['namespace'] = article.namespace.name
if course:
dictionary['course'] = course
if 'namespace' not in dictionary:
dictionary['namespace'] = "edX"
else:
dictionary['course'] = None
if revision:
dictionary['wiki_article_revision'] = revision
dictionary['wiki_current_revision_deleted'] = not (revision.deleted == 0)
if request:
dictionary.update(csrf(request))
if request and course:
dictionary['staff_access'] = has_access(request.user, course, 'staff')
else:
dictionary['staff_access'] = False
def view(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=True)
if perm_err:
return perm_err
d = {}
update_template_dictionary(d, request, course, article, article.current_revision)
return render_to_response('simplewiki/simplewiki_view.html', d)
def view_revision(request, revision_number, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
try:
revision = Revision.objects.get(counter=int(revision_number), article=article)
except:
d = {'wiki_err_norevision': revision_number}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_error.html', d)
perm_err = check_permissions(request, article, course, check_read=True, check_deleted=True, revision=revision)
if perm_err:
return perm_err
d = {}
update_template_dictionary(d, request, course, article, revision)
return render_to_response('simplewiki/simplewiki_view.html', d)
def root_redirect(request, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
#TODO: Add a default namespace to settings.
namespace = "edX"
try:
root = Article.get_root(namespace)
return HttpResponseRedirect(reverse('wiki_view', kwargs={'course_id': course_id, 'article_path': root.get_path()}))
except:
# If the root is not found, we probably are loading this class for the first time
# We should make sure the namespace exists so the root article can be created.
Namespace.ensure_namespace(namespace)
err = not_found(request, namespace + '/', course)
return err
def create(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
article_path_components = article_path.split('/')
# Ensure the namespace exists
if not len(article_path_components) >= 1 or len(article_path_components[0]) == 0:
d = {'wiki_err_no_namespace': True}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
namespace = None
try:
namespace = Namespace.objects.get(name__exact=article_path_components[0])
    except (Namespace.DoesNotExist, ValueError):
d = {'wiki_err_bad_namespace': True}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_error.html', d)
# See if the article already exists
article_slug = article_path_components[1] if len(article_path_components) >= 2 else ''
#TODO: Make sure the slug only contains legal characters (which is already done a bit by the url regex)
try:
existing_article = Article.objects.get(namespace=namespace, slug__exact=article_slug)
#It already exists, so we just redirect to view the article
return HttpResponseRedirect(wiki_reverse("wiki_view", existing_article, course))
except Article.DoesNotExist:
#This is good. The article doesn't exist
pass
#TODO: Once we have permissions for namespaces, we should check for create permissions
#check_permissions(request, #namespace#, check_locked=False, check_write=True, check_deleted=True)
if request.method == 'POST':
f = CreateArticleForm(request.POST)
if f.is_valid():
article = Article()
article.slug = article_slug
if not request.user.is_anonymous():
article.created_by = request.user
article.title = f.cleaned_data.get('title')
article.namespace = namespace
a = article.save()
new_revision = f.save(commit=False)
if not request.user.is_anonymous():
new_revision.revision_user = request.user
new_revision.article = article
new_revision.save()
return HttpResponseRedirect(wiki_reverse("wiki_view", article, course))
else:
f = CreateArticleForm(initial={'title': request.GET.get('wiki_article_name', article_slug),
'contents': _('Headline\n===\n\n')})
d = {'wiki_form': f, 'create_article': True, 'namespace': namespace.name}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_edit.html', d)
def edit(request, article_path, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, article_path, course)
if err:
return err
# Check write permissions
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True, check_deleted=False)
if perm_err:
return perm_err
if wiki_settings.WIKI_ALLOW_TITLE_EDIT:
EditForm = RevisionFormWithTitle
else:
EditForm = RevisionForm
if request.method == 'POST':
|
f = EditForm(request.POST)
if f.is_valid():
new_revision = f.save(commit=False)
new_revision.article = article
            if request.POST.__contains__('delete'):
if (article.current_revision.deleted == 1): # This article has already been deleted. Redirect
return HttpResponseRedirect(wiki_reverse('wiki_view', article, course))
new_revision.contents = ""
new_revision.deleted = 1
elif not new_revision.get_diff():
return HttpResponseR
|
odejesush/tensorflow
|
tensorflow/tools/docs/parser_test.py
|
Python
|
apache-2.0
| 12,002
| 0.003
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import parser
def test_function_for_markdown_reference(unused_arg):
"""Docstring with reference to @{test_function}."""
pass
def test_function(unused_arg, unused_kwarg='default'):
"""Docstring for test function."""
pass
def test_function_with_args_kwargs(unused_arg, *unused_args, **unused_kwargs):
"""Docstring for second test function."""
pass
def test_function_with_fancy_docstring(arg):
"""Function with a fancy docstring.
Args:
arg: An argument.
Returns:
arg: the input, and
arg: the input, again.
"""
return arg, arg
class TestClass(object):
"""Docstring for TestClass itself."""
def a_method(self, arg='default'):
"""Docstring for a method."""
pass
class ChildClass(object):
"""Docstring for a child class."""
pass
@property
def a_property(self):
"""Docstring for a property."""
pass
CLASS_MEMBER = 'a class member'
class ParserTest(googletest.TestCase):
def test_documentation_path(self):
self.assertEqual('test.md', parser.documentation_path('test'))
self.assertEqual('test/module.md', parser.documentation_path('test.module'))
def test_documentation_path_empty(self):
self.assertEqual('index.md', parser.documentation_path(''))
def test_replace_references(self):
string = 'A @{reference}, another @{tf.reference}, and a @{third}.'
duplicate_of = {'third': 'fourth'}
result = parser.replace_references(string, '../..', duplicate_of)
self.assertEqual(
'A [`reference`](../../reference.md), another '
'[`tf.reference`](../../reference.md), '
'and a [`third`](../../fourth.md).',
result)
def test_generate_markdown_for_class(self):
index = {
'TestClass': TestClass,
'TestClass.a_method': TestClass.a_method,
'TestClass.a_property': TestClass.a_property,
'TestClass.ChildClass': TestClass.ChildClass,
'TestClass.CLASS_MEMBER': TestClass.CLASS_MEMBER
}
tree = {
'TestClass': ['a_method', 'a_property', 'ChildClass', 'CLASS_MEMBER']
}
docs = parser.generate_markdown(full_name='TestClass', py_object=TestClass,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure all required docstrings are present.
self.assertTrue(inspect.getdoc(TestClass) in docs)
self.assertTrue(inspect.getdoc(TestClass.a_method) in docs)
self.assertTrue(inspect.getdoc(TestClass.a_property) in docs)
# Make sure that the signature is extracted properly and omits self.
self.assertTrue('a_method(arg=\'default\')' in docs)
# Make sure there is a link to the child class and it points the right way.
self.assertTrue('[`class ChildClass`](./TestClass/ChildClass.md)' in docs)
# Make sure CLASS_MEMBER is mentioned.
self.assertTrue('CLASS_MEMBER' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_module(self):
module = sys.modules[__name__]
index = {
'TestModule': module,
'TestModule.test_function': test_function,
        'TestModule.test_function_with_args_kwargs':
test_function_with_args_kwargs,
'TestModule.TestClass': TestClass,
}
tree = {
'TestModule': ['TestClass', 'test_function',
'test_function_with_args_kwargs']
}
docs = parser.generate_markdown(full_name='TestModule', py_object=module,
duplicate_of={}, duplicates={},
                                    index=index, tree=tree, base_dir='/')
# Make sure all required docstrings are present.
self.assertTrue(inspect.getdoc(module) in docs)
# Make sure that links to the members are there (not asserting on exact link
# text for functions).
self.assertTrue('./TestModule/test_function.md' in docs)
self.assertTrue('./TestModule/test_function_with_args_kwargs.md' in docs)
# Make sure there is a link to the child class and it points the right way.
self.assertTrue('[`class TestClass`](./TestModule/TestClass.md)' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_function(self):
index = {
'test_function': test_function
}
tree = {
'': ['test_function']
}
docs = parser.generate_markdown(full_name='test_function',
py_object=test_function,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up.
self.assertTrue(inspect.getdoc(test_function) in docs)
# Make sure the extracted signature is good.
self.assertTrue(
'test_function(unused_arg, unused_kwarg=\'default\')' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_function_with_kwargs(self):
index = {
'test_function_with_args_kwargs': test_function_with_args_kwargs
}
tree = {
'': ['test_function_with_args_kwargs']
}
docs = parser.generate_markdown(full_name='test_function_with_args_kwargs',
py_object=test_function_with_args_kwargs,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up.
self.assertTrue(inspect.getdoc(test_function_with_args_kwargs) in docs)
# Make sure the extracted signature is good.
self.assertTrue(
'test_function_with_args_kwargs(unused_arg,'
' *unused_args, **unused_kwargs)' in docs)
def test_references_replaced_in_generated_markdown(self):
index = {
'test_function_for_markdown_reference':
test_function_for_markdown_reference
}
tree = {
'': ['test_function_for_markdown_reference']
}
docs = parser.generate_markdown(
full_name='test_function_for_markdown_reference',
py_object=test_function_for_markdown_reference,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up and is properly processed.
expected_docs = parser.replace_references(
inspect.getdoc(test_function_for_markdown_reference),
relative_path_to_root='.', duplicate_of={})
self.assertTrue(expected_docs in docs)
def test_docstring_special_section(self):
index = {
'test_function': test_function_with_fancy_docstring
}
tree = {
'': 'test_function'
}
docs = parser.generate_markdown(
full_name='test_function',
py_object=test_function_with_fancy_docstring,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
expected = '\n'.join([
'Function with a fancy docstring.',
'',
'#### Args:',
'',
|