| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 value) | license (15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| jiahaoliang/group-based-policy | gbpservice/neutron/tests/unit/services/grouppolicy/test_group_proxy_extension.py | Python | apache-2.0 | 8,529 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron import context as n_ctx
from sqlalchemy.orm import exc as orm_exc
from gbpservice.neutron.db.grouppolicy.extensions import group_proxy_db
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_extension_driver_api as test_ext_base)
class ExtensionDriverTestCaseMixin(object):
def test_proxy_group_extension(self):
l3p = self.create_l3_policy()['l3_policy']
self.assertEqual('192.168.0.0/16', l3p['proxy_ip_pool'])
self.assertEqual(28, l3p['proxy_subnet_prefix_length'])
l2p = self.create_l2_policy(l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertIsNone(ptg['proxy_group_id'])
self.assertIsNone(ptg['proxied_group_id'])
self.assertIsNone(ptg['proxy_type'])
# Verify Default L3P pool mapping on show
l3p = self.show_l3_policy(l3p['id'])['l3_policy']
self.assertEqual('192.168.0.0/16', l3p['proxy_ip_pool'])
self.assertEqual(28, l3p['proxy_subnet_prefix_length'])
ptg_proxy = self.create_policy_target_group(
proxied_group_id=ptg['id'])['policy_target_group']
self.assertIsNone(ptg_proxy['proxy_group_id'])
self.assertEqual(ptg['id'], ptg_proxy['proxied_group_id'])
self.assertEqual('l3', ptg_proxy['proxy_type'])
# Verify relationship added
ptg = self.show_policy_target_group(ptg['id'])['policy_target_group']
self.assertEqual(ptg_proxy['id'], ptg['proxy_group_id'])
self.assertIsNone(ptg['proxied_group_id'])
pt = self.create_policy_target(
policy_target_group_id=ptg_proxy['id'])['policy_target']
self.assertFalse(pt['proxy_gateway'])
self.assertFalse(pt['group_default_gateway'])
pt = self.create_policy_target(
policy_target_group_id=ptg_proxy['id'],
proxy_gateway=True, group_default_gateway=True)['policy_target']
self.assertTrue(pt['proxy_gateway'])
self.assertTrue(pt['group_default_gateway'])
pt = self.show_policy_target(pt['id'])['policy_target']
self.assertTrue(pt['proxy_gateway'])
self.assertTrue(pt['group_default_gateway'])
def test_preexisting_pt(self):
ptg = self.create_policy_target_group()['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self.assertTrue('proxy_gateway' in pt)
self.assertTrue('group_default_gateway' in pt)
# Forcefully delete the entry in the proxy table, and verify that it's
# fixed by the subsequent GET
admin_context = n_ctx.get_admin_context()
mapping = admin_context.session.query(
group_proxy_db.ProxyGatewayMapping).filter_by(
policy_target_id=pt['id']).one()
admin_context.session.delete(mapping)
query = admin_context.session.query(
group_proxy_db.ProxyGatewayMapping).filter_by(
policy_target_id=pt['id'])
self.assertRaises(orm_exc.NoResultFound, query.one)
# Showing the object just ignores the extension
pt = self.show_policy_target(pt['id'],
expected_res_status=200)['policy_target']
self.assertFalse('proxy_gateway' in pt)
self.assertFalse('group_default_gateway' in pt)
# Updating the object just ignores the extension
pt = self.update_policy_target(
pt['id'], name='somenewname',
expected_res_status=200)['policy_target']
self.assertEqual('somenewname', pt['name'])
self.assertFalse('proxy_gateway' in pt)
self.assertFalse('group_default_gateway' in pt)
def test_proxy_group_multiple_proxies(self):
# Proxying the same PTG multiple times will fail
ptg = self.create_policy_target_group()['policy_target_group']
self.create_policy_target_group(proxied_group_id=ptg['id'])
# Second proxy will fail
res = self.create_policy_target_group(proxied_group_id=ptg['id'],
expected_res_status=400)
self.assertEqual('InvalidProxiedGroup', res['NeutronError']['type'])
def test_proxy_group_chain_proxy(self):
# Verify no error is raised when chaining multiple proxy PTGs
ptg0 = self.create_policy_target_group()['policy_target_group']
ptg1 = self.create_policy_target_group(
proxied_group_id=ptg0['id'],
expected_res_status=201)['policy_target_group']
self.create_policy_target_group(proxied_group_id=ptg1['id'],
expected_res_status=201)
def test_proxy_group_no_update(self):
ptg0 = self.create_policy_target_group()['policy_target_group']
ptg1 = self.create_policy_target_group()['policy_target_group']
ptg_proxy = self.create_policy_target_group(
proxied_group_id=ptg0['id'])['policy_target_group']
self.update_policy_target_group(
ptg_proxy['id'], proxied_group_id=ptg1['id'],
expected_res_status=400)
def test_different_proxy_type(self):
ptg = self.create_policy_target_group()['policy_target_group']
ptg_proxy = self.create_policy_target_group(
proxied_group_id=ptg['id'], proxy_type='l2')['policy_target_group']
self.assertEqual('l2', ptg_proxy['proxy_type'])
ptg_proxy = self.show_policy_target_group(
ptg_proxy['id'])['policy_target_group']
self.assertEqual('l2', ptg_proxy['proxy_type'])
def test_proxy_type_fails(self):
ptg = self.create_policy_target_group()['policy_target_group']
res = self.create_policy_target_group(proxy_type='l2',
expected_res_status=400)
self.assertEqual('ProxyTypeSetWithoutProxiedPTG',
res['NeutronError']['type'])
self.create_policy_target_group(proxied_group_id=ptg['id'],
proxy_type='notvalid',
expected_res_status=400)
def test_proxy_gateway_no_proxy(self):
ptg = self.create_policy_target_group()['policy_target_group']
res = self.create_policy_target(
policy_target_group_id=ptg['id'], proxy_gateway=True,
expected_res_status=400)
self.assertEqual('InvalidProxyGatewayGroup',
res['NeutronError']['type'])
def test_proxy_pool_invalid_prefix_length(self):
l3p = self.create_l3_policy(proxy_subnet_prefix_length=29)['l3_policy']
res = self.update_l3_policy(l3p['id'], proxy_subnet_prefix_length=32,
expected_res_status=400)
self.assertEqual('InvalidDefaultSubnetPrefixLength',
res['NeutronError']['type'])
# Verify change didn't persist
l3p = self.show_l3_policy(l3p['id'])['l3_policy']
self.assertEqual(29, l3p['proxy_subnet_prefix_length'])
# Verify it fails in creation
res = self.create_l3_policy(
proxy_subnet_prefix_length=32, expected_res_status=400)
self.assertEqual('InvalidDefaultSubnetPrefixLength',
res['NeutronError']['type'])
def test_proxy_pool_invalid_version(self):
# proxy_ip_pool is of a different version
res = self.create_l3_policy(ip_version=6, ip_pool='1::1/16',
proxy_ip_pool='192.168
| Ra93POL/EsperantoLanguage | HereIsYourCode.py | Python | gpl-2.0 | 713 | 0.022876 |
import LingvoObjects, Esperanto, LangModule
def getAnalysis(NL):
# prepare the text
list_sentence = LangModule.Prepearing(NL)
# return the sentences as an object
obj_sentence = LingvoObjects.Sentence(list_sentence)
# run the full morphological and syntactic analysis
result, GrammarNazi = LangModule.NL2ResultA(obj_sentence)
return result.getSentence('dict')
#-----------------------------------------------------------------#
sentence = "montru dolaran kurson de gruzia banko"
#sentence = "montru dolaran kurson de gruzia banko malrapide"
#sentence = "Vi estas studento"
print getAnalysis(sentence)
| KeepSafe/translation-real-time-validaton | notifier/executor.py | Python | apache-2.0 | 708 | 0 |
import asyncio
from functools import partial
class AsyncWrapper:
def __init__(self, target_instance, executor=None):
self._target_inst = target_instance
self._loop = asyncio.get_event_loop()
self._executor = executor
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
method = self._target_inst.__getattribute__(name)
return partial(self._async_wrapper, method)
async def _async_wrapper(self, method_name, *args, **kwargs):
coroutine_wrapped = partial(method_name, *args, **kwargs)
return await self._loop.run_in_executor(self._executor, coroutine_wrapped)
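A minimal usage sketch for the wrapper above (`SlowClient` and the URL are illustrative, and the sketch assumes the `await` in `_async_wrapper` as fixed here): attribute lookups fall through to the wrapped instance, and the blocking call runs on the loop's default executor.

```python
import asyncio
import time

class SlowClient:
    def fetch(self, url):
        time.sleep(0.1)  # stands in for blocking I/O
        return "payload from %s" % url

async def main():
    client = AsyncWrapper(SlowClient())
    # 'fetch' is not an attribute of AsyncWrapper, so __getattribute__
    # resolves it on the wrapped instance and dispatches to the executor.
    print(await client.fetch("http://example.com"))

asyncio.run(main())
```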
| scylladb/scylla-cluster-tests | sdcm/sct_provision/aws/layout.py | Python | agpl-3.0 | 2,241 | 0.000446 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2021 ScyllaDB
from functools import cached_property
from sdcm.sct_provision.aws.cluster import OracleDBCluster, DBCluster, LoaderCluster, MonitoringCluster
from sdcm.sct_provision.common.layout import SCTProvisionLayout
from sdcm.test_config import TestConfig
class SCTProvisionAWSLayout(SCTProvisionLayout, cluster_backend='aws'):
@cached_property
def _test_config(self):
return TestConfig()
def provision(self):
if self.db_cluster:
self.db_cluster.provision()
if self.monitoring_cluster:
self.monitoring_cluster.provision()
if self.loader_cluster:
self.loader_cluster.provision()
if self.cs_db_cluster:
self.cs_db_cluster.provision()
@cached_property
def db_cluster(self):
return DBCluster(
params=self._params,
common_tags=self._test_config.common_tags(),
test_id=self._test_config.test_id(),
)
@cached_property
def loader_cluster(self):
return LoaderCluster(
params=self._params,
common_tags=self._test_config.common_tags(),
test_id=self._test_config.test_id(),
)
@cached_property
def monitoring_cluster(self):
return MonitoringCluster(
params=self._params,
common_tags=self._test_config.common_tags(),
test_id=self._test_config.test_id(),
)
@cached_property
def cs_db_cluster(self):
if not self._provision_another_scylla_cluster:
return None
return OracleDBCluster(
params=self._params,
common_tags=self._test_config.common_tags(),
test_id=self._test_config.test_id(),
)
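The `cluster_backend='aws'` class keyword above implies the base class keeps a per-backend registry. A minimal sketch of that pattern, assuming (not verified against `sct_provision`) that `SCTProvisionLayout` relies on `__init_subclass__`:

```python
_registry = {}

class LayoutBase:
    def __init_subclass__(cls, cluster_backend=None, **kwargs):
        # Capture the class keyword and register the subclass per backend.
        super().__init_subclass__(**kwargs)
        if cluster_backend is not None:
            _registry[cluster_backend] = cls

class AwsLayout(LayoutBase, cluster_backend='aws'):
    pass

assert _registry['aws'] is AwsLayout
```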
| codenote/chromium-test | tools/telemetry/telemetry/page/page_test.py | Python | bsd-3-clause | 6,182 | 0.010191 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.page.actions import all_page_actions
from telemetry.page.actions import page_action
def _GetActionFromData(action_data):
action_name = action_data['action']
action = all_page_actions.FindClassWithName(action_name)
if not action:
logging.critical('Could not find an action named %s.', action_name)
logging.critical('Check the page set for a typo and check the error '
'log for possible Python loading/compilation errors.')
raise Exception('Action "%s" not found.' % action_name)
return action(action_data)
def GetCompoundActionFromPage(page, action_name):
if not action_name:
return []
action_data_list = getattr(page, action_name)
if not isinstance(action_data_list, list):
action_data_list = [action_data_list]
action_list = []
for subaction_data in action_data_list:
subaction_name = subaction_data['action']
if hasattr(page, subaction_name):
subaction = GetCompoundActionFromPage(page, subaction_name)
else:
subaction = [_GetActionFromData(subaction_data)]
action_list += subaction * subaction_data.get('repeat', 1)
return action_list
class Failure(Exception):
"""Exception that can be thrown from PageBenchmark to indicate an
undesired but designed-for problem."""
pass
class PageTestResults(object):
def __init__(self):
self.page_successes = []
self.page_failures = []
self.skipped_pages = []
def AddSuccess(self, page):
self.page_successes.append({'page': page})
def AddFailure(self, page, message, details):
self.page_failures.append({'page': page,
'message': message,
'details': details})
def AddSkippedPage(self, page, message, details):
self.skipped_pages.append({'page': page,
'message': message,
'details': details})
class PageTest(object):
"""A class styled on unittest.TestCase for creating page-specific tests."""
def __init__(self,
test_method_name,
action_name_to_run='',
needs_browser_restart_after_each_run=False,
discard_first_result=False):
self.options = None
try:
self._test_method = getattr(self, test_method_name)
except AttributeError:
raise ValueError, 'No such method %s.%s' % (
self.__class__, test_method_name)
self._action_name_to_run = action_name_to_run
self._needs_browser_restart_after_each_run = (
needs_browser_restart_after_each_run)
self._discard_first_result = discard_first_result
@property
def needs_browser_restart_after_each_run(self):
return self._needs_browser_restart_after_each_run
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
def AddCommandLineOptions(self, parser):
"""Override to expose command-line options for this benchmark.
The provided parser is an optparse.OptionParser instance and accepts all
normal results. The parsed options are available in Run as
self.options."""
pass
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
pass
def CustomizeBrowserOptionsForPage(self, page, options):
"""Add options specific to the test and the given page."""
if not self.CanRunForPage(page):
return
for action in GetCompoundActionFromPage(page, self._action_name_to_run):
action.CustomizeBrowserOptions(options)
def SetUpBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
pass
def CanRunForPage(self, page): #pylint: disable=W0613
"""Override
|
to customize if the test can be ran for the given page."""
return True
def WillRunPageSet(self, tab, results):
"""Override to do operations before the page set is navigated."""
pass
def DidRunPageSet(self, tab, results):
"""Override to do operations after page set is completed, but before browser
is torn down."""
pass
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated."""
pass
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated, but before
any waiting for completion has occurred."""
pass
def WillRunAction(self, page, tab, action):
"""Override to do operations before running the action on the page."""
pass
def DidRunAction(self, page, tab, action):
"""Override to do operations after running the action on the page."""
pass
def Run(self, options, page, tab, results):
self.options = options
compound_action = GetCompoundActionFromPage(page, self._action_name_to_run)
self._RunCompoundAction(page, tab, compound_action)
try:
self._test_method(page, tab, results)
finally:
self.options = None
def _RunCompoundAction(self, page, tab, actions):
for i, action in enumerate(actions):
prev_action = actions[i - 1] if i > 0 else None
next_action = actions[i + 1] if i < len(actions) - 1 else None
if (action.RunsPreviousAction() and
next_action and next_action.RunsPreviousAction()):
raise page_action.PageActionFailed('Consecutive actions cannot both '
'have RunsPreviousAction() == True.')
if not (next_action and next_action.RunsPreviousAction()):
action.WillRunAction(page, tab)
self.WillRunAction(page, tab, action)
try:
action.RunAction(page, tab, prev_action)
finally:
self.DidRunAction(page, tab, action)
@property
def action_name_to_run(self):
return self._action_name_to_run
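A minimal sketch of how a benchmark would subclass `PageTest` (the class and method names here are illustrative, not taken from Telemetry): the constructor receives the name of the method that `Run` should dispatch to.

```python
class ExampleMeasurement(PageTest):
  def __init__(self):
    super(ExampleMeasurement, self).__init__(
        'MeasurePage', action_name_to_run='smoothness')

  def MeasurePage(self, page, tab, results):
    # Called by Run() after the compound action has executed.
    results.AddSuccess(page)
```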
| GreenLunar/Bookie | bookie/tests/test_utils/test_search.py | Python | agpl-3.0 | 1,797 | 0 |
"""Test if correct arguments are passed to Whoosh to search
indexed content
"""
from mock import patch
from pyramid import testing
from unittest import TestCase
class TestSearchAttr(TestCase):
attr = []
def _return_attr(self, *args, **kwargs):
"""Saves arguments passed to WhooshFulltext
search function to attr
"""
self.attr = [args, kwargs]
return []
def setUp(self):
from pyramid.paster import get_app
from bookie.tests import BOOKIE_TEST_INI
app = get_app(BOOKIE_TEST_INI, 'bookie')
from webtest import TestApp
self.testapp = TestApp(app)
testing.setUp()
def tearDown(self):
testing.tearDown()
@patch('bookie.models.fulltext.WhooshFulltext')
def test_search_content(self, mock_search):
"""Test if correct arguments are passed to WhooshFulltext if
searched through webui"""
mock_search().search.side_effect = self._return_attr
self.testapp.get('/results/bookie')
self.assertTrue(mock_search.called)
self.assertEqual(self.attr[0][0],
'bookie',
'search term should be bookie')
self.assertTrue(self.attr[1]['content'])
@patch('bookie.models.fulltext.WhooshFulltext')
def test_search_content_ajax(self, mock_search):
"""Test if correct arguments are passed to W
|
hooshFulltext
with ajax request"""
mock_search().search.side_effect = self._return_attr
self.testapp.get(url='/results/ajax', xhr=True)
self.assertTrue(mock_search.called)
self.assertEqual(self.attr[0][0],
'ajax',
'search term should be ajax')
self.assertTrue(self.attr[1]['content'])
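These tests lean on a `mock` detail worth noting: calling a patched class (`mock_search()`) returns the same instance mock every time, so a `side_effect` installed on it observes calls the application makes later. A standalone sketch (`unittest.mock` behaves the same as the `mock` package here; names are illustrative):

```python
from unittest.mock import MagicMock

captured = []

def record_call(*args, **kwargs):
    captured.append((args, kwargs))
    return []

mock_search = MagicMock()
mock_search().search.side_effect = record_call  # the instance mock is shared
mock_search().search('bookie', content=True)    # simulates the app's call
print(captured)  # [(('bookie',), {'content': True})]
```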
| tylerclair/py3canvas | py3canvas/tests/progress.py | Python | mit | 858 | 0.001166 |
"""Progress API Tests for Version 1.0.
|
This is a testing template for the generated ProgressAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.progress import ProgressAPI
from py3canvas.apis.progress import Progress
class TestProgressAPI(unittest.TestCase):
"""Tests for the ProgressAPI."""
def setUp(self):
self.client = ProgressAPI(secrets.instance_address, secrets.access_token)
def test_query_progress(self):
"""Integrati
|
on test for the ProgressAPI.query_progress method."""
id = None # Change me!!
r = self.client.query_progress(id)
def test_query_progress_with_course(self):
"""Integration test for the ProgressAPI.query_progress method."""
course_id = None # Change me!!
id = None # Change me!!
r = self.client.query_progress(course_id, id)
| varunarya10/rally | rally/deploy/engines/fuel.py | Python | apache-2.0 | 6,902 | 0.000724 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally import consts
from rally.deploy import engine
from rally.deploy.fuel import fuelclient
from rally import exceptions
from rally import objects
FILTER_SCHEMA = {
"type": "string",
"pattern": "^(ram|cpus|storage|mac)(==|<=?|>=?|!=)(.+)$",
}
NODE_SCHEMA = {
"type": "object",
"required": ["amount"],
"properties": {
"amount": {"type": "integer"},
"filters": {
"type"
|
: "array",
"uniqueItems": True,
"items": FILTER_SCHEMA,
},
},
"additionalProperties": False
}
IPV4_PATTERN = r"(\d+\.){3}\d+"
IPV4_ADDRESS_PATTERN = "^%s$" % IPV4_PATTERN
IPV4_CIDR_PATTERN = r"^%s\/\d+$" % IPV4_PATTERN
IP_RANGE_SCHEMA = {
"type": "array",
"maxItems": 2,
"minItems": 2,
"items": {
"type": "s
|
tring",
"pattern": IPV4_ADDRESS_PATTERN,
}
}
NETWORK_SCHEMA = {
"type": "object",
"properties": {
"cidr": {"type": "string", "pattern": IPV4_CIDR_PATTERN},
"gateway": {"type": "string", "pattern": IPV4_ADDRESS_PATTERN},
"ip_ranges": {"type": "array", "items": IP_RANGE_SCHEMA},
"vlan_start": {"type": "integer"},
}
}
NETWORKS_SCHEMA = {
"type": "object",
"properties": {
"public": NETWORK_SCHEMA,
"floating": NETWORK_SCHEMA,
"management": NETWORK_SCHEMA,
"storage": NETWORK_SCHEMA,
},
}
class FuelEngine(engine.EngineFactory):
"""Deploy with FuelWeb.
Sample configuration:
{
"type": "FuelEngine",
"deploy_name": "Rally multinode 01",
"release": "Havana on CentOS 6.4",
"api_url": "http://10.20.0.2:8000/api/v1/",
"mode": "multinode",
"nodes": {
"controller": {"amount": 1, "filters": ["storage>80G"]},
"compute": {"amount": 1, "filters": ["storage>80G"]}
},
"net_provider": "nova_network",
"dns_nameservers": ["172.18.208.44", "8.8.8.8"],
"networks": {
"public": {
"cidr": "10.3.3.0/24",
"gateway": "10.3.3.1",
"ip_ranges": [["10.3.3.5", "10.3.3.254"]],
"vlan_start": 14
},
"floating": {
"cidr": "10.3.4.0/24",
"ip_ranges": [["10.3.4.5", "10.3.4.254"]],
"vlan_start": 14
}
}
}
"""
CONFIG_SCHEMA = {
"type": "object",
"required": ["deploy_name", "api_url", "mode", "networks",
"nodes", "release", "net_provider"],
"properties": {
"release": {"type": "string"},
"deploy_name": {"type": "string"},
"api_url": {"type": "string"},
"mode": {"type": "string"},
"net_provider": {"type": "string"},
"networks": NETWORKS_SCHEMA,
"nodes": {
"type": "object",
"required": ["controller"],
"properties": {
"controller": NODE_SCHEMA,
"compute": NODE_SCHEMA,
"cinder": NODE_SCHEMA,
"cinder+compute": NODE_SCHEMA,
},
},
},
}
def validate(self):
super(FuelEngine, self).validate()
if "compute" not in self.config["nodes"]:
if "cinder+compute" not in self.config["nodes"]:
raise exceptions.ValidationError(
_("At least one compute is required."))
def _get_nodes(self, key):
if key not in self.config["nodes"]:
return []
amount = self.config["nodes"][key]["amount"]
filters = self.config["nodes"][key]["filters"]
nodes = []
for i in range(amount):
node = self.nodes.pop(filters)
if node is None:
raise exceptions.NoNodesFound(filters=filters)
nodes.append(node)
return nodes
def _get_release_id(self):
releases = self.client.get_releases()
for release in releases:
if release["name"] == self.config["release"]:
return release["id"]
raise exceptions.UnknownRelease(release=self.config["release"])
def deploy(self):
self.client = fuelclient.FuelClient(self.config["api_url"])
self.nodes = self.client.get_nodes()
controllers = self._get_nodes("controller")
computes = self._get_nodes("compute")
cinders = self._get_nodes("cinder")
computes_cinders = self._get_nodes("cinder+compute")
cluster = fuelclient.FuelCluster(
self.client,
name=self.config["deploy_name"],
release=self._get_release_id(),
mode=self.config["mode"],
net_provider=self.config["net_provider"],
net_segment_type=self.config.get("net_segment_type", "gre"),
)
cluster.set_nodes(controllers, ["controller"])
cluster.set_nodes(computes, ["compute"])
cluster.set_nodes(cinders, ["cinder"])
cluster.set_nodes(computes_cinders, ["compute", "cinder"])
cluster.configure_network(self.config["networks"])
cluster.deploy()
self.deployment.add_resource("FuelEngine",
type="cloud",
info={"id": cluster.cluster["id"]})
ip = cluster.get_endpoint_ip()
attrs = cluster.get_attributes()["editable"]["access"]
admin_endpoint = objects.Endpoint(
"http://%s:5000/v2.0/" % ip,
attrs["user"]["value"],
attrs["password"]["value"],
attrs["tenant"]["value"],
consts.EndpointPermission.ADMIN)
return {"admin": admin_endpoint}
def cleanup(self):
resources = self.deployment.get_resources(provider_name="FuelEngine",
type="cloud")
self.client = fuelclient.FuelClient(self.config["api_url"])
for res in resources:
self.client.delete_cluster(res["info"]["id"])
objects.Deployment.delete_resource(res["id"])
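The schema constants above are plain JSON Schema fragments, so they can be exercised directly. A quick sketch using the `jsonschema` package (an assumption; any JSON Schema validator works) against the `NODE_SCHEMA` defined above:

```python
import jsonschema

node = {"amount": 2, "filters": ["ram>=8192", "storage>80"]}
jsonschema.validate(node, NODE_SCHEMA)  # passes silently

bad = {"amount": 2, "filters": ["disk>80"]}  # 'disk' is not an allowed field
try:
    jsonschema.validate(bad, NODE_SCHEMA)
except jsonschema.ValidationError as e:
    print(e.message)
```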
| mrunge/openstack_horizon | openstack_horizon/dashboards/project/images/images/views.py | Python | apache-2.0 | 4,298 | 0 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from horizon_lib import tabs
from horizon_lib.utils import memoized
from openstack_horizon import api
from openstack_horizon.dashboards.project.images.images \
import forms as project_forms
from openstack_horizon.dashboards.project.images.images \
import tables as project_tables
from openstack_horizon.dashboards.project.images.images \
import tabs as project_tabs
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateImageForm
template_name = 'project/images/images/create.html'
context_object_name = 'image'
success_url = reverse_lazy("horizon:project:images:index")
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateImageForm
template_name = 'project/images/images/update.html'
success_url = reverse_lazy("horizon:project:images:index")
@memoized.memoized_method
def get_object(self):
try:
return api.glance.image_get(self.request, self.kwargs['image_id'])
except Exception:
msg = _('Unable to retrieve image.')
url = reverse('horizon:project:images:index')
exceptions.handle(self.request, msg, redirect=url)
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['image'] = self.get_object()
return context
def get_initial(self):
image = self.get_object()
properties = getattr(image, 'properties', {})
return {'image_id': self.kwargs['image_id'],
'name': getattr(image, 'name', None) or image.id,
'description': properties.get('description', ''),
'kernel': properties.get('kernel_id', ''),
'ramdisk': properties.get('ramdisk_id', ''),
'architecture': properties.get('architecture', ''),
'disk_format': getattr(image, 'disk_format', None),
'minimum_ram': getattr(image, 'min_ram', None),
'minimum_disk': getattr(image, 'min_disk', None),
'public': getattr(image, 'is_public', None),
'protected': getattr(image, 'protected', None)}
class DetailView(tabs.TabView):
tab_group_class = project_tabs.ImageDetailTabs
template_name = 'project/images/images/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
image = self.get_data()
table = project_tables.ImagesTable(self.request)
context["image"] = image
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(image)
return context
@staticmethod
def get_redirect_url():
return reverse_lazy('horizon:project:images:index')
@memoized.memoized_method
def get_data(self):
try:
return api.glance.image_get(self.request, self.kwargs['image_id'])
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve image details.'),
redirect=self.get_redirect_url())
def get_tabs(self, request, *args, **kwargs):
image = self.get_data()
return self.tab_group_class(request, image=image, **kwargs)
| firasbenmakhlouf/JobLookup | metadata/models.py | Python | mit | 467 | 0 |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TanitJobsCategory(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return "%s" % self.name
class KeeJobsCategory(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return "%s" % self.
|
name
| tboyce1/home-assistant | homeassistant/components/tasmota/fan.py | Python | apache-2.0 | 2,750 | 0.000364 |
"""Support for Tasmota fans."""
from hatasmota import const as tasmota_const
from homeassistant.components import fan
from homeassistant.components.fan import FanEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
HA_TO_TASMOTA_SPEED_MAP = {
fan.SPEED_OFF: tasmota_const.FAN_SPEED_OFF,
fan.SPEED_LOW: tasmota_const.FAN_SPEED_LOW,
fan.SPEED_MEDIUM: tasmota_const.FAN_SPEED_MEDIUM,
fan.SPEED_HIGH: tasmota_const.FAN_SPEED_HIGH,
}
TASMOTA_TO_HA_SPEED_MAP = {v: k for k, v in HA_TO_TASMOTA_SPEED_MAP.items()}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Tasmota fan dynamically through discovery."""
@callback
def async_discover(tasmota_entity, discovery_hash):
"""Discover and add a Tasmota fan."""
async_add_entities(
[TasmotaFan(tasmota_entity=tasmota_entity, discovery_hash=discovery_hash)]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(fan.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(fan.DOMAIN),
async_discover,
)
class TasmotaFan(
TasmotaAvailability,
TasmotaDiscoveryUpdate,
FanEntity,
):
"""Representation of a Tasmota fan."""
def __init__(self, **kwds):
"""Initialize the Tasmota fan."""
self._state = None
super().__init__(
**kwds,
)
@property
def speed(self):
"""Return the current speed."""
return TASMOTA_TO_HA_SPEED_MAP.get(self._state)
@property
def speed_list(self):
"""Get the list of available speeds."""
return list(HA_TO_TASMOTA_SPEED_MAP)
@property
def supported_features(self):
"""Flag supported features."""
return fan.SUPPORT_SET_SPEED
async def async_set_speed(self, speed):
"""Set the speed of the fan."""
if speed not in HA_TO_TASMOTA_SPEED_MAP:
raise ValueError(f"Unsupported speed {speed}")
if speed == fan.SPEED_OFF:
await self.async_turn_off()
else:
self._tasmota_entity.set_speed(HA_TO_TASMOTA_SPEED_MAP[speed])
async def async_turn_on(self, speed=None, **kwargs):
"""Turn the fan on."""
# Tasmota does not support turning a fan on with implicit speed
await self.async_set_speed(speed or fan.SPEED_MEDIUM)
async def async_turn_off(self, **kwargs):
"""Turn the fan off."""
self._tasmota_entity.set_speed(tasmota_const.FAN_SPEED_OFF)
| sgrogan/freecad-extras-eaglepcb2 | command/PCBexplode.py | Python | lgpl-2.1 | 23,993 | 0.00696 |
# -*- coding: utf8 -*-
#****************************************************************************
#* *
#* Printed Circuit Board Workbench for FreeCAD PCB *
#* Flexible Printed Circuit Board Workbench for FreeCAD FPCB *
#* Copyright (c) 2013, 2014, 2015 *
#* marmni <marmni@onet.eu> *
#* *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#****************************************************************************
import FreeCAD
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
from functools import partial
class ser:
def __init__(self):
self.dostepneWarstwy = QtGui.QComboBox()
self.dostepneWarstwy.addItems(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])
self.dostepneWarstwy.setCurrentIndex(1)
self.dostepneWarstwy.setMaximumWidth(60)
class explodeObjectTable(QtGui.QTreeWidget):
def __init__(self, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setItemsExpandable(True)
self.setSortingEnabled(False)
self.setHeaderLabels(['Object', 'Layer'])
self.setFrameShape(QtGui.QFrame.NoFrame)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setStyleSheet('''
QTreeWidget QHeaderView
{
border:0px;
}
QTreeWidget
{
border: 1px solid #9EB6CE;
border-top:0px;
}
QTreeWidget QHeaderView::section
{
color:#4C4161;
font-size:12px;
border:1px solid #9EB6CE;
border-left:0px;
padding:5px;
}
''')
def showHideAllObj(self, value):
root = self.invisibleRootItem()
for i in range(root.childCount()):
if not root.child(i).checkState(0) == 2:
root.child(i).setCheckState(0, QtCore.Qt.Unchecked)
root.child(i).setHidden(value)
def DeSelectAllObj(self, value):
root = self.invisibleRootItem()
for i in range(root.childCount()):
if not root.child(i).isHidden():
root.child(i).setCheckState(0, value)
def schowajItem(self, name, value):
value = [False, None, True][value]
root = self.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
if item.data(0, QtCore.Qt.UserRole) == name:
item.setCheckState(0, QtCore.Qt.Unchecked)
item.setHidden(value)
return
return
class explodeWizardWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
#
self.TopStepSize = QtGui.QSpinBox()
self.TopStepSize.setRange(5, 100)
self.TopStepSize.setValue(10)
self.TopStepSize.setSingleStep(5)
self.BottomStepSize = QtGui.QSpinBox()
self.BottomStepSize.setRange(5, 100)
self.BottomStepSize.setValue(10)
self.BottomStepSize.setSingleStep(5)
#
self.tableTop = explodeObjectTable()
self.tableBottom = explodeObjectTable()
# partial doesn't work here
self.connect(self.tableTop, QtCore.SIGNAL('itemClicked(QTreeWidgetItem*, int)'), self.klikGora)
self.connect(self.tableBottom, QtCore.SIGNAL('itemClicked(QTreeWidgetItem*, int)'), self.klikDol)
#
self.setActive = QtGui.QCheckBox('Active')
self.setActive.setChecked(False)
self.inversObj = QtGui.QCheckBox('Inverse')
self.inversObj.setChecked(False)
#
przSelectAllT = QtGui.QPushButton('')
przSelectAllT.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
przSelectAllT.setFlat(True)
przSelectAllT.setIcon(QtGui.QIcon(":/data/img/checkbox_checked_16x16.png"))
przSelectAllT.setToolTip('Select all')
par = partial(self.selectAllObj, self.tableTop, self.tableBottom)
self.connect(przSelectAllT, QtCore.SIGNAL('pressed ()'), par)
przSelectAllTF = QtGui.QPushButton('')
przSelectAllTF.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
przSelectAllTF.setFlat(True)
przSelectAllTF.setIcon(QtGui.QIcon(":/data/img/checkbox_unchecked_16x16.PNG"))
przSelectAllTF.setToolTip('Deselect all')
par = partial(self.deselectAllObj, self.tableTop, self.tableBottom)
self.connect(przSelectAllTF, QtCore.SIGNAL('pressed ()'), par)
przSelectAllT1 = QtGui.QPushButton('')
przSelectAllT1.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
przSelectAllT1.setFlat(True)
przSelectAllT1.setIcon(QtGui.QIcon(":/data/img/checkbox_checked_16x16.png"))
przSelectAllT1.setToolTip('Select all')
par = partial(self.selectAllObj, self.tableBottom, self.tableTop)
self.connect(przSelectAllT1, QtCore.SIGNAL('pressed ()'), par)
przSelectAllTF1 = QtGui.QPushButton('')
przSelectAllTF1.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
przSelectAllTF1.setFlat(True)
przSelectAllTF1.setIcon(QtGui.QIcon(":/data/img/checkbox_unchecked_16x16.PNG"))
przSelectAllTF1.setToolTip('Deselect all')
par = partial(self.deselectAllObj, self.tableBottom, self.tableTop)
self.connect(przSelectAllTF1, QtCore.SIGNAL('pressed ()'), par)
#
lay = QtGui.QGridLayout()
lay.addWidget(QtGui.QLabel('Top Step Size'), 0, 0, 1, 2)
lay.addWidget(self.TopStepSize, 0, 2, 1, 1)
lay.addWidget(przSelectAllT, 1, 0, 1, 1)
lay.addWidget(przSelectAllTF, 2, 0, 1, 1)
lay.addWidget(self.tableTop, 1, 1, 3, 2)
lay.addWidget(QtGui.QLabel('Bottom Step Size'), 4, 0, 1, 2)
lay.addWidget(self.BottomStepSize, 4, 2, 1, 1)
lay.addWidget(przSelectAllT1, 5, 0, 1, 1)
lay.addWidget(przSelectAllTF1, 6, 0, 1, 1)
lay.addWidget(self.tableBottom, 5, 1, 3, 2)
lay.addWidget(self.setActive, 8, 0, 1, 3)
lay.addWidget(self.inversObj, 9, 0, 1, 3)
lay.setColumnStretch(1, 10)
lay.setColumnStretch(2, 5)
self.setLayout(lay)
def selectAllObj(self, tabela_1, tabela_2):
tabela_1.DeSelectAllObj(QtCore.Qt.Checked)
tabela_2.showHideAllObj(True)
def deselectAllObj(self, tabela_1, tabela_2):
tabela_1.DeSelectAllObj(QtCore.Qt.Unchecked)
tabela_2.showHideAllObj(False)
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/os.py | Python | mit | 21,471 | 0.00312 |
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import curdir, pardir, sep, pathsep, defpath, altsep, devnull
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777):
"""makedirs(path [, mode=0o777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dir
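The docstring's example is cut off above; a short self-contained sketch of the in-place pruning it describes (the 'CVS' directory name follows the docstring, everything else is illustrative):

```python
import os

for dirpath, dirnames, filenames in os.walk('.'):
    # Prune in place: walk() will not descend into the removed entries.
    dirnames[:] = [d for d in dirnames if d != 'CVS']
    print(dirpath, 'has', len(filenames), 'non-directory files')
```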
| speksi/python-mingus | unittest/test_track.py | Python | gpl-3.0 | 739 | 0.006766 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path += ['../']
from mingus.containers.track import Track
from mingus.containers.bar import Bar
from mingus.containers.instrument import Instrument, Piano, Guitar
import unittest
class test_Track(unittest.TestCase):
def setUp(self):
self.i = Track(Instrument())
self.p = Track(Piano())
self.g = Track(Guitar())
self.tr = Track()
def test_add(self):
pass
def test_transpose(self):
t = Track()
t + 'C'
t + 'E'
t.transpose('3')
s = Track()
s + 'E'
s + 'G#'
self.assertEqual(s, t)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test_Track)
| madsryvang/LVsbp | python/sbp/navigation.py | Python | lgpl-3.0 | 28,612 | 0.009681 |
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Fergus Noble <fergus@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Geodetic navigation messages reporting GPS time, position, velocity,
and baseline position solutions. For position solutions, these
messages define several different position solutions: single-point
(SPP), RTK, and pseudo-absolute position solutions.
The SPP is the standalone, absolute GPS position solution using only
a single receiver. The RTK solution is the differential GPS
solution, which can use either a fixed/integer or floating carrier
phase ambiguity. The pseudo-absolute position solution uses a
user-provided, well-surveyed base station position (if available)
and the RTK solution in tandem.
"""
from construct import *
import json
from sbp.msg import SBP, SENDER_ID
from sbp.utils import fmt_repr, exclude_fields, walk_json_dict, containerize, greedy_string
# Automatically generated from piksi/yaml/swiftnav/sbp/navigation.yaml with generate.py.
# Please do not hand edit!
SBP_MSG_GPS_TIME = 0x0100
class MsgGPSTime(SBP):
"""SBP class for message MSG_GPS_TIME (0x0100).
You can have MSG_GPS_TIME inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This message reports the GPS time, representing the time since
the GPS epoch began on midnight January 6, 1980 UTC. GPS time
counts the weeks and seconds of the week. The weeks begin at the
Saturday/Sunday transition. GPS week 0 began at the beginning of
the GPS time scale.
Within each week number, the GPS time of the week is between
0 and 604800 seconds (=60*60*24*7). Note that GPS time
does not accumulate leap seconds, and as of now, has a small
offset from UTC. In a message stream, this message precedes a
set of other navigation messages referenced to the same time
(but lacking the ns field) and indicates a more precise time of
these messages.
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
wn : int
GPS week number
tow : int
GPS time of week rounded to the nearest millisecond
ns : int
Nanosecond residual of millisecond-rounded TOW (ranges
from -500000 to 500000)
flags : int
Status flags (reserved)
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = Struct("MsgGPSTime",
ULInt16('wn'),
ULInt32('tow'),
SLInt32('ns'),
ULInt8('flags'),)
__slots__ = [
'wn',
'tow',
'ns',
'flags',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgGPSTime,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgGPSTime, self).__init__()
self.msg_type = SBP_MSG_GPS_TIME
self.sender = kwargs.pop('sender', SENDER_ID)
self.wn = kwargs.pop('wn')
self.tow = kwargs.pop('tow')
self.ns = kwargs.pop('ns')
self.flags = kwargs.pop('flags')
def __repr__(self):
return fmt_repr(self)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgGPSTime._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgGPSTime._parser.build(c)
return self.pack()
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
sbp = SBP.from_json_dict(d)
return MsgGPSTime(sbp)
def to_json_dict(self):
self.to_binary()
d = super( MsgGPSTime, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
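# Hypothetical usage sketch, per the docstring above (field values are
# illustrative): construct the message inline from its fields, then pack it.
#   msg = MsgGPSTime(wn=1867, tow=14400000, ns=0, flags=0)
#   framed = msg.to_binary()  # framed/packed SBP bytes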
SBP_MSG_DOPS = 0x0206
class MsgDops(SBP):
"""SBP class for message MSG_DOPS (0x0206).
You can have MSG_DOPS inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
This dilution of precision (DOP) message describes the effect of
navigation satellite geometry on positional measurement
precision.
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
gdop : int
Geometric Dilution of Precision
pdop : int
Position Dilution of Precision
tdop : int
Time Dilution of Precision
hdop : int
Horizontal Dilution of Precision
vdop : int
Vertical Dilution of Precision
sender : int
Optional sender ID, defaults to SENDER_ID (see sbp/msg.py).
"""
_parser = Struct("MsgDops",
ULInt32('tow'),
ULInt16('gdop'),
ULInt16('pdop'),
ULInt16('tdop'),
ULInt16('hdop'),
ULInt16('vdop'),)
__slots__ = [
'tow',
'gdop',
'pdop',
'tdop',
'hdop',
'vdop',
]
def __init__(self, sbp=None, **kwargs):
if sbp:
super( MsgDops,
self).__init__(sbp.msg_type, sbp.sender, sbp.length,
sbp.payload, sbp.crc)
self.from_binary(sbp.payload)
else:
super( MsgDops, self).__init__()
self.msg_type = SBP_MSG_DOPS
self.sender = kwargs.pop('sender', SENDER_ID)
self.tow = kwargs.pop('tow')
self.gdop = kwargs.pop('gdop')
self.pdop = kwargs.pop('pdop')
self.tdop = kwargs.pop('tdop')
self.hdop = kwargs.pop('hdop')
self.vdop = kwargs.pop('vdop')
def __repr__(self):
return fmt_repr(self)
def from_binary(self, d):
"""Given a binary payload d, update the appropriate payload fields of
the message.
"""
p = MsgDops._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n))
def to_binary(self):
"""Produce a framed/packed SBP message.
"""
c = containerize(exclude_fields(self))
self.payload = MsgDops._parser.build(c)
return self.pack()
@staticmethod
def from_json(s):
"""Given a JSON-encoded string s, build a message object.
"""
d = json.loads(s)
sbp = SBP.from_json_dict(d)
return MsgDops(sbp)
def to_json_dict(self):
self.to_binary()
d = super( MsgDops, self).to_json_dict()
j = walk_json_dict(exclude_fields(self))
d.update(j)
return d
SBP_MSG_POS_ECEF = 0x0200
class MsgPosECEF(SBP):
"""SBP class for message MSG_POS_ECEF (0x0200).
You can have MSG_POS_ECEF inherit its fields directly
from an inherited SBP object, or construct it inline using a dict
of its fields.
The position solution message reports absolute Earth Centered
Earth Fixed (ECEF) coordinates and the status (single point vs
pseudo-absolute RTK) of the position solution. If the rover
receiver knows the surveyed position of the base station and has
an RTK solution, this reports a pseudo-absolute position
solution using the base station position and the rover's RTK
baseline vector. The full GPS time is given by the preceding
MSG_GPS_TIME with the matching time-of-week (tow).
Parameters
----------
sbp : SBP
SBP parent object to inherit from.
tow : int
GPS Time of Week
x : double
ECEF X coordinate
y : double
ECEF Y coordinate
z : double
ECEF Z coordinate
accuracy : int
Position accuracy estimate (not implemented). Defaults
to 0.
n_sats : int
Number of satellites used in solution
flags : int
Status flags
sender : int
Optional sender ID, defaults to SENDER_ID (see sb
| webofthings/wot-a-mashup | cs/proxy/node_modules/dtrace-provider/build/c4che/Release.cache.py | Python | gpl-3.0 | 1,407 | 0 |
AR = '/usr/bin/ar'
ARFLAGS = 'rcs'
CCFLAGS = ['-g']
CCFLAGS_MACBUNDLE = ['-fPIC']
CCFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CC_VERSION = ('4', '4', '3')
COMPILER_CXX = 'g++'
CPP = '/usr/bin/cpp'
CPPFLAGS_NODE = ['-D_GNU_SOURCE']
CPPPATH_NODE = '/usr/local/include/node'
CPPPATH_ST = '-I%s'
CXX = ['/usr/bin/g++']
CXXDEFINES_ST = '-D%s'
CXXFLAGS = ['-g']
CXXFLAGS_DEBUG = ['-g']
CXXFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CXXFLAGS_RELEASE = ['-O2']
CXXLNK_SRC_F = ''
CXXLNK_TGT_F = ['-o', '']
CXX_NAME = 'gcc'
CXX_SRC_F = ''
CXX_TGT_F = ['-c', '-o', '']
DEST_BINFMT = 'elf'
DEST_CPU = 'x86_64'
DEST_OS = 'linux'
FULLSTATIC_MARKER = '-static'
LIBDIR = '/usr/lib/nodejs:/usr/share/javascript'
LIBPATH_NODE = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINK_CXX = ['/usr/bin/g++']
NODE_PATH = '/usr/lib/nodejs:/usr/share/javascript'
PREFIX = '/usr/local'
PREFIX_NODE = '/usr/local'
RANLIB = '/usr/bin/ranlib'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = '-Wl,-Bdynamic'
SONAME_ST = '-Wl,-h,%s'
STATICLIBPATH_ST = '-L%s'
STATICLIB_MARKER = '-Wl,-Bstatic'
STATICLIB_ST = '-l%s'
macbundle_PATTERN = '%s.bundle'
program_PATTERN = '%s'
shlib_CXXFLAGS = ['-fPIC', '-DPIC']
shlib_LINKFLAGS = ['-shared']
shlib_PATTERN = 'lib%s.so'
staticlib_LINKFLAGS = ['-Wl,-Bstatic']
staticlib_PATTERN = 'lib%s.a'
| calico/basenji | tests/test_train.py | Python | apache-2.0 | 6,235 | 0.013953 |
#!/usr/bin/env python
from optparse import OptionParser
import glob
import os
import shutil
import unittest
import numpy as np
import pandas as pd
from scipy.stats import mannwhitneyu
from scipy.stats import ttest_ind
import slurm
class TestTrain(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.params_file = 'train/params.json'
cls.data_dir = 'train/data'
cls.ref_dir = 'train/ref'
cls.iterations = 4
cls.basenji_path = '/home/drk/code/basenji2/bin'
cls.conda_env = 'tf1.15-gpu2'
cls.queue = 'gtx1080ti'
def test_train(self):
exp_dir = 'train/exp'
if os.path.isdir(exp_dir):
shutil.rmtree(exp_dir)
os.mkdir(exp_dir)
################################################################
# train
################################################################
jobs = []
for i in range(self.iterations):
it_dir = '%s/%d' % (exp_dir, i)
os.mkdir(it_dir)
# basenji train
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % self.conda_env
basenji_cmd += ' %s/basenji_train.py' % self.basenji_path
basenji_cmd += ' -o %s/train' % it_dir
basenji_cmd += ' %s' % self.params_file
basenji_cmd += ' %s' % self.data_dir
basenji_job = slurm.Job(basenji_cmd,
name='train%d' % i,
out_file='%s/train.out'%it_dir,
err_file='%s/train.err'%it_dir,
queue=self.queue,
cpu=1,
gpu=1,
mem=23000,
time='2-00:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# test check
################################################################
jobs = []
for i in range(self.iterations):
it_dir = '%s/%d' % (exp_dir, i)
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % self.conda_env
basenji_cmd += ' %s/basenji_test.py' % self.basenji_path
basenji_cmd += ' -o %s/test_train' % it_dir
basenji_cmd += ' --tfr "train-*.tfr"'
basenji_cmd += ' %s' % self.params_file
basenji_cmd += ' %s/train/model_check.h5' % it_dir
basenji_cmd += ' %s' % self.data_dir
basenji_job = slurm.Job(basenji_cmd,
name='test%d' % i,
out_file='%s/test_train.out'%it_dir,
err_file='%s/test_train.err'%it_dir,
queue=self.queue,
cpu=1,
gpu=1,
mem=23000,
time='1:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# test best
################################################################
jobs = []
for i in range(self.iterations):
it_dir = '%s/%d' % (exp_dir, i)
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % self.conda_env
basenji_cmd += ' %s/basenji_test.py' % self.basenji_path
basenji_cmd += ' -o %s/test' % it_dir
basenji_cmd += ' %s' % self.params_file
basenji_cmd += ' %s/train/model_best.h5' % it_dir
basenji_cmd += ' %s' % self.data_dir
basenji_job = slurm.Job(basenji_cmd,
name='test%d' % i,
out_file='%s/test.out'%it_dir,
err_file='%s/test.err'%it_dir,
queue=self.queue,
cpu=1,
gpu=1,
mem=23000,
time='1:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# compare checkpoint on training set
################################################################
ref_cors = []
for acc_file in glob.glob('%s/*/test_train/acc.txt' % self.ref_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
ref_cors.append(acc_df.pearsonr.mean())
exp_cors = []
for acc_file in glob.glob('%s/*/test_train/acc.txt' % exp_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
exp_cors.append(acc_df.pearsonr.mean())
_, mwp = mannwhitneyu(ref_cors, exp_cors, alternative='two-sided')
_, tp = ttest_ind(ref_cors, exp_cors)
print('\nTrain:')
print('Reference PearsonR: %.4f (%.4f)' % (np.mean(ref_cors), np.std(ref_cors)))
print('Experiment PearsonR: %.4f (%.4f)' % (np.mean(exp_cors), np.std(exp_cors)))
print('Mann-Whitney U p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
# self.assertGreater(mwp, 0.05)
# self.assertGreater(tp, 0.05)
################################################################
# compare best on test set
################################################################
ref_cors = []
for acc_file in glob.glob('%s/*/test/acc.txt' % self.ref_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
ref_cors.append(acc_df.pearsonr.mean())
exp_cors = []
for acc_file in glob.glob('%s/*/test/acc.txt' % exp_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
exp_cors.append(acc_df.pearsonr.mean())
_, mwp = mannwhitneyu(ref_cors, exp_cors, alternative='two-sided')
_, tp = ttest_ind(ref_cors, exp_cors)
print('\nTest:')
print('Reference PearsonR: %.4f (%.4f)' % (np.mean(ref_cors), np.std(ref_cors)))
print('Experiment PearsonR: %.4f (%.4f)' % (np.mean(exp_cors), np.std(exp_cors)))
print('Mann-Whitney U p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
# self.assertGreater(mwp, 0.05)
# self.assertGreater(tp, 0.05)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
unittest.main()
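# A minimal, self-contained sketch of the reference-vs-experiment comparison
# above; the correlation values are assumed purely for illustration. High
# p-values from both tests indicate no detectable accuracy drift.
def _example_accuracy_comparison():
    ref_cors = [0.71, 0.72, 0.70, 0.73]  # per-iteration mean PearsonR (assumed)
    exp_cors = [0.70, 0.72, 0.71, 0.72]
    _, mwp = mannwhitneyu(ref_cors, exp_cors, alternative='two-sided')
    _, tp = ttest_ind(ref_cors, exp_cors)
    return mwp > 0.05 and tp > 0.05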
|
andrewgross/pyrelic
|
tests/unit/test_base_client.py
|
Python
|
mit
| 1,927
| 0
|
import requests
import httpretty
from nose.tools import nottest
from pyrelic import BaseClient
@nottest # Skip until we can properly simulate timeouts
@httpretty.activate
def test_make_request_timeout():
"""
Remote calls should time out
"""
    httpretty.register_uri(httpretty.GET, "http://www.example.com",
body=None,
)
# When I make an API request and receive no response
    c = BaseClient()
# Then I should raise a NewRelicApiException
c._make_request.when.called_with(requests.get,
"http://www.example.com",
timeout=0.05,
retries=1)\
.should.throw(requests.RequestException)
@httpretty.activate
def test_make_request_non_200():
"""
Bad HTTP Responses should throw an error
"""
httpretty.register_uri(httpretty.GET, "http://foobar.com",
body="123", status=400)
# When I make an API request and receive a 400
c = BaseClient()
# Then I should raise the appropriate requests exception
c._make_request.when.called_with(requests.get,
"http://foobar.com")\
.should.throw(requests.RequestException)
def test_client_proxy_string():
"""
Base Client should parse proxy strings
"""
# When I create a client with a proxy as a string
proxy = "baz:1234"
c = BaseClient(proxy=proxy)
# Then the Client should create the proxy config as a dictionary
c.proxy.should.equal({"http": proxy, "https": proxy})
def test_client_proxy_dict():
"""
Base Client should parse proxy dicts
"""
# When I create a client with a proxy as a dict
proxy = {"baz": "1234"}
c = BaseClient(proxy=proxy)
# Then the Client should create the proxy config as a dictionary
c.proxy.should.equal(proxy)
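# A minimal sketch of the proxy normalization the two tests above assert,
# written here as an assumption rather than pyrelic's actual implementation.
def _normalize_proxy(proxy):
    if isinstance(proxy, str):
        return {"http": proxy, "https": proxy}
    return proxy  # mappings pass through unchanged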
|
asconz/SystematicPrioritization
|
application/views/Delegates.py
|
Python
|
mit
| 6,089
| 0.002135
|
#! /usr/bin/python
######################
# views\Delegates.py #
######################
from PyQt5.QtWidgets import QStyleOptionViewItem, QDateEdit
from PyQt5.QtSql import QSqlRelationalDelegate
from PyQt5.QtCore import Qt, QDate
(TASK, WEIGHT, CATEGORY, DESCRIPTION, IMPORTANCE, URGENCY, DIFFICULTY,
DT_ADDED, DATE_DUE, DT_COMPLETE, STATUS, DT_ARCHIVED, PRIORITY_ID) = range(13)
class PriorityDelegate(QSqlRelationalDelegate):
def __init__(self, parent=None):
super(PriorityDelegate, self).__init__(parent)
def paint(self, painter, option, index):
""" This method will be called every time a particular cell is in view and that view
is changed in some way. We ask the delegates parent (in this case a table view)
if the index in question (the table cell) already has a widget associated with it.
If not, create one with the text for this index and connect its clicked signal
to a slot in the parent view so we are notified when its used and can do something."""
my_option = QStyleOptionViewItem(option)
if index.column() in [DT_ADDED, DATE_DUE, DT_COMPLETE, DT_ARCHIVED]:
my_option.displayAlignment |= (Qt.AlignRight | Qt.AlignVCenter)
# if index.column() == STATUS:
# my_option.displayAlignment |= (Qt.AlignJustify | Qt.AlignVCenter)
QSqlRelationalDelegate.paint(self, painter, my_option, index)
def createEditor(self, parent, option, index):
if index.column() in [TASK, WEIGHT, DT_ADDED, DT_COMPLETE, STATUS]:
return # Read-only
if index.column() == DATE_DUE:
editor = QDateEdit(parent)
editor.setMinimumDate(QDate.currentDate())
editor.setDisplayFormat("yyyy-MM-dd")
editor.setCalendarPopup(True)
editor.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
return editor
else:
return QSqlRelationalDelegate.createEditor(self, parent, option, index)
def setEditorData(self, editor, index):
if index.column() == DATE_DUE:
date = index.model().data(index, Qt.DisplayRole)
editor.setDate(QDate.fromString(date, 'yyyy-MM-dd'))
else:
QSqlRelationalDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
if index.column() == DATE_DUE:
model.setData(index, editor.date())
else:
QSqlRelationalDelegate.setModelData(self, editor, model, index)
class DetailDelegate(QSqlRelationalDelegate):
def __init__(self, parent=None):
super(DetailDelegate, self).__init__(parent)
def paint(self, painter, option, index):
my_option = QStyleOptionViewItem(option)
QSqlRelationalDelegate.paint(self, painter, my_option, index)
def createEditor(self, parent, option, index):
if index.column() == DESCRIPTION:
return # todo - DESCRIPTION is read-only - consider changing once grid-submission enabled
else:
return QSqlRelationalDelegate.createEditor(self, parent, option, index)
def setEditorData(self, editor, index):
QSqlRelationalDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
QSqlRelationalDelegate.setModelData(self, editor, model, index)
# class ButtonDelegate(QItemDelegate):
# """ A delegate that places a fully functioning QPushButton in every
# cell of the column to which it's applied """
#
# def __init__(self, parent):
# """ The parent is not an optional argument for the delegate as
# we need to reference it in the paint method (see below)"""
# QItemDelegate.__init__(self, parent)
#
# def paint(self, painter, option, index):
# """ This method will be called every time a particular cell is in view and that view
# is changed in some way. We ask the delegates parent (in this case a table view)
# if the index in question (the table cell) already has a widget associated with it.
# If not, create one with the text for this index and connect its clicked signal
# to a slot in the parent view so we are notified when its used and can do something."""
# if not self.parent().indexWidget(index):
# self.parent().setIndexWidget(
# index, QPushButton(index.data(),
# self.parent(),
# clicked=self.parent().cellButtonClicked))
#
# class ViewDelegate(QtWidgets.QItemDelegate):
# def __init__(self, parent, table):
# super(ViewDelegate, self).__init__(parent)
# self.table = table
#
# def sizeHint(self, option, index):
# # Get full viewport size
# table_size = self.viewport().size()
# gw = 1 # Grid line width
# rows = self.rowCount() or 1
# cols = self.columnCount() or 1
# width = (table_size.width() - (gw * (cols - 1))) / cols
# height = (table_size.height() - (gw * (rows - 1))) / rows
#         return QtCore.QSize(width, height)
#
# class Window(QtGui.QWidget):
#     def __init__(self, rows, columns):
# super(Window, self).__init__()
# self.lay = QtGui.QVBoxLayout()
# self.setLayout(self.lay)
# self.table = QtGui.QTableWidget(rows, columns, self)
# self.table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
# self.table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
# self.lay.addWidget(self.table)
# self.delegate = MyDelegate(self, self.table)
# self.table.setItemDelegate(self.delegate)
#
# def showEvent(self, event):
# super(Window, self).showEvent(event)
# self.resizeTable()
#
# def resizeTable(self):
# self.table.resizeRowsToContents()
# self.table.resizeColumnsToContents()
#
# def resizeEvent(self, event):
# super(Window, self).resizeEvent(event)
# self.resizeTable()
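# Assumed usage sketch: a delegate only takes effect once it is installed on a
# view, e.g.
#
#     table_view.setItemDelegate(PriorityDelegate(table_view))
#
# after which DATE_DUE cells edit through the calendar-popup QDateEdit above.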
|
matthewoliver/swift
|
test/functional/s3api/test_presigned.py
|
Python
|
apache-2.0
| 9,841
| 0
|
# Copyright (c) 2016 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from swift.common.middleware.s3api.etree import fromstring
import test.functional as tf
from test.functional.s3api import S3ApiBase
from test.functional.s3api.utils import get_error_code, get_error_msg
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestS3ApiPresignedUrls(S3ApiBase):
def test_bucket(self):
bucket = 'test-bucket'
req_objects = ('object', 'object2')
max_bucket_listing = tf.cluster_info['s3api'].get(
'max_bucket_listing', 1000)
# GET Bucket (Without Object)
status, _junk, _junk = self.conn.make_request('PUT', bucket)
self.assertEqual(status, 200)
url, headers = self.conn.generate_url_and_headers('GET', bucket)
resp = requests.get(url, headers=headers)
self.assertEqual(resp.status_code, 200,
'Got %d %s' % (resp.status_code, resp.content))
self.assertCommonResponseHeaders(resp.headers)
self.assertIsNotNone(resp.headers['content-type'])
self.assertEqual(resp.headers['content-length'],
str(len(resp.content)))
elem = fromstring(resp.content, 'ListBucketResult')
self.assertEqual(elem.find('Name').text, bucket)
self.assertIsNone(elem.find('Prefix').text)
self.assertIsNone(elem.find('Marker').text)
self.assertEqual(elem.find('MaxKeys').text,
str(max_bucket_listing))
self.assertEqual(elem.find('IsTruncated').text, 'false')
objects = elem.findall('./Contents')
self.assertEqual(list(objects), [])
# GET Bucket (With Object)
for obj in req_objects:
status, _junk, _junk = self.conn.make_request('PUT', bucket, obj)
self.assertEqual(
status, 200,
'Got %d response while creating %s' % (status, obj))
resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
self.assertCommonResponseHeaders(resp.headers)
self.assertIsNotNone(resp.headers['content-type'])
self.assertEqual(resp.headers['content-length'],
str(len(resp.content)))
elem = fromstring(resp.content, 'ListBucketResult')
self.assertEqual(elem.find('Name').text, bucket)
        self.assertIsNone(elem.find('Prefix').text)
self.assertIsNone(elem.find('Marker').text)
self.assertEqual(elem.find('MaxKeys').text,
str(max_bucket_listing))
self.assertEqual(elem.find('IsTruncated').text, 'false')
resp_objects = elem.findall('./Contents')
self.assertEqual(len(list(resp_objects)), 2)
for o in resp_objects:
self.assertIn(o.find('Key').text, req_objects)
self.assertIsNotNone(o.find('LastModified').text)
self.assertRegexpMatches(
o.find('LastModified').text,
r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
self.assertIsNotNone(o.find('ETag').text)
self.assertEqual(o.find('Size').text, '0')
            self.assertIsNotNone(o.find('StorageClass').text)
self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
self.assertEqual(o.find('Owner/DisplayName').text,
self.conn.user_id)
# DELETE Bucket
for obj in req_objects:
self.conn.make_request('DELETE', bucket, obj)
url, headers = self.conn.generate_url_and_headers('DELETE', bucket)
resp = requests.delete(url, headers=headers)
self.assertEqual(resp.status_code, 204,
'Got %d %s' % (resp.status_code, resp.content))
def test_expiration_limits(self):
if os.environ.get('S3_USE_SIGV4'):
self._test_expiration_limits_v4()
else:
self._test_expiration_limits_v2()
def _test_expiration_limits_v2(self):
bucket = 'test-bucket'
# Expiration date is too far in the future
url, headers = self.conn.generate_url_and_headers(
'GET', bucket, expires_in=2 ** 32)
resp = requests.get(url, headers=headers)
self.assertEqual(resp.status_code, 403,
'Got %d %s' % (resp.status_code, resp.content))
self.assertEqual(get_error_code(resp.content),
'AccessDenied')
self.assertIn('Invalid date (should be seconds since epoch)',
get_error_msg(resp.content))
def _test_expiration_limits_v4(self):
bucket = 'test-bucket'
# Expiration is negative
url, headers = self.conn.generate_url_and_headers(
'GET', bucket, expires_in=-1)
resp = requests.get(url, headers=headers)
self.assertEqual(resp.status_code, 400,
'Got %d %s' % (resp.status_code, resp.content))
self.assertEqual(get_error_code(resp.content),
'AuthorizationQueryParametersError')
self.assertIn('X-Amz-Expires must be non-negative',
get_error_msg(resp.content))
# Expiration date is too far in the future
for exp in (7 * 24 * 60 * 60 + 1,
2 ** 63 - 1):
url, headers = self.conn.generate_url_and_headers(
'GET', bucket, expires_in=exp)
resp = requests.get(url, headers=headers)
self.assertEqual(resp.status_code, 400,
'Got %d %s' % (resp.status_code, resp.content))
self.assertEqual(get_error_code(resp.content),
'AuthorizationQueryParametersError')
self.assertIn('X-Amz-Expires must be less than 604800 seconds',
get_error_msg(resp.content))
# Expiration date is *way* too far in the future, or isn't a number
for exp in (2 ** 63, 'foo'):
url, headers = self.conn.generate_url_and_headers(
                'GET', bucket, expires_in=exp)
resp = requests.get(url, headers=headers)
self.assertEqual(resp.status_code, 400,
'Got %d %s' % (resp.status_code, resp.content))
self.assertEqual(get_error_code(resp.content),
'AuthorizationQueryParametersError')
self.assertEqual('X-Amz-Expires should be a number',
get_error_msg(resp.content))
def test_object(self):
bucket = 'test-bucket'
obj = 'object'
status, _junk, _junk = self.conn.make_request('PUT', bucket)
self.assertEqual(status, 200)
# HEAD/missing object
head_url, headers = self.conn.generate_url_and_headers(
'HEAD', bucket, obj)
resp = requests.head(head_url, headers=headers)
self.assertEqual(resp.status_code, 404,
'Got %d %s' % (resp.status_code, resp.content))
# Wrong verb
resp = requests.get(head_url)
self.assertEqual(resp.status_code, 403,
'Got %d %s' % (resp.status_code, resp.content))
self.assertEqual(get_error_code(resp.content),
'SignatureDoesNotMatch')
# PUT empty object
put_url, headers = self.conn.generate_url_and_headers(
'PUT', bucket, obj)
resp = requests.put(put_url, data='', headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
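        # A standalone sketch (an assumption, not the actual s3api middleware)
        # of the SigV4 expiration rules these tests exercise: the value must
        # parse as a number, be non-negative, and not exceed 7 days.
        #
        #     def _validate_expires(expires_in):
        #         expires = int(expires_in)  # ValueError -> "should be a number"
        #         if expires < 0:
        #             raise ValueError('X-Amz-Expires must be non-negative')
        #         if expires > 7 * 24 * 60 * 60:
        #             raise ValueError(
        #                 'X-Amz-Expires must be less than 604800 seconds')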
|
microelly2/geodata
|
geodat/my_xmlparser.py
|
Python
|
lgpl-3.0
| 9,730
| 0.069476
|
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/Survey.xml'
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/bratton farm-2.0.xml'
# fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/test.xml'
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/Portsmouth Heights.xml'
fn='/home/thomas/Dokumente/freecad_buch/D006_landxml/FreeCAD_Document.xml'
# demo files
# http://www.landxml.org/webapps/LandXMLSamples.aspx
# http://landxml.org/schema/LandXML-2.0/samples/Carlson Software/corridor from CD3-2.0.xml
import geodat
import re
from geodat.say import say
import PySide
from PySide import QtGui
import FreeCADGui as Gui
import FreeCAD
import numpy as np
import Part
class node():
def __init__(self,typ):
# print("erzuegen node,type ",typ)
self.typ=typ
self.params={}
self.content=[]
def getParam(self,param):
return self.params[param]
def getNodes(self,typ):
ns=[]
for c in self.content:
if c.typ==typ:
ns += [c]
return ns
def addContent(self,c):
self.content += [c]
def __str__(self):
return self.typ
def getiterator(self,typ):
rc=[]
for obj in self.content:
if obj.typ==typ:
rc += [obj]
rc += obj.getiterator(typ)
return rc
def parseParams(string):
params={}
s=string
while s!="":
res = re.search(r"(\S+)=\"([^\"]*)\"\s+(\S.*)", s)
if res != None:
assert len(res.groups())==3
k,v,s=res.group(1),res.group(2),res.group(3)
params[k]=v
continue
res = re.search(r"(\S+)=\"(.*)\"", s)
if res != None:
assert len(res.groups())==2
k,v,s=res.group(1),res.group(2),""
params[k]=v
continue
else:
raise Exception("parse Params Fehler:"+ s)
s=""
return params
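# Example input/output for parseParams (values assumed for illustration):
#   parseParams('id="P7" desc="top of bank"')
#   -> {'id': 'P7', 'desc': 'top of bank'}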
def getData(fn,pb=None):
if pb==None:
pb=QtGui.QProgressBar()
pb.show()
stack=[0,0]*4
stackpointer=-1
objs=[]
say("Read data from cache file ...")
say(fn)
f=open(fn,"r")
content=f.readlines()
c2=[]
cl=len(content)
# FreeCAD File hack
    if content[2].startswith(" FreeCAD Document, see http://www.freecadweb.org"):
content=content[4:]
cl=len(content)
say(cl)
i=-1
pb.setMaximum(cl)
while i<cl-1:
pb.setValue(i)
i += 1
line=content[i].strip()
j=0
while re.search(r">\s*$", line) == None and j<60:
i += 1
j += 1
line += content[i]
c2 +=[line]
line=''
content=c2
pb.setMaximum(len(content))
for lc,line in enumerate(content):
if "<TextureHexS
|
tring>" in line:
say ("break A")
continue
pb.setValue(lc)
# if lc%100 == 0:
# say(lc)
# Gui.updateGui()
# if stackpointer != -1:
# print (res.groups())
# print (stackpointer)
# print ("\n-------------NEXT:")
# print(line)
# print ("--- PARSE IT------------------------")
if re.search(r"^\s*$",line):
continue
# ein satz
res = re.search(r"^\s*<(\S+)\s+([^<]*)/>\s*$", line)
if res != None:
# print ("complete! ",res.groups())
assert len(res.groups())==2
typ=res.group(1)
obj=node(typ)
paramstring=res.group(2)
obj.params=parseParams(paramstring)
objs += [obj]
if stackpointer != -1:
stack[stackpointer].content += [obj]
# print stack[stackpointer]
# for c in stack[stackpointer].content:
# print c,",",
# print
continue
res = re.search(r"^\s*<(\S+)\s+([^<]*)>\s*$", line)
if res != None:
# print ("!start! ",res.groups())
assert len(res.groups())==2
typ=res.group(1)
obj=node(typ)
paramstring=res.group(2)
obj.params=parseParams(paramstring)
objs += [obj]
if stackpointer != -1:
stack[stackpointer].content += [obj]
# for c in stack[stackpointer].content:
# print c,
stackpointer += 1
stack[stackpointer]=obj
continue
res = re.search(r"^\s*</([^<]*)>\s*$", line)
if res != None:
# print ("!ende---------STACKPOINTER down! ",res.groups())
assert len(res.groups())==1
stackpointer -= 1
continue
res = re.search(r"^\s*<([^<\s]*)>\s*$", line)
if res != None:
# print ("!simple start! ",res.groups())
assert len(res.groups())==1
typ=res.group(1)
obj=node(typ)
if stackpointer != -1:
stack[stackpointer].content += [obj]
stackpointer += 1
stack[stackpointer] = obj
continue
#auf und zu
res = re.search(r"^\s*<(\S+)\s*([^<]*)>(.*)</([^<]+)>\s*$", line)
if res != None:
# print ("!alles! ",res.groups())
assert len(res.groups())==4
typ=res.group(1)
obj=node(typ)
paramstring=res.group(2)
obj.params=parseParams(paramstring)
obj.text=res.group(3)
objs += [obj]
if stackpointer != -1:
stack[stackpointer].content += [obj]
# for c in stack[stackpointer].content:
# print c,
# stackpointer += 1
# stack[stackpointer]=obj
continue
raise Exception("unerwartet :" +line +":")
# x = re.findall('<([^<]*)>', line)
# for xl in x:
# print(xl)
# say("done getit--------")
FreeCAD.stackpointer=stackpointer
FreeCAD.stack=stack
FreeCAD.objs=objs
return stack[0]
if 0:
#----------------------------
# import landxml
pb=QtGui.QProgressBar()
pb.show()
# progressbar.setValue(0)
#import geodat.my_xmlparser
#reload (geodat.my_xmlparser)
from say import *
# tree=geodat.my_xmlparser.getData(fn)
tree=getData(fn)
# tree=FreeCAD.stack[0]
say("import done")
Gui.updateGui()
Ps={}
pnodes=tree.getiterator('P')
pb.setMaximum(len(pnodes))
for i,element in enumerate(pnodes):
pb.setValue(i)
# say((element.params,element.text))
_coords=element.text.split(' ')
Ps[element.params['id']]=FreeCAD.Vector(float(_coords[0]),float(_coords[1]),float(_coords[2]))
import Points
ptsa=Ps.values()
Points.show(Points.Points(ptsa))
App.activeDocument().recompute()
Gui.SendMsgToActiveView("ViewFit")
Gui.updateGui()
if 0:
for element in tree.getiterator('PntList3D')[:4]:
say((element.params,element.text))
say("Some Faces")
for element in tree.getiterator('F')[:4]:
say((element.params,element.text))
say("BREAKLINES")
for element in tree.getiterator('Breakline')[:3]:
# say((element.typ,element.params))
# say(element.content[0].text)
_coords=element.content[0].text.split(' ')
coords=np.array([float(a) for a in _coords])
coords=coords.reshape(len(_coords)/3,3)
pts=[FreeCAD.Vector(p) for p in coords]
Part.show(Part.makePolygon(pts))
App.ActiveDocument.ActiveObject.Label=element.params['desc']
Gui.updateGui()
for element in tree.getiterator('Boundary')[:10]:
say((element.typ,element.params))
# say("relations")
# for element in tree.getiterator('relation'):
# say(element.params)
1/0
col=[]
for element in tree.getiterator('F'):
say((element.params,element.text))
ixs=element.text.split(' ')
ptsf=[Ps[ix] for ix in ixs]
ptsf += [ptsf[0]]
col +=[Part.makePolygon(ptsf)]
Part.show(Part.Compound(col))
def showFace(rbf,rbf2,x,y,gridsize,shapeColor,bound):
import Draft
makeLoft=False
grids=gridsize
ws=[]
pts2=[]
xi, yi = np.linspace(np.min(x), np.max(x), grids), np.linspace(np.min(y), np.max(y), grids)
for ix in xi:
points=[]
for iy in yi:
iz=float(rbf(ix,iy))
#---------------------- special hacks #+#
if bound>0:
if iz > bound: iz = bound
if iz < -bound: iz = -bound
points.append(FreeCAD.Vector(iy,ix,iz))
if makeLoft:
w=Draft.makeWire(points,closed=False,face=False,support=None)
ws.append(w)
pts2.append(points)
if makeLoft:
ll=FreeCAD.activeDocument().addObject('Part::Loft','elevation')
ll.Sections=ws
ll.Ruled = True
ll.ViewObject.ShapeColor = shapeColor
ll.ViewObject.LineColor = (0.00,0.67,0.00)
for w in ws:
w.ViewObject.Visibility=False
ll.Label="Interpolation Gitter " + str(grids)
bs=Part.BSplineSurface()
bs.interpolate(pts2)
Part.show(bs.toShape())
import scipy.interpolate
def interpolate(x,y,z, gridsize,mode='thin_plate',rbfmode=True,shape=None):
mode=str(mode)
grids=gridsize
dx=np.max(x)-np.min(x)
dy=np.max(y)-np.min(y)
if dx>dy:
gridx=grids
gridy=int(round(dy/dx*grids))
else:
gridy=grids
gridx=int(round(dx/dy*grids))
if shape != None:
(gridy,gridx)=shape
xi, yi = np.linspace(np.min(x), np.max(x), gridx), np.linspace(np.min(y), np.max(y), gridy)
    xi, yi = np.meshgrid(xi, yi)
|
auntieNeo/asterisk-testsuite
|
tests/channels/pjsip/transfers/blind_transfer/callee_refer_only/transfer.py
|
Python
|
gpl-2.0
| 7,666
| 0.00013
|
#!/usr/bin/env python
'''
Copyright (C) 2014, Digium, Inc.
John Bigelow <jbigelow@digium.com>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''
import logging
import pjsua as pj
from twisted.internet import reactor
LOGGER = logging.getLogger(__name__)
URI = ["sip:bob@127.0.0.1", "sip:bob_two@127.0.0.1", "sip:charlie@127.0.0.1"]
ITERATION = 0
class CharlieCallback(pj.AccountCallback):
"""Derived callback class for Charlie's account."""
def __init__(self, controller, account=None):
pj.AccountCallback.__init__(self, account)
self.controller = controller
self.charlie_call = None
def on_incoming_call2(self, call, msg):
self.charlie_call = call
LOGGER.info("Incoming call for Charlie '%s' from '%s'." %
(call.info().uri, call.info().remote_uri))
if ITERATION > 0:
referred_by_hdr = "Referred-By: <sip:bob@127.0.0.1;ob>"
if (referred_by_hdr not in msg.msg_info_buffer):
LOGGER.warn("Expected header not found: '%s'" %
referred_by_hdr)
self.controller.test_object.set_passed(False)
self.controller.test_object.stop_reactor()
inbound_cb = CharliePhoneCallCallback(call)
call.set_callback(inbound_cb)
call.answer(200)
reactor.callLater(1, self.hangup_call)
def hangup_call(self):
"""Hang up the call."""
LOGGER.info("Hanging up Charlie")
self.charlie_call.hangup(code=200, reason="Q.850;cause=16")
class BobCallback(pj.AccountCallback):
"""Derived callback class for Bob's account."""
def __init__(self, account=None):
pj.AccountCallback.__init__(self, account)
self.bob_call = None
self.bob_phone_call = None
def on_incoming_call(self, call):
self.bob_call = call
LOGGER.info("Incoming call for Bob '%s' from '%s'." %
(call.info().uri, call.info().remote_uri))
self.bob_phone_call = BobPhoneCallCallback(call)
call.set_callback(self.bob_phone_call)
call.answer(200)
class AlicePhoneCallCallback(pj.CallCallback):
"""Derived callback class for Alice's call."""
def __init__(self, call=None):
pj.CallCallback.__init__(self, call)
def on_state(self):
log_call_info(self.call.info())
if self.call.info().state == pj.CallState.DISCONNECTED:
LOGGER.info("Call disconnected: '%s'" % self.call)
class BobPhoneCallCallback(pj.CallCallback):
"""Derived callback class for Bob's call."""
def __init__(self, call=None):
pj.CallCallback.__init__(self, call)
def on_state(self):
log_call_info(self.call.info())
if self.call.info().state == pj.CallState.CONFIRMED:
LOGGER.info("Call is up: '%s'." % self.call)
if self.call.info().state == pj.CallState.DISCONNECTED:
LOGGER.info("Call disconnected: '%s'" % self.call)
def transfer_call(self):
"""Transfer the call."""
try:
LOGGER.info("Attempting to blind transfer the call.")
self.call.transfer(URI[2])
LOGGER.info("The call is %s" % self.call)
except:
LOGGER.warn("Failed to transfer the call! Retrying...")
reactor.callLater(.2, self.transfer_call)
def on_transfer_status(self, code, reason, final, cont):
log_call_info(self.call.info())
if code == 200 and reason == "OK" and final == 1 and cont == 0:
LOGGER.info("Transfer target answered the call.")
LOGGER.debug("Call uri: '%s'; remote uri: '%s'" %
(self.call.info().uri,
self.call.info().remote_uri))
LOGGER.info("Hanging up Bob")
self.call.hangup(code=200, reason="Q.850;cause=16")
return cont
class CharliePhoneCallCallback(pj.CallCallback):
"""Derived callback class for Charlie's call."""
def __init__(self, call=None):
pj.CallCallback.__init__(self, call)
def on_state(self):
log_call_info(self.call.info())
if self.call.info().state == pj.CallState.DISCONNECTED:
LOGGER.info("Call disconnected: '%s'" % self.call)
class AMICallback(object):
"""Class to set up callbacks and place calls."""
def __init__(self, test_object, accounts):
self.test_object = test_object
self.ami = self.test_object.ami[0]
self.ami.registerEvent('Hangup', self.hangup_event_handler)
self.ami.registerEvent('BridgeEnter', self.bridge_enter_handler)
self.ami.registerEvent('BridgeLeave', self.bridge_leave_handler)
self.alice = accounts.get('alice')
bob = accounts.get('bob')
charlie = accounts.get('charlie')
self.bob_cb = BobCallback()
self.charlie_cb = CharlieCallback(self)
bob.account.set_callback(self.bob_cb)
charlie.account.set_callback(self.charlie_cb)
self.channels_hungup = 0
self.alice_in_bridge = False
self.bob_in_bridge = False
def bridge_enter_handler(self, ami, event):
"""AMI bridge enter event callback."""
channel = event.get('channel')
if 'bob' in channel:
self.bob_in_bridge = True
elif 'alice' in channel:
self.alice_in_bridge = True
if self.bob_in_bridge and self.alice_in_bridge:
LOGGER.info('Both Alice and Bob are in bridge; starting transfer')
self.bob_cb.bob_phone_call.transfer_call()
def bridge_leave_handler(self, ami, event):
"""AMI bridge leave event callback."""
channel = event.get('channel')
if 'bob' in channel:
self.bob_in_bridge = False
elif 'alice' in channel:
self.alice_in_bridge = False
def hangup_event_handler(self, ami, event):
"""AMI hang up event callback."""
global ITERATION
LOGGER.debug("Hangup detected for channel '%s'" % event['channel'])
self.channels_hungup += 1
        if self.channels_hungup == 3 and ITERATION == 0:
            LOGGER.info("Starting second iteration.")
self.channels_hungup = 0
ITERATION += 1
lock = self.alice.pj_lib.auto_lock()
self.make_call(self.alice.account, URI[1])
del lock
elif self.channels_hungup == 3 and ITERATION == 1:
self.test_object.stop_reactor()
def make_call(self, acc, uri):
"""Place a call.
        Keyword Arguments:
acc The pjsua to make the call from
uri The URI to dial
"""
try:
LOGGER.info("Making call to '%s'" % uri)
acc.make_call(uri, cb=AlicePhoneCallCallback())
except pj.Error, err:
LOGGER.error("Exception: %s" % str(err))
def log_call_info(call_info):
"""Log call info."""
LOGGER.debug("Call '%s' <-> '%s'" % (call_info.uri, call_info.remote_uri))
LOGGER.debug("Call state: '%s'; last code: '%s'; last reason: '%s'" %
(call_info.state_text,
call_info.last_code,
call_info.last_reason))
def transfer(test_object, accounts):
"""The test's callback method.
Keyword Arguments:
test_object The test object
accounts Configured accounts
"""
LOGGER.info("Starting first iteration.")
alice = accounts.get('alice')
obj = AMICallback(test_object, accounts)
lock = alice.pj_lib.auto_lock()
obj.make_call(accounts['alice'].account, URI[0])
del lock
# vim:sw=4:ts=4:expandtab:textwidth=79
|
njsmith/partiwm
|
parti/parti_main.py
|
Python
|
gpl-2.0
| 2,693
| 0.002228
|
# This file is part of Parti.
# Copyright (C) 2008, 2009 Nathaniel Smith <njs@pobox.com>
# Parti is released under the terms of the GNU GPL v2, or, at your option, any
# later version. See the file COPYING for details.
import gtk
import wimpiggy.lowlevel
from wimpiggy.wm import Wm
from wimpiggy.keys import HotkeyManager
from wimpiggy.util import gtk_main_quit_really
from parti.world_organizer import WorldOrganizer
from parti.tray import TraySet
from parti.addons.ipython_embed import spawn_repl_window
from parti.bus import PartiDBusService
class Parti(object):
def __init__(self, options):
self._wm = Wm("Parti", options.replace)
self._wm.connect("new-window", self._new_window_signaled)
self._wm.connect("quit", self._wm_quit)
self._trays = TraySet()
self._trays.connect("changed", self._desktop_list_changed)
# Create our display stage
self._world_organizer = WorldOrganizer(self._trays)
self._wm.get_property("toplevel").add(self._world_organizer)
self._world_organizer.show_all()
ltray = options.tray.lower()
        # __import__ returns the topmost package, and getattr cannot reach
        # sub-modules that were never imported, so the dotted path has to be
        # walked twice: once for the import, once attribute by attribute.
dynmodule = getattr(getattr(__import__('parti.trays.' + ltray), 'trays'), ltray)
dynclass = getattr(dynmodule, options.tray + "Tray")
self._trays.new(u"default", dynclass)
self._root_hotkeys = HotkeyManager(gtk.gdk.get_default_root_window())
self._root_hotkeys.add_hotkeys({"<shift><alt>r": "repl"})
self._root_hotkeys.connect("hotkey::repl",
lambda *args: self.spawn_repl_window())
for window in self._wm.get_property("windows"):
self._add_new_window(window)
# Start providing D-Bus api
self._dbus = PartiDBusService(self)
def main(self):
gtk.main()
def _wm_quit(self, *args):
        gtk_main_quit_really()
def _new_window_signaled(self, wm, window):
self._add_new_window(window)
def _add_new_window(self, window):
# FIXME: be less stupid
        self._trays.trays[0].add(window)
def _desktop_list_changed(self, *args):
self._wm.emit("desktop-list-changed", self._trays.tags())
def spawn_repl_window(self):
spawn_repl_window(self._wm,
{"parti": self,
"wm": self._wm,
"windows": self._wm.get_property("windows"),
"trays": self._trays,
"lowlevel": wimpiggy.lowlevel})
|
conjure-up/conjure-up
|
conjureup/controllers/base/addons/gui.py
|
Python
|
mit
| 1,165
| 0
|
from conjureup import controllers
from conjureup.app_config import app
from conjureup.download import EndpointType
from conjureup.telemetry import track_event
from conjureup.ui.views.addons import AddonsView
class AddonsController:
def render(self, going_back=False):
if not app.addons:
if going_back:
return self.prev_screen()
else:
return self.next_screen()
        prev_screen = self.prev_screen
        if app.endpoint_type == EndpointType.LOCAL_DIR:
prev_screen = None
self.view = AddonsView(self.finish, prev_screen)
self.view.show()
def finish(self):
app.selected_addons = self.view.selected
if app.selected_addons:
for addon in app.selected_addons:
track_event("Addon Selected", addon, "")
|
# reload the bundle data w/ addons merged
controllers.setup_metadata_controller()
self.next_screen()
def next_screen(self):
controllers.use('clouds').render()
def prev_screen(self):
controllers.use('spellpicker').render()
_controller_class = AddonsController
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-monitor/azure/mgmt/monitor/models/incident_py3.py
|
Python
|
mit
| 2,119
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Incident(Model):
"""An alert incident indicates the activation status of an alert rule.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: Incident name.
:vartype name: str
:ivar rule_name: Rule name that is associated with the incident.
:vartype rule_name: str
:ivar is_active: A boolean to indicate whether the incident is active or
resolved.
:vartype is_active: bool
:ivar activated_time: The time at which the incident was activated in
    ISO8601 format.
    :vartype activated_time: datetime
:ivar resolved_time: The time at which the incident was resolved in
ISO8601 format. If null, it means the incident is still active.
:vartype resolved_time: datetime
"""
_validation = {
'name': {'readonly': True},
'rule_name': {'readonly': True},
'is_active': {'readonly': True},
'activated_time': {'readonly': True},
'resolved_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'rule_name': {'key': 'ruleName', 'type': 'str'},
'is_active': {'key': 'isActive', 'type': 'bool'},
'activated_time': {'key': 'activatedTime', 'type': 'iso-8601'},
'resolved_time': {'key': 'resolvedTime', 'type': 'iso-8601'},
}
def __init__(self, **kwargs) -> None:
super(Incident, self).__init__(**kwargs)
self.name = None
self.rule_name = None
self.is_active = None
self.activated_time = None
self.resolved_time = None
|
DanielJDufour/language-detector
|
language_detector/prep/parse_un.py
|
Python
|
apache-2.0
| 986
| 0.004057
|
from xml.etree import ElementTree
from os.path import dirname, realpath
directory_of_sources = dirname(realpath(__file__)) + "/sources/"
d = {}
d['AR'] = "Arabic"
d['EN'] = "English"
d['ES'] = "Spanish"
d['FR'] = "French"
d['RU'] = "Russian"
d['ZH'] = "Mandarin"
filepath = '/tmp/uncorpora_plain_20090831.tmx'
count = 0
for event, elem in ElementTree.iterparse(filepath, events=('start', 'end', 'start-ns', 'end-ns')):
if event == "start":
print event, elem
if elem.tag == "tu":
uid = elem.attrib['tuid']
if elem.tag == "tuv":
        language = elem.attrib['{http://www.w3.org/XML/1998/namespace}lang']
if elem.tag == "seg":
text = elem.text
print language, "text is", text
if text and len(text) > 200:
with open(directory_of_sources + d[language] + "/" + uid, "wb") as f:
f.write(text.encode("utf-8"))
count += 1
if count == 50000:
                break
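# iterparse keeps already-processed elements in memory unless they are
# cleared; for a large TMX corpus a streaming variant like this sketch keeps
# usage bounded (handle_segment is hypothetical):
#
#     for event, elem in ElementTree.iterparse(filepath, events=('end',)):
#         if elem.tag == 'seg':
#             handle_segment(elem.text)
#         elem.clear()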
|
alabeduarte/pixelated-user-agent
|
service/pixelated/config/services.py
|
Python
|
agpl-3.0
| 2,509
| 0.001196
|
from pixelated.adapter.services.mail_service import MailService
from pixelated.adapter.model.mail import InputMail
from pixelated.adapter.services.mail_sender import MailSender
from pixelated.adapter.services.mailboxes import Mailboxes
from pixelated.adapter.soledad.soledad_querier import SoledadQuerier
from pixelated.adapter.search import SearchEngine
from pixelated.adapter.services.draft_service import DraftService
from pixelated.adapter.listeners.mailbox_indexer_listener import MailboxIndexerListener
class Services(object):
def __init__(self, leap_home, leap_session):
soledad_querier = SoledadQuerier(soledad=leap_session.soledad_session.soledad)
self.search_engine = self.setup_search_engine(
leap_home,
soledad_querier)
pixelated_mailboxes = Mailboxes(
leap_session.account,
soledad_querier,
self.search_engine)
self.mail_service = self.setup_mail_service(
leap_session,
soledad_querier,
            self.search_engine,
pixelated_mailboxes)
self.keymanager = self.setup_keymanager(leap_session)
self.draft_service = self.setup_draft_service(pixelated_mailboxes)
self.post_setup(soledad_querier, leap_session)
def post_setup(self, soledad_querier, leap_session):
self.search_engine.index_mails(
mails=self.mail_service.all_mails(),
            callback=soledad_querier.mark_all_as_not_recent)
soledad_querier.remove_duplicates()
InputMail.FROM_EMAIL_ADDRESS = leap_session.account_email()
def setup_keymanager(self, leap_session):
return leap_session.nicknym.keymanager
def setup_search_engine(self, leap_home, soledad_querier):
key = soledad_querier.get_index_masterkey()
search_engine = SearchEngine(key, agent_home=leap_home)
MailboxIndexerListener.SEARCH_ENGINE = search_engine
return search_engine
def setup_mail_service(self, leap_session, soledad_querier, search_engine, pixelated_mailboxes):
pixelated_mailboxes.add_welcome_mail_for_fresh_user()
pixelated_mail_sender = MailSender(
leap_session.account_email(),
leap_session.smtp)
return MailService(
pixelated_mailboxes,
pixelated_mail_sender,
soledad_querier,
search_engine)
def setup_draft_service(self, pixelated_mailboxes):
return DraftService(pixelated_mailboxes)
|
iulian787/spack
|
var/spack/repos/builtin/packages/libidl/package.py
|
Python
|
lgpl-2.1
| 614
| 0.004886
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libidl(AutotoolsPackage):
"""libraries for Interface Defini
|
tion Language files"""
homepage = "https://developer.gnome.org/"
url = "https://ftp.gnome.org/pub/gnome/sources/libIDL/0.8/libIDL-0.8.14.tar.bz2"
version('0.8.14', sha256='c5d24d8c096546353fbc7cedf208392d5a02afe9d56ebcc1cccb258d7c4d2220')
    depends_on('pkgconfig', type='build')
depends_on('glib')
|
allthroughthenight/aces
|
python/functions/EQBWLE.py
|
Python
|
gpl-3.0
| 1,556
| 0.003213
|
import math
# Transforms trapezoidal breakwater into a hydraulically equivalent
# rectangular breakwater
# INPUT
# rechd: head difference across equivalent rectangular breakwater
# traphd: head difference across trapezoidal breakwater
# d: water depth
# nummat: number of materials in the breakwater
# numlay: number of layers in the breakwater
# diam: mean diameter of material in the breakwater
# por: porosity of the various materials
# thk: thickness of each layer
# hlen: length of each material in the breakwater
# pref: porosity of reference material (0.435)
# dref: one half mean diameter of reference material
# OUTPUT
# lequiv: equivalent length of rectangular breakwater
# OTHER:
# betar and beta: turbulent resistance coefficients for the equivalent
# and trapezoidal breakwater
def EQBWLE(rechd, traphd, d, nummat, numlay, diam, por, thk, hlen, pref, dref):
# find betar and beta
beta0 = 2.7
betar = beta0*((1.0 - pref)/(pref**3*dref))
beta = []
for i in range(nummat):
        beta.append(beta0*((1.0 - por[i])/(por[i]**3*diam[i])))
# find equivalent rectangular breakwater length
ind = [0.0 for i in range(nummat)]
ind2 = [0.0 for j in range(numlay)]
for j in range(numlay): #layer is columns
for k in range(nummat): # material number is rows
ind[k] = (beta[k]/betar)*hlen[k][j]
sum1 = sum(ind)
ind2[j] = (thk[j]/d)/math.sqrt(sum1)
sum2 = sum(ind2)
lequiv = 1.0/sum2**2*(rechd/traphd)
return lequiv
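# A worked call with made-up geometry (every number is assumed purely for
# illustration). With nummat=2 and numlay=2, hlen is a 2x2 nested list
# indexed [material][layer]:
#
#     lequiv = EQBWLE(rechd=0.5, traphd=0.6, d=3.0, nummat=2, numlay=2,
#                     diam=[0.3, 0.5], por=[0.40, 0.45],
#                     thk=[1.5, 1.5], hlen=[[2.0, 3.0], [2.5, 3.5]],
#                     pref=0.435, dref=0.25)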
|
ghackebeil/PyORAM
|
src/pyoram/tests/test_misc.py
|
Python
|
mit
| 6,387
| 0.009238
|
import os
import unittest
import tempfile
import pyoram.util.misc
class Test(unittest.TestCase):
def test_log2floor(self):
self.assertEqual(pyoram.util.misc.log2floor(1), 0)
self.assertEqual(pyoram.util.misc.log2floor(2), 1)
self.assertEqual(pyoram.util.misc.log2floor(3), 1)
self.assertEqual(pyoram.util.misc.log2floor(4), 2)
self.assertEqual(pyoram.util.misc.log2floor(5), 2)
self.assertEqual(pyoram.util.misc.log2floor(6), 2)
self.assertEqual(pyoram.util.misc.log2floor(7), 2)
        self.assertEqual(pyoram.util.misc.log2floor(8), 3)
self.assertEqual(pyoram.util.misc.log2floor(9), 3)
def test_log2ceil(self):
self.assertEqual(pyoram.util.misc.log2ceil(1), 0)
self.assertEqual(pyoram.util.misc.log2ceil(2), 1)
self.assertEqual(pyoram.util.misc.log2ceil(3), 2)
self.assertEqual(pyoram.util.misc.log2ceil(4), 2)
self.assertEqual(pyoram.util.misc.log2ceil(5), 3)
self.assertEqual(pyoram.util.misc.log2ceil(6), 3)
self.assertEqual(pyoram.util.misc.log2ceil(7), 3)
self.assertEqual(pyoram.util.misc.log2ceil(8), 3)
self.assertEqual(pyoram.util.misc.log2ceil(9), 4)
def test_intdivceil(self):
with self.assertRaises(ZeroDivisionError):
pyoram.util.misc.intdivceil(0, 0)
with self.assertRaises(ZeroDivisionError):
pyoram.util.misc.intdivceil(1, 0)
self.assertEqual(pyoram.util.misc.intdivceil(1, 1), 1)
self.assertEqual(pyoram.util.misc.intdivceil(2, 3), 1)
self.assertEqual(2 // 3, 0)
self.assertEqual(pyoram.util.misc.intdivceil(
123123123123123123123123123123123123123123123123,
123123123123123123123123123123123123123123123123), 1)
self.assertEqual(pyoram.util.misc.intdivceil(
2 * 123123123123123123123123123123123123123123123123,
123123123123123123123123123123123123123123123123), 2)
self.assertEqual(pyoram.util.misc.intdivceil(
2 * 123123123123123123123123123123123123123123123123 + 1,
123123123123123123123123123123123123123123123123), 3)
self.assertEqual(pyoram.util.misc.intdivceil(
2 * 123123123123123123123123123123123123123123123123 - 1,
123123123123123123123123123123123123123123123123), 2)
self.assertEqual(
(2 * 123123123123123123123123123123123123123123123123 - 1) // \
123123123123123123123123123123123123123123123123,
1)
def test_MemorySize(self):
self.assertTrue("b" in str(pyoram.util.misc.MemorySize(0.1)))
self.assertTrue("B" in str(pyoram.util.misc.MemorySize(1)))
self.assertTrue("B" in str(pyoram.util.misc.MemorySize(999)))
self.assertTrue("KB" in str(pyoram.util.misc.MemorySize(1000)))
self.assertTrue("KB" in str(pyoram.util.misc.MemorySize(999999)))
self.assertTrue("MB" in str(pyoram.util.misc.MemorySize(1000000)))
self.assertTrue("MB" in str(pyoram.util.misc.MemorySize(999999999)))
self.assertTrue("GB" in str(pyoram.util.misc.MemorySize(1000000000)))
self.assertTrue("GB" in str(pyoram.util.misc.MemorySize(9999999999)))
self.assertTrue("TB" in str(pyoram.util.misc.MemorySize(1000000000000)))
self.assertTrue("b" in str(pyoram.util.misc.MemorySize(1, unit="b")))
self.assertTrue("b" in str(pyoram.util.misc.MemorySize(2, unit="b")))
self.assertTrue("b" in str(pyoram.util.misc.MemorySize(7.9, unit="b")))
self.assertTrue("B" in str(pyoram.util.misc.MemorySize(8, unit="b")))
self.assertTrue("B" in str(pyoram.util.misc.MemorySize(1, unit="B")))
self.assertTrue("B" in str(pyoram.util.misc.MemorySize(999, unit="B")))
self.assertTrue("KB" in str(pyoram.util.misc.MemorySize(1000, unit="B")))
self.assertTrue("KB" in str(pyoram.util.misc.MemorySize(1, unit="KB")))
self.assertTrue("KB" in str(pyoram.util.misc.MemorySize(999, unit="KB")))
self.assertTrue("MB" in str(pyoram.util.misc.MemorySize(1000, unit="KB")))
self.assertTrue("MB" in str(pyoram.util.misc.MemorySize(1, unit="MB")))
self.assertTrue("MB" in str(pyoram.util.misc.MemorySize(999, unit="MB")))
self.assertTrue("GB" in str(pyoram.util.misc.MemorySize(1000, unit="MB")))
self.assertTrue("GB" in str(pyoram.util.misc.MemorySize(1, unit="GB")))
self.assertTrue("GB" in str(pyoram.util.misc.MemorySize(999, unit="GB")))
self.assertTrue("TB" in str(pyoram.util.misc.MemorySize(1000, unit="GB")))
self.assertTrue("TB" in str(pyoram.util.misc.MemorySize(1, unit="TB")))
self.assertEqual(pyoram.util.misc.MemorySize(1024).KiB, 1)
self.assertEqual(pyoram.util.misc.MemorySize(1024**2).MiB, 1)
self.assertEqual(pyoram.util.misc.MemorySize(1024**3).GiB, 1)
self.assertEqual(pyoram.util.misc.MemorySize(1024**4).TiB, 1)
def test_saveload_private_key(self):
with tempfile.NamedTemporaryFile(delete=False) as f:
filename = f.name
try:
key = os.urandom(32)
pyoram.util.misc.save_private_key(filename, key)
loaded_key = pyoram.util.misc.load_private_key(filename)
self.assertEqual(key, loaded_key)
finally:
os.remove(filename)
def test_chunkiter(self):
self.assertEqual(list(pyoram.util.misc.chunkiter([1,2,3,4,5], 1)),
[[1],[2],[3],[4],[5]])
self.assertEqual(list(pyoram.util.misc.chunkiter([1,2,3,4,5], 2)),
[[1,2],[3,4],[5]])
self.assertEqual(list(pyoram.util.misc.chunkiter([1,2,3,4,5], 3)),
[[1,2,3],[4,5]])
self.assertEqual(list(pyoram.util.misc.chunkiter([1,2,3,4,5], 4)),
[[1,2,3,4],[5]])
self.assertEqual(list(pyoram.util.misc.chunkiter([1,2,3,4,5], 5)),
[[1,2,3,4,5]])
self.assertEqual(list(pyoram.util.misc.chunkiter([1,2,3,4,5], 6)),
[[1,2,3,4,5]])
self.assertEqual(list(pyoram.util.misc.chunkiter([], 1)),
[])
self.assertEqual(list(pyoram.util.misc.chunkiter([], 2)),
[])
if __name__ == "__main__":
unittest.main() # pragma: no cover
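# A common one-line implementation of the ceiling division pinned down by the
# intdivceil tests above (a sketch; pyoram's actual code may differ):
#
#     def intdivceil(a, b):
#         return -(-a // b)  # floor-divide the negation, then negate back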
|
rootulp/hackerrank
|
python/euler004.py
|
Python
|
mit
| 570
| 0
|
#!/bin/python3
import bisect
def is_palindrome(n):
return str(n) == str(n)[::-1]
def generate_palindromes():
return [i * j
for i in range(100, 1000)
            for j in range(100, 1000)
if is_palindrome(i * j)]
def find_lt(a, x):
'Find rightmost value less than x'
    i = bisect.bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
palindromes = sorted(generate_palindromes())
test_cases = int(input().strip())
for _ in range(test_cases):
n = int(input().strip())
print(find_lt(palindromes, n))
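# find_lt assumes a sorted list; for example (values assumed),
# find_lt([101, 111, 121], 120) returns 111, while find_lt([101], 101)
# raises ValueError because no palindrome is strictly smaller.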
|
moesy/AWS-Lambda-ML-Microservice-Skeleton
|
app/main.py
|
Python
|
mit
| 69
| 0.014493
|
import pandas as pd
def handler(event, context):
    return ':)'
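# Local smoke test (assumed; in AWS the runtime supplies real event and
# context objects):
if __name__ == '__main__':
    print(handler({}, None))  # -> ':)'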
|
jakubfindura/vnos-nfc
|
nfc-server/RPLCD/enum.py
|
Python
|
mit
| 11,214
| 0
|
# Copyright (C) 2004-2013 by Barry A. Warsaw
#
# This file is part of flufl.enum
#
# flufl.enum is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# flufl.enum is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with flufl.enum. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Barry Warsaw <barry@python.org>
"""Python enumerations."""
from __future__ import absolute_import, print_function, unicode_literals
import re
import sys
import warnings
from operator import itemgetter
COMMASPACE = ', '
SPACE = ' '
IDENTIFIER_RE = r'^[a-zA-Z_][a-zA-Z0-9_]*$'
class EnumMetaclass(type):
"""Meta class for Enums."""
def __init__(cls, name, bases, attributes):
"""Create an Enum class.
:param cls: The class being defined.
:param name: The name of the class.
:param bases: The class's base classes.
:param attributes: The class attributes.
"""
super(EnumMetaclass, cls).__init__(name, bases, attributes)
# Store EnumValues here for easy access.
cls._enums = {}
# Figure out if this class has a custom factory for building enum
# values. The default is EnumValue, but the class (or one of its
# bases) can declare a custom one with a special attribute.
factory = attributes.get('__value_factory__')
# Figure out the set of enum values on the base classes, to ensure
# that we don't get any duplicate values. At the same time, check the
# base classes for the special attribute.
for basecls in cls.__mro__:
if hasattr(basecls, '_enums'):
cls._enums.update(basecls._enums)
if hasattr(basecls, '__value_factory__'):
basecls_factory = basecls.__value_factory__
if factory is not None and basecls_factory != factory:
raise TypeError(
'Conflicting enum factory in base class: {}'.format(
basecls_factory))
factory = basecls_factory
# Set the factory default if necessary.
if factory is None:
factory = EnumValue
# For each class attribute, create an enum value and store that back
# on the class instead of the original value. Skip Python reserved
# names. Also add a mapping from the original value to the enum value
# instance so we can return the same object on conversion.
for attr in attributes:
if not (attr.startswith('__') and attr.endswith('__')):
value = attributes[attr]
enumval = factory(cls, value, attr)
                if value in cls._enums:
other = cls._enums[value]
                    # Without this, sort order is undefined and causes
                    # unpredictable results for the test suite.
first = (attr if attr < other else other)
second = (other if attr < other else attr)
raise ValueError("Conflicting enum value '{}' "
"for names: '{}' and '{}'".format(
value, first, second))
# Store as an attribute on the class, and save the attr name.
setattr(cls, attr, enumval)
cls._enums[value] = attr
def __dir__(cls):
# For Python 3.2, we must explicitly convert the dict view to a list.
# Order is not guaranteed, so don't sort it.
return list(cls._enums.values())
def __repr__(cls):
# We want predictable reprs. Because base Enum items can have any
# value, the only reliable way to sort the keys for the repr is based
# on the attribute name, which must be Python identifiers.
return '<{0} {{{1}}}>'.format(cls.__name__, COMMASPACE.join(
'{0}: {1}'.format(value, key)
for key, value in sorted(cls._enums.items(), key=itemgetter(1))))
def __iter__(cls):
for value in sorted(cls._enums.values()):
yield getattr(cls, value)
def __getitem__(cls, item):
attr = cls._enums.get(item)
if attr is None:
            # If this is an EnumValue, try its .value attribute.
if hasattr(item, 'value'):
attr = cls._enums.get(item.value)
if attr is None:
# It wasn't value-ish -- try the attribute name. This was
# deprecated in LP: #1167091.
try:
warnings.warn('Enum[item_name] is deprecated; '
'use getattr(Enum, item_name)',
DeprecationWarning, 2)
return getattr(cls, item)
except (AttributeError, TypeError):
raise ValueError(item)
return getattr(cls, attr)
def __call__(cls, *args):
# One-argument calling is a deprecated synonym for getitem.
if len(args) == 1:
warnings.warn('MyEnum(arg) is deprecated; use MyEnum[arg]',
DeprecationWarning, 2)
return cls.__getitem__(args[0])
name, source = args
return _make(cls, name, source)
class EnumValue:
"""Class to represent an enumeration value.
EnumValue('Color', 'red', 12) prints as 'Color.red' and can be converted
to the integer 12.
"""
def __init__(self, enum, value, name):
self._enum = enum
self._value = value
self._name = name
def __repr__(self):
return '<EnumValue: {0}.{1} [value={2}]>'.format(
self._enum.__name__, self._name, self._value)
def __str__(self):
return '{0}.{1}'.format(self._enum.__name__, self._name)
def __int__(self):
warnings.warn('int() is deprecated; use IntEnums',
DeprecationWarning, 2)
return self._value
def __reduce__(self):
return getattr, (self._enum, self._name)
@property
def enum(self):
"""Return the class associated with the enum value."""
return self._enum
@property
def name(self):
"""Return the name of the enum value."""
return self._name
@property
def value(self):
"""Return the underlying value."""
return self._value
    # Support only comparison by identity and equality. Ordered comparisons
# are not supported.
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def __lt__(self, other):
# In Python 3, returning NotImplemented from an ordered comparison
# operator will cause a TypeError to be raised. This doesn't work in
# Python 2 though, and you'd end up with working, but incorrect,
# ordered comparisons. In Python 2 we raise the TypeError explicitly.
if sys.version_info[0] < 3:
raise TypeError(
'unorderable types: {}() < {}()'.format(
self.__class__.__name__, other.__class__.__name__))
return NotImplemented
def __gt__(self, other):
if sys.version_info[0] < 3:
raise TypeError(
'unorderable types: {}() > {}()'.format(
self.__class__.__name__, other.__class__.__name__))
return NotImplemented
def __le__(self, other):
if sys.version_info[0] < 3:
raise TypeError(
'unorderable types: {}() <= {}()'.format(
self.__class__.__name__, other.__class__.__name__))
return NotImplemented
def __ge__(self, other):
if sys.version_info[0] < 3:
raise TypeError(
                'unorderable types: {}() >= {}()'.format(
                    self.__class__.__name__, other.__class__.__name__))
        return NotImplemented
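# Assumed usage sketch (the Enum base class that applies EnumMetaclass is
# defined further down in the original module):
#
#     class Color(Enum):
#         red = 1
#         green = 2
#
#     Color[1] is Color.red   # value lookup via __getitem__
#     list(Color)             # iteration yields values sorted by attr name
#     repr(Color.red)         # "<EnumValue: Color.red [value=1]>"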
|
yrobla/nova
|
nova/tests/test_vmwareapi_vm_util.py
|
Python
|
apache-2.0
| 5,302
| 0.000943
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
fake.reset()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500 * 1024)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
def test_get_host_ref_from_id(self):
fake_host_sys = fake.HostSystem(
fake.ManagedObjectReference("HostSystem", "host-123"))
fake_host_id = fake_host_sys.obj.value
fake_host_name = "ha-host"
ref = vm_util.get_host_ref_from_id(
fake_session([fake_host_sys]), fake_host_id, ['name'])
self.assertIsInstance(ref, fake.HostSystem)
self.assertEqual(fake_host_id, ref.obj.value)
host_name = vm_util.get_host_name_from_host_ref(ref)
self.assertEquals(fake_host_name, host_name)
def test_get_host_name_for_vm(self):
fake_vm = fake.ManagedObject(
"VirtualMachine", fake.ManagedObjectReference(
"vm-123", "VirtualMachine"))
fake_vm.propSet.append(
fake.Property('name', 'vm-123'))
vm_ref = vm_util.get_vm_ref_from_name(
fake_session([fake_vm]), 'vm-123')
self.assertIsNotNone(vm_ref)
fake_results = [
fake.ObjectContent(
None, [
fake.Property('runtime.host',
fake.ManagedObjectReference(
'host-123', 'HostSystem'))
])]
host_id = vm_util.get_host_id_from_vm_ref(
fake_session(fake_results), vm_ref)
self.assertEqual('host-123', host_id)
def test_property_from_property_set(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
results_good = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val=MoRef(value='bar1')),
DynamicProperty(
name='runtime.host', val=MoRef(value='host-123')),
DynamicProperty(name='foo', val=MoRef(value='bar2')),
]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
results_bad = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val='bar1'),
DynamicProperty(name='foo', val='bar2'), ]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
prop = vm_util.property_from_property_set(
'runtime.host', results_good)
self.assertIsNotNone(prop)
value = prop.val.value
self.assertEqual('host-123', value)
prop2 = vm_util.property_from_property_set(
'runtime.host', results_bad)
self.assertIsNone(prop2)
prop3 = vm_util.property_from_property_set('foo', results_good)
self.assertIsNotNone(prop3)
val3 = prop3.val.value
self.assertEqual('bar1', val3)
prop4 = vm_util.property_from_property_set('foo', results_bad)
self.assertIsNotNone(prop4)
self.assertEqual('bar1', prop4.val)
|
bitmovin/bitmovin-python
|
bitmovin/resources/enums/s3_sig_version.py
|
Python
|
unlicense
| 91
| 0
|
import enum
class S3SignatureVersion(enum.Enum):
    S3_V2 = 'S3_V2'
S3_V4 = 'S3_V4'
|
pytlakp/intranetref
|
src/intranet3/models/project.py
|
Python
|
mit
| 10,842
| 0.002121
|
from pprint import pformat
from sqlalchemy import Column, ForeignKey, orm
from sqlalchemy.types import String, Integer, Boolean, Text
from sqlalchemy.schema import UniqueConstraint
from intranet3 import memcache
from intranet3.models import Base, User
from intranet3.log import WARN_LOG, INFO_LOG, DEBUG_LOG
LOG = INFO_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
WARN = WARN_LOG(__name__)
SELECTOR_CACHE_KEY = 'SELECTORS_FOR_TRACKER_%s'
STATUS = [
('1', 'Initialization'),
('2', 'Analysis'),
('3', 'Conception'),
('4', 'Realization'),
('5', 'Support'),
('6', 'Closed'),
]
def bugzilla_bug_list(tracker_url, bug_ids, project_selector=None):
query = '&'.join(['bug_id=%s' % bug_id for bug_id in bug_ids])
return tracker_url + '/buglist.cgi?%s' % query
def unfuddle_bug_list(tracker_url, bug_ids, project_selector=None):
suffix = '/a#/projects/%s/ticket_reports/dynamic?conditions_string=%s'
query = '|'.join(['number-eq-%s' % bug_id for bug_id in bug_ids])
return tracker_url + (suffix % (project_selector, query))
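# For example (hypothetical tracker url and bug ids), the constructors yield:
#   bugzilla_bug_list('https://bugs.example.org', [1, 2])
#       -> 'https://bugs.example.org/buglist.cgi?bug_id=1&bug_id=2'
#   unfuddle_bug_list('https://u.example.org', [1, 2], '7')
#       -> 'https://u.example.org/a#/projects/7/ticket_reports/dynamic'
#          '?conditions_string=number-eq-1|number-eq-2'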
class Project(Base):
__tablename__ = 'project'
BUG_LIST_URL_CONTRUCTORS = {
'bugzilla': bugzilla_bug_list,
'rockzilla': bugzilla_bug_list,
'igozilla': bugzilla_bug_list,
'trac': lambda *args: '#',
'cookie_trac': lambda *args: '#',
'bitbucket': lambda *args: '#',
'pivotaltracker': lambda *args: '#',
'unfuddle': unfuddle_bug_list,
}
id = Column(Integer, primary_key=True, index=True)
name = Column(String, nullable=False)
coordinator_id = Column(Integer, ForeignKey('user.id'), nullable=True, index=True)
client_id = Column(Integer, ForeignKey('client.id'), nullable=False, index=True)
tracker_id = Column(Integer, ForeignKey('tracker.id'), nullable=False, index=True)
turn_off_selectors = Column(Boolean, nullable=False, default=False)
project_selector = Column(String, nullable=True)
component_selector = Column(String, nullable=True)
ticket_id_selector = Column(String, nullable=True)
version_selector = Column(String, nullable=True)
active = Column(Boolean, nullable=False)
time_entries = orm.relationship('TimeEntry', backref='project', lazy='dynamic')
sprints = orm.relationship('Sprint', backref='project', lazy='dynamic')
google_card = Column(String, nullable=True)
google_wiki = Column(String, nullable=True)
status = Column(Integer, nullable=True)
mailing_url = Column(String, nullable=True)
working_agreement = Column(Text, nullable=False, default='')
definition_of_done = Column(Text, nullable=False, default='')
definition_of_ready = Column(Text, nullable=False, default='')
continuous_integration_url = Column(String, nullable=False, default='')
backlog_url = Column(String, nullable=False, default='')
__table_args__ = (UniqueConstraint('name', 'client_id', name='project_name_client_id_unique'), {})
def format_selector(self):
if self.turn_off_selectors:
return u'Turned off'
if self.ticket_id_selector:
return u'Tickets: %s' % (
self.ticket_id_selector, )
else:
return u'%s / %s / %s' % (
self.project_selector or u'*',
self.component_selector or u'*',
self.version_selector or u'*',
)
def get_selector_tuple(self):
"""
Returns selector tuple
([ticket_ids], project_selector, component_selector)
"""
ticket_ids = [
int(v.strip()) for v in self.ticket_id_selector.split(',')
] if self.ticket_id_selector else None
components = [
v.strip() for v in self.component_selector.split(',')
] if self.component_selector else []
versions = [
v.strip() for v in self.version_selector.split(',')
] if self.version_selector else []
return (
ticket_ids,
self.project_selector,
components,
versions,
)
def get_new_bug_url(self):
"""
        Returns the url for creating a new bug in this project
"""
component_selector = self.component_selector if self.component_selector is not None and not self.component_selector.count(',') else None
return self.tracker.get_new_bug_url(self.project_selector, component_selector)
def get_bug_list_url(self, bug_ids):
constructor = self.BUG_LIST_URL_CONTRUCTORS[self.tracker.type]
return constructor(self.tracker.url, bug_ids, self.project_selector)
@property
def status_name(self):
if self.status and len(STATUS) >= self.status:
return STATUS[self.status-1][1]
return None
@property
def coordinator(self):
if self.coordinator_id is not None:
return User.query.filter(User.id==self.coordinator_id).one()
else:
return self.client.coordinator
class SelectorMapping(object):
""" Simple storage for cached project selectors """
def __init__(self, tracker):
"""
Creates a selector mapping for given tracker
None -> project_id
project_name -> project_id
(project_name, component_name) -> project_id
"""
self.tracker = tracker
self.by_ticket_id = {}
self.default = None
self.by_project = {} # key: project_name
self.by_component = {} # key: project_name, component_name
self.by_version = {} # key: project_name, version
self.by_component_version = {} # key: project_name, component_name, version
cache_key = SELECTOR_CACHE_KEY % tracker.id
mapping = memcache.get(cache_key)
if mapping:
self.clone(mapping)
return
projects = Project.query.filter(Project.tracker_id == tracker.id) \
.filter(Project.turn_off_selectors == False) \
.filter(Project.active == True)
self.projects = dict([(project.id, project.name) for project in projects])
for project in projects:
self._create_for_project(project)
memcache.set(cache_key, self)
DEBUG('Created selector mapping for tracker %s: %s, %s' % (
tracker.id, pformat(self.by_ticket_id), pformat(self.by_component))
)
def clone(self, mapping):
self.default = mapping.default
self.by_project = mapping.by_project
self.by_component = mapping.by_component
self.by_version = mapping.by_version
self.by_component_version = mapping.by_component_version
def _check_ticket_id_existance(self, ticket_id):
if ticket_id in self.by_ticket_id:
WARN(u'Overriding ticket ID for tracker from %s to %s' % (
self.by_ticket_id[ticket_id], ticket_id))
def _check_project_component_existance(self, project_component, project):
"""
Warn if we override a project
"""
if project_component is None:
if None in self.by_component:
WARN(u'Overriding default project for tracker [%s] from [%s] to [%s]' % (
self.tracker.name,
self.projects[self.by_component[None]],
project.name
))
elif isinstance(project_component, (str, unicode)):
project_name = project_component
if project_name in self.by_component:
WARN(u'Overriding project [%s] for tracker [%s] from [%s] to [%s]' % (
project_name,
self.tracker.name,
self.projects[self.by_component[project_name]],
project.name
))
else:
project_name, component_name = project_component
if (project_name, component_name) in self.by_component:
WARN(u'Overriding project [%s] and component [%s] for tracker [%s] from [%s] to [%s]' % (
project_name,
component_name,
self.tracker.name,
                    self.projects[self.by_component[(project_name, component_name)]],
                    project.name
                ))
|
daniLOLZ/variaRoba
|
Python/34.py
|
Python
|
mit
| 633
| 0.023697
|
import json
with open("birthdays.jso
|
n", "r") as damnJson:
birthDays = json.load(damnJson)
print("We know the birth days of: ")
for i in birthDays:
print(i)
print("\nWould you like to add or retrieve a birth day?")
lol = input().strip().lower()
if lol == "add":
person = input("Who's the lucky one? ")
date = input("What's his birth day? ")
birthDays[person] = date
with open("birthdays.js
|
on", "w") as damnJson:
json.dump(birthDays, damnJson)
print("\nk thx\n")
elif lol == "retrieve":
print("\nWho would you like to know the birth date of? ")
person = input()
print(birthDays[person])
else:
print("fk u m8")
|
callmetaste/PHCpack
|
src/Python/PHCpy3/phcpy/__init__.py
|
Python
|
gpl-3.0
| 3,727
| 0.000268
|
"""
PHCpy --- a package for Polynomial Homotopy Continuation
========================================================
PHCpy is a collection of Python modules to compute solutions
of polynomial systems using PHCpack.
A homotopy defines the deformation of a start system
(system with known solutions) into the target system
(system that has to be solved).
Continuation or path tracking methods apply numerical
predictor-corrector techniques to track the solution paths
defined by the homotopy, starting at the known solutions of the
start system and ending at the solutions of the target system.
Available modules
-----------------
solver
exports the blackbox solver of PHCpack, a mixed volume calculator,
a path tracker, functions to construct start systems, and deflation
to recondition isolated singular solutions.
solutions
solutions of phcpy.solve are lists of PHCpack solution strings
and this module exports operations to convert the solution
strings into Python dictionaries, e.g. for evaluation.
interface
data transfer from string representations of polynomials and solutions
as the interface between Python and the C interface of PHCpack.
trackers
offers functions to track solution paths defined by a homotopy between
a given start system with known solutions and a target system.
maps
module to work with monomial maps, defined as solution of systems
that have exactly two monomials in every equation (binomial systems).
sets
offers tools to work with positive dimensional solution sets.
examples
defines some interesting examples taken from the research literature,
the test() solves all systems, performing a regression test.
families
    polynomial systems often occur in families and are defined for any
number of equations and variables, e.g.: the cyclic n-roots system.
schubert
exports the hypersurface and quantum Pieri homotopies to compute
isolated solutions to problems in enumerative geometry.
polytopes
functions to work with Newton polytopes, to compute mixed volumes
    of Newton polytopes, given by tuples of support sets.
phcwulf
defines a simple client/server interaction to solve random trinomials.
Calling the blackbox solver
---------------------------
Polynomials and solutions are represented as strings.
Below is an illustration of a session with the blackbox solver
on a system of two random trinomials, polynomials with three
monomials with random coefficients.
>>> from phcpy.solver import random_trinomials
>>> f = random_trinomials()
>>> print f[0]
(0.583339727743+0.81222826966115*i)*x^0*y^0\
+(-0.730410130891-0.68300881450520*i)*x^5*y^5\
+(0.547878834338+0.83655769847920*i)*x^5*y^0;
>>> print f[1]
(0.830635910813+0.55681593338247*i)*x^0*y^4\
+(0.456430547798-0.88975904324518*i)*x^1*y^4\
+(0.034113254002-0.99941797357332*i)*x^2*y^1;
>>> from phcpy.solver import solve
>>> s = solve(f,silent=True)
>>> len(s)
30
>>> print s[2]
t : 1.00000000000000E+00 0.00000000000000E+00
m : 1
the solution for t :
x : -9.99963006604849E-01 8.60147787997449E-03
y : 0.00000000000000E+00 0.00000000000000E+00
== err : 4.325E-17 = rco : 2.020E-01 = res : 1.665E-16 =
>>>
The solve command returned a list of 30 strings in s,
each string represents a solution that makes the polynomials in f vanish.
The module solutions offers functions to evaluate the solutions
in the polynomials given as strings.
"""
try:
from phcpy.phcpy2c3 import py2c_PHCpack_version_string
print(py2c_PHCpack_version_string() + ' works!')
except:
print('Is the phcpy2c3.so not suited for this platform?')
# The version number is defined as a data attribute.
__version__ = '0.4.1'
|
bhupennewalkar1337/erpnext
|
erpnext/buying/doctype/request_for_quotation/request_for_quotation_dashboard.py
|
Python
|
gpl-3.0
| 206
| 0.058252
|
from frappe import _
def get_data():
    return {
'docstatus': 1,
        'fieldname': 'request_for_quotation',
'transactions': [
{
'label': _('Related'),
'items': ['Supplier Quotation']
},
]
}
|
MartinPyka/Parametric-Anatomical-Modeling
|
pam/pam.py
|
Python
|
gpl-2.0
| 34,811
| 0.004998
|
"""THis module contains the core functions needed to compute pam models"""
import logging
import random
import bpy
import mathutils
import numpy
from . import constants
from . import grid
from . import model
from . import exceptions
from . import layer
from . import connection_mapping
from .mesh import *
import multiprocessing
import os
logger = logging.getLogger(__package__)
SEED = 0
# TODO(SK): Rephrase docstring, add parameter/return values
def computeUVScalingFactor(obj):
"""Compute the scaling factor between uv- and 3d-coordinates for a
given object
    The return value is the factor by which uv-coordinates must be
    multiplied to obtain metric (3d) distances.
"""
result = []
for i in range(len(obj.data.polygons)):
uvs = [obj.data.uv_layers.active.data[li] for li in obj.data.polygons[i].loop_indices]
rdist = (obj.data.vertices[obj.data.polygons[i].vertices[0]].co - obj.data.vertices[obj.data.polygons[i].vertices[1]].co).length
mdist = (uvs[0].uv - uvs[1].uv).length
result.append(rdist / mdist)
# TODO (MP): compute scaling factor on the basis of all edges
return numpy.mean(result), result
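# Usage sketch ('Cortex' is a hypothetical object name): the mean factor
# converts a length measured in uv-space into the corresponding metric
# 3d length.
#   factor, _ = computeUVScalingFactor(bpy.data.objects['Cortex'])
#   metric_length = uv_length * factor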
def map3dPointToParticle(obj, particle_system, location):
"""Determine based on a 3d-point location (e.g. given by the cursor
position) the index of the closest particle on an object
:param obj: The object from which to choose
:type obj: bpy.types.Object
:param particle_system: The name of the particle system
:type particle_system: str
:param location: The 3d point
:type location: mathutils.Vector
:return: The index of the closest particle
:rtype: int
"""
index = -1
distance = float("inf")
for (i, p) in enumerate(obj.particle_systems[particle_system].particles):
if (p.location - location).length < distance:
distance = (p.location - location).length
index = i
return index
# TODO(SK): Rephrase docstring, add parameter/return values
def maskParticle(p_layer, p_index, mask_layer, distance=0.2):
"""Return particle-indices of particle_layer that have a smaller
distance than the distance-argument to mask_layer
:param bpy.types.Object p_layer: object that contains the particles
:param int p_index: index of particle-system
:param bpy.types.Object mask_layer: mask object
:param float distance: distance threshold
:return:
:rtype:
"""
result = []
for i, p in enumerate(p_layer.particle_systems[p_index].particles):
l, n, f = mask_layer.closest_point_on_mesh(p.location)
if (p.location - l).length < distance:
result.append(i)
return result
# TODO(SK): Rephrase docstring, add parameter/return values
def distanceToMask(p_layer, p_index, particle_index, mask_layer):
"""Return the distance for a particle to a mask_layer
:param bpy.types.Object p_layer: object with particle-system
:param int p_index: index of particle-system
:param int particle_index: index of particle
:param bpy.types.Object mask_layer: object that serves as mask
:return:
:rtype:
"""
p = p_layer.particle_systems[p_index].particles[particle_index]
    l, n, f = mask_layer.closest_point_on_mesh(p.location)
return (p.location - l).length
# TODO(SK): missing docstring
# TODO(SK): Rephrase docstring, add parameter/return values
def computeConnectivityProbability(uv1, uv2, func, args):
return func(uv1, uv2, args)
# TODO(SK): Rephrase docstring, add parameter/return values
def computeDistance_PreToSynapse(no_connection, pre_index, synapses=[]):
"""Compute distance for a pre-synaptic neuron and a certain
connection definition
    synapses can optionally be used to compute the distance for only a
subset of synapses
"""
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
point = con.pre_layer.getNeuronPosition(pre_index)
pre_p3d, pre_p2d, pre_d = computeMapping(layers[0:(slayer + 1)] + [layers[slayer]],
connections[0:slayer] + [connections[slayer]],
distances[0:slayer] + [distances[slayer]],
point)
if pre_p3d:
if (distances[slayer] == constants.DIS_normalUV) | (distances[slayer] == constants.DIS_euclidUV):
uv_distances = []
# if synapses is empty, simply calculate it for all synapses
if not synapses:
s2ds = model.CONNECTION_RESULTS[no_connection]['s'][pre_index]
else:
s2ds = [model.CONNECTION_RESULTS[no_connection]['s'][pre_index][s] for s in synapses]
for s2d in s2ds:
#try:
uv_distance, _ = computeDistanceToSynapse(layers[slayer], layers[slayer], pre_p3d[-1], mathutils.Vector(s2d), distances[slayer])
uv_distances.append(uv_distance)
#except exceptions.MapUVError as e:
# logger.info("Message-pre-data: ", e)
#except Exception as e:
# logger.info("A general error occured: ", e)
path_length = compute_path_length(pre_p3d) + numpy.mean(uv_distances)
else:
path_length = compute_path_length(pre_p3d)
else:
path_length = 0.
return path_length, pre_p3d
# TODO(SK): Rephrase docstring, add parameter/return values
def sortNeuronsToUV(layer, neuronset, u_or_v):
"""Sort particles according to their position on the u
    or v axis and return the permutation indices
:param bpy.types.Object layer: layer were the neurons are
:param str neuronset: name or number of the neuronset (particle system)
:param str u_or_v: `u` means sort for u
`v` means sort for v
:return:
:rtype:
"""
if u_or_v == 'u':
index = 0
elif u_or_v == 'v':
index = 1
else:
raise Exception("u_or_v must be either 'u' or 'v' ")
# get all particle positions
p3d = [i.location for i in layer.particle_systems[neuronset].particles]
# convert them to 2d and select just the u or v coordinate
p2d = [map3dPointToUV(layer, layer, p)[index] for p in p3d]
# return permutation of a sorted list (ascending)
return numpy.argsort(p2d)
# TODO(SK): Rephrase docstring, add parameter/return values
# TODO(SK): Structure return values in docstring
def computeMapping(layers, connections, distances, point, debug=False):
"""Based on a list of layers, connections-properties and distance-properties,
this function returns the 3d-point, the 2d-uv-point and the distance from a given
point on the first layer to the corresponding point on the last layer
:param list layers: layers connecting the pre-synaptic layer with the synaptic layer
:param list connections: values determining the type of layer-mapping
:param list distances: values determining the calculation of the distances between layers
:param mathutils.Vector point: vector for which the mapping should be calculated
:param bool debug: if true, the function returns a list of layers that it was able
to pass. Helps to debug the mapping-definitions in order to figure
                       out where exactly the mapping stops
Return values
p3d list of 3d-vector of the neuron position on all layers until the last
last position before the synapse. Note, that this might be before the
synapse layer!!! This depends on the distance-property.
p2d 2d-vector of the neuron position on the UV map of the last layer
d distance between neuron position on the first layer and last position before
the synapse! This is not the distance to the p3d point! This is either the
distance to the 3d-position of
|
rovere/utilities
|
split.py
|
Python
|
mit
| 2,848
| 0.009129
|
#!/usr/bin/env python
import sys
import re
def generateFilename(prefix, file_count, suffix):
return "%s_%04d.%s" % (prefix, file_count, suffix)
def split(filename, prefix, suffix, regexp, do_html, frequency=1):
file_count = 0
count = 0
f = open(filename,'r')
o = open(generateFilename(prefix, file_count, suffix), 'w')
for line in f:
g = re.match(regexp, line)
if g:
count += 1
if count > 1 and (count%int(frequency) == 0):
o.close()
sys.stdout.write('.')
sys.stdout.flush()
file_count += 1
if file_count%10 == 0:
sys.stdout.write('\n')
sys.stdout.flush()
count = 0
            o = open(generateFilename(prefix, file_count, suffix), 'w')
if g and do_html:
            try:
o.write('<div id=%s> <a href="%s#%s">%s</a> %s </div>\n' % (g.group(1),
generateFilename(prefix, file_count, suffix),
g.group(1).replace('"', ''),
g.group(1),
line.rstrip()))
except:
o.write(line)
else:
o.write(line)
sys.stdout.write('\n')
sys.stdout.flush()
if __name__ == '__main__':
from optparse import OptionParser
usage = "usage: %prog [options] filename"
opt = OptionParser(usage=usage)
opt.add_option("-p", "--prefix", dest="prefix", metavar="SUFFIX", default="xx",
help="Prefix for the output filenames [default: %default].")
opt.add_option("-s", "--suffix", dest="suffix", metavar="SUFFIX", default="txt",
help="Suffix for the output filenames, _not_ including the '.' [default: %default].")
opt.add_option("-r", "--regexp", dest="regexp", metavar="REGEXP", default="\n",
help="Regular expression used to split the file [default: %default].")
opt.add_option("-f", "--frquency", dest="frequency", metavar="FREQ", default=1000, type=int,
help="Frequency with which the input file will be split upon REGEXP matching [default: %default].")
opt.add_option("-H", "--html", dest="do_html", default=False, action="store_true",
help="Add HTML tags around the matched regexp, like <div id='matched regexp'>full lines</div>")
opts, args = opt.parse_args()
if len(args) == 0:
print >> sys.stderr, sys.argv[0], \
": split.py [options] filename"
sys.exit(1)
split(args[0], opts.prefix, opts.suffix, opts.regexp, opts.do_html, opts.frequency)
|
shweta97/pyta
|
python_ta/reporters/color_reporter.py
|
Python
|
gpl-3.0
| 1,167
| 0.003428
|
import sys
from colorama import Fore, Style
from colorama import init
from .plain_reporter import PlainReporter
class ColorReporter(PlainReporter):
def __init__(self, number_of_messages):
super().__init__(number_of_messages)
# Override this method
def print_messages(self, level='all'):
        # Check if the OS currently running is Windows
init(wrap=(sys.platform == 'win32'), strip=False)
self.sort_messages()
print(Style.BRIGHT + '=== Code errors/forbidden usage (fix these right away!) ===' + Style.RESET_ALL)
for msg in self._error_messages:
code = Fore.RED + Style.BRIGHT + msg.msg_id + Style.RESET_ALL
print(code, '({}) {}\n [Line {}] {}'.format(msg.symbol, msg.obj, msg.line, msg.msg))
if level == 'all':
print('\n')
print(Style.BRIGHT + '=== Style/convention errors (fix these before submission) ===' + Style.RESET_ALL)
for msg in self._style_messages:
code = Style.BRIGHT + msg.msg_id + Style.RESET_ALL
print(code, '({}) {}\n [Line {}] {}'.format(msg.symbol, msg.obj, msg.line, msg.msg))
|
sabiodelhielo/rancher-validation
|
tests/v3_api/common.py
|
Python
|
apache-2.0
| 41,077
| 0
|
import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
CLUSTER_NAME_2 = os.environ.get("RANCHER_CLUSTER_NAME_2", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
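# Usage sketch (hypothetical check): block until a cluster reports active,
# appending its last state to the timeout message on failure.
#   cluster = wait_for_condition(
#       client, cluster,
#       lambda c: c.state == "active",
#       fail_handler=lambda c: " last state: " + c.state)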
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " +
|
SlateScience/MozillaJS
|
js/src/python/mozbuild/mozpack/mozjar.py
|
Python
|
mpl-2.0
| 28,516
| 0.000245
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from io import BytesIO
import struct
import zlib
import os
from zipfile import (
ZIP_STORED,
ZIP_DEFLATED,
)
from collections import OrderedDict
from urlparse import urlparse, ParseResult
import mozpack.path
JAR_STORED = ZIP_STORED
JAR_DEFLATED = ZIP_DEFLATED
MAX_WBITS = 15
class JarReaderError(Exception):
'''Error type for Jar reader errors.'''
class JarWriterError(Exception):
'''Error type for Jar writer errors.'''
class JarStruct(object):
'''
Helper used to define ZIP archive raw data structures. Data structures
handled by this helper all start with a magic number, defined in
subclasses MAGIC field as a 32-bits unsigned integer, followed by data
structured as described in subclasses STRUCT field.
The STRUCT field contains a list of (name, type) pairs where name is a
field name, and the type can be one of 'uint32', 'uint16' or one of the
field names. In the latter case, the field is considered to be a string
buffer with a length given in that field.
For example,
STRUCT = [
('version', 'uint32'),
('filename_size', 'uint16'),
('filename', 'filename_size')
]
describes a structure with a 'version' 32-bits unsigned integer field,
    followed by a 'filename_size' 16-bits unsigned integer field, followed by a
filename_size-long string buffer 'filename'.
Fields that are used as other fields size are not stored in objects. In the
above example, an instance of such subclass would only have two attributes:
obj['version']
obj['filename']
filename_size would be obtained with len(obj['filename']).
    JarStruct subclasses instances can be either initialized from existing data
(deserialized), or with empty fields.
'''
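    # A hedged sketch of a hypothetical subclass (not a real ZIP record),
    # showing how MAGIC/STRUCT drive (de)serialization; 'name_size' is a
    # size field, so it is derived via len() rather than stored:
    #
    #   class ExampleRecord(JarStruct):
    #       MAGIC = 0x0000beef
    #       STRUCT = OrderedDict([
    #           ('name_size', 'uint16'),
    #           ('name', 'name_size'),
    #       ])
    #
    #   rec = ExampleRecord()
    #   rec['name'] = 'foo'
    #   raw = rec.serialize()   # 4-byte magic + uint16 value 3 + 'foo'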
TYPE_MAPPING = {'uint32': ('I', 4), 'uint16': ('H', 2)}
def __init__(self, data=None):
'''
Create an instance from the given data. Data may be omitted to create
an instance with empty fields.
'''
assert self.MAGIC and isinstance(self.STRUCT, OrderedDict)
self.size_fields = set(t for t in self.STRUCT.itervalues()
if not t in JarStruct.TYPE_MAPPING)
self._values = {}
if data:
self._init_data(data)
else:
self._init_empty()
def _init_data(self, data):
'''
Initialize an instance from data, following the data structure
described in self.STRUCT. The self.MAGIC signature is expected at
data[:4].
'''
assert data is not None
self.signature, size = JarStruct.get_data('uint32', data)
if self.signature != self.MAGIC:
raise JarReaderError('Bad magic')
offset = size
# For all fields used as other fields sizes, keep track of their value
# separately.
sizes = dict((t, 0) for t in self.size_fields)
for name, t in self.STRUCT.iteritems():
if t in JarStruct.TYPE_MAPPING:
value, size = JarStruct.get_data(t, data[offset:])
else:
size = sizes[t]
value = data[offset:offset + size]
if isinstance(value, memoryview):
value = value.tobytes()
if not name in sizes:
self._values[name] = value
else:
sizes[name] = value
offset += size
def _init_empty(self):
'''
Initialize an instance with empty fields.
'''
self.signature = self.MAGIC
for name, t in self.STRUCT.iteritems():
if name in self.size_fields:
continue
self._values[name] = 0 if t in JarStruct.TYPE_MAPPING else ''
@staticmethod
def get_data(type, data):
'''
Deserialize a single field of given type (must be one of
JarStruct.TYPE_MAPPING) at the given offset in the given data.
'''
assert type in JarStruct.TYPE_MAPPING
assert data is not None
format, size = JarStruct.TYPE_MAPPING[type]
data = data[:size]
if isinstance(data, memoryview):
data = data.tobytes()
return struct.unpack('<' + format, data)[0], size
def serialize(self):
'''
Serialize the data structure according to the data structure definition
from self.STRUCT.
'''
serialized = struct.pack('<I', self.signature)
sizes = dict((t, name) for name, t in self.STRUCT.iteritems()
if not t in JarStruct.TYPE_MAPPING)
for name, t in self.STRUCT.iteritems():
if t in JarStruct.TYPE_MAPPING:
format, size = JarStruct.TYPE_MAPPING[t]
if name in sizes:
value = len(self[sizes[name]])
else:
value = self[name]
serialized += struct.pack('<' + format, value)
else:
serialized += self[name]
return serialized
@property
def size(self):
'''
Return the size of the data structure, given the current values of all
variable length fields.
'''
size = JarStruct.TYPE_MAPPING['uint32'][1]
for name, type in self.STRUCT.iteritems():
if type in JarStruct.TYPE_MAPPING:
size += JarStruct.TYPE_MAPPING[type][1]
else:
size += len(self[name])
return size
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if not key in self.STRUCT:
raise KeyError(key)
if key in self.size_fields:
raise AttributeError("can't set attribute")
self._values[key] = value
def __contains__(self, key):
return key in self._values
def __iter__(self):
return self._values.iteritems()
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__,
' '.join('%s=%s' % (n, v) for n, v in self))
class JarCdirEnd(JarStruct):
'''
End of central directory record.
'''
MAGIC = 0x06054b50
STRUCT = OrderedDict([
('disk_num', 'uint16'),
('cdir_disk', 'uint16'),
('disk_entries', 'uint16'),
('cdir_entries', 'uint16'),
('cdir_size', 'uint32'),
('cdir_offset', 'uint32'),
('comment_size', 'uint16'),
('comment', 'comment_size'),
])
CDIR_END_SIZE = JarCdirEnd().size
class JarCdirEntry(JarStruct):
'''
Central directory file header
'''
MAGIC = 0x02014b50
STRUCT = OrderedDict([
('creator_version', 'uint16'),
('min_version', 'uint16'),
('general_flag', 'uint16'),
('compression', 'uint16'),
('lastmod_time', 'uint16'),
('lastmod_date', 'uint16'),
('crc32', 'uint32'),
('compressed_size', 'uint32'),
('uncompressed_size', 'uint32'),
('filename_size', 'uint16'),
('extrafield_size', 'uint16'),
('filecomment_size', 'uint16'),
('disknum', 'uint16'),
('internal_attr', 'uint16'),
('external_attr', 'uint32'),
('offset', 'uint32'),
('filename', 'filename_size'),
('extrafield', 'extrafield_size'),
('filecomment', 'filecomment_size'),
])
class JarLocalFileHeader(JarStruct):
'''
Local file header
'''
MAGIC = 0x04034b50
STRUCT = OrderedDict([
('min_version', 'uint16'),
('general_flag', 'uint16'),
('compression', 'uint16'),
('lastmod_time', 'uint16'),
('lastmod_date', 'uint16'),
('crc32', 'uint32'),
('compressed_size', 'uint32'),
('uncompressed_size', 'uint32'),
('filename_size', 'uint16'),
('extra_field_size', 'uint16'),
('filename', 'filename_size'),
('extra_field', 'extra_field_size'),
])
cla
|
lantra/vugamedev
|
src/setup.py
|
Python
|
mit
| 1,710
| 0.008187
|
from distutils.core import setup
import py2exe
import os
import sys
sys.argv.append('py2exe')
# The filename of the script you use to start your program.
target_file = 'main.py'
# The root directory containing your assets, libraries, etc.
assets_dir = '.\\'
# Filetypes not to be included in the above.
excluded_file_types = ['py','pyc','project','pydevproject']
def get_data_files(base_dir, target_dir, list=[]):
"""
" * get_data_files
" * base_dir: The full path to the current working directory.
" * target_dir: The directory of assets to include.
" * list: Current list of assets. Used for recursion.
" *
" * returns: A list of relative and full path pairs. This is
" * specified by distutils.
"""
for file in os.listdir(base_dir + target_dir):
        full_path = base_dir + target_dir + file
if os.path.isdir(full_path):
get_data_files(base_dir, target_dir + file + '\\', list)
elif os.path.isfile(full_path):
if (len(file.split('.')) == 2 and file.split('.')[1] not in excluded_file_types):
list.append((target_dir, [full_path]))
return list
# The directory of assets to include.
my_files = get_data_files(sys.path[0] + '\\', assets_dir)
# Build a dictionary of the options we want.
opts = { 'py2exe': {
'ascii':'True',
'excludes':['_ssl','_hashlib'],
'includes' : ['anydbm', 'dbhash'],
'bundle_files':'1',
'compressed':'True'}}
# Run the setup utility.
setup(console=[target_file],
data_files=my_files,
zipfile=None,
options=opts)
|
glerm/ScriptsMusic21
|
mostrascore.py
|
Python
|
gpl-3.0
| 263
| 0.019011
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from music21 import *
import os
import sys
localcorpus=os.getcwd()+'/localcorpus/'
localXML='mikro'+str(sys.argv[1])+'.xml'
localfile=localcorpus+localXML
MIKRO=converter.parse(localfile)
MIKRO.show('lily.pdf')
|
Kupoman/blendergltf
|
tests/unit/test_image.py
|
Python
|
apache-2.0
| 2,570
| 0.002335
|
def test_image_export_reference(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'REFERENCE'
gltf_image_default['uri'] = '../filepath.png'
output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'EMBED'
gltf_image_default['uri'] = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACElEQVR42gMAAAAAAW'
'/dyZEAAAAASUVORK5CYII='
)
gltf_image_default['mimeType'] = 'image/png'
    output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed_glb(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'EMBED'
state['settings']['gltf_export_binary'] = True
    gltf_image_default['mimeType'] = 'image/png'
gltf_image_default['bufferView'] = 'bufferView_buffer_Image_0'
output = exporters.ImageExporter.export(state, bpy_image_default)
for ref in state['references']:
ref.source[ref.prop] = ref.blender_name
assert output == gltf_image_default
def test_image_to_data_uri(exporters, bpy_image_default):
image_data = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\r'
b'IHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\x08'
b'IDATx\xda\x03\x00\x00\x00\x00\x01o\xdd\xc9\x91\x00\x00\x00\x00'
b'IEND\xaeB`\x82'
)
assert exporters.ImageExporter.image_to_data_uri(bpy_image_default) == image_data
def test_image_check(exporters, state, bpy_image_default):
assert exporters.ImageExporter.check(state, bpy_image_default)
def test_image_default(exporters, state, bpy_image_default):
assert exporters.ImageExporter.default(state, bpy_image_default) == {
'name': 'Image',
'uri': '',
}
def test_image_check_0_x(exporters, state, bpy_image_default):
bpy_image_default.size = [0, 1]
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_0_y(exporters, state, bpy_image_default):
bpy_image_default.size = [1, 0]
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_type(exporters, state, bpy_image_default):
bpy_image_default.type = 'NOT_IMAGE'
assert exporters.ImageExporter.check(state, bpy_image_default) is not True
|
ministryofjustice/cla_backend
|
cla_backend/apps/call_centre/tests/test_forms.py
|
Python
|
mit
| 24,898
| 0.003735
|
import datetime
import mock
from django.test import TestCase
from django.utils import timezone
from core.tests.mommy_utils import make_recipe, make_user
from cla_eventlog.tests.test_forms import BaseCaseLogFormTestCaseMixin, EventSpecificLogFormTestCaseMixin
from cla_eventlog.models import Log
from legalaid.models import Case
from cla_provider.helpers import ProviderAllocationHelper
from call_centre.forms import (
DeferAssignmentCaseForm,
    ProviderAllocationForm,
DeclineHelpCaseForm,
CallMeBackForm,
StopCallMeBackForm,
)
from cla_common.constants import CASE_SOURCE
from call_centre.tests.test_utils import CallCentreFixedOperatingHours
def _mock_datetime_now_with(date, *mocks):
dt = date.replace(tzinfo=timezone.get_current_timezone())
for _mock in mocks:
_mock.return_value = dt
return dt
class ProviderAllocationFormTestCase(TestCase):
@mock.patch("cla_provider.helpers.timezone.now")
def test_save_in_office_hours(self, timezone_mock):
_mock_datetime_now_with(datetime.datetime(2014, 1, 2, 9, 1, 0), timezone_mock)
case = make_recipe("legalaid.case")
category = case.eligibility_check.category
case.matter_type1 = make_recipe("legalaid.matter_type1", category=category)
case.matter_type2 = make_recipe("legalaid.matter_type2", category=category)
case.save()
user = make_user()
provider = make_recipe("cla_provider.provider", active=True)
make_recipe(
"cla_provider.provider_allocation", weighted_distribution=0.5, provider=provider, category=category
)
helper = ProviderAllocationHelper()
form = ProviderAllocationForm(
case=case,
data={"provider": helper.get_suggested_provider(category).pk},
providers=helper.get_qualifying_providers(category),
)
self.assertTrue(form.is_valid())
self.assertEqual(Log.objects.count(), 0)
form.save(user)
self.assertEqual(case.provider, provider)
self.assertEqual(Log.objects.count(), 1)
self.assertIn("Assigned to ", Log.objects.first().notes)
@mock.patch("cla_provider.models.timezone.now")
@mock.patch("cla_provider.helpers.timezone.now")
def test_save_out_office_hours_bank_holiday(self, timezone_mock, models_timezone_mock):
_mock_datetime_now_with(
datetime.datetime(datetime.date.today().year, 1, 1, 9, 1, 0), timezone_mock, models_timezone_mock
)
case = make_recipe("legalaid.case")
category = case.eligibility_check.category
case.matter_type1 = make_recipe("legalaid.matter_type1", category=category)
case.matter_type2 = make_recipe("legalaid.matter_type2", category=category)
case.save()
provider = make_recipe("cla_provider.provider", active=True)
make_recipe(
"cla_provider.provider_allocation", weighted_distribution=0.5, provider=provider, category=category
)
helper = ProviderAllocationHelper()
suggested = helper.get_suggested_provider(category)
self.assertIsNone(suggested)
form = ProviderAllocationForm(
case=case,
data={"provider": suggested.pk if suggested else None},
providers=helper.get_qualifying_providers(category),
)
self.assertFalse(form.is_valid())
self.assertEqual(Log.objects.count(), 0)
@mock.patch("cla_provider.models.timezone.now")
@mock.patch("cla_provider.helpers.timezone.now")
def test_save_out_office_hours(self, timezone_mock, models_timezone_mock):
_mock_datetime_now_with(datetime.datetime(2014, 1, 2, 8, 59, 0), timezone_mock, models_timezone_mock)
case = make_recipe("legalaid.case")
category = case.eligibility_check.category
case.matter_type1 = make_recipe("legalaid.matter_type1", category=category)
case.matter_type2 = make_recipe("legalaid.matter_type2", category=category)
case.save()
user = make_user()
provider = make_recipe("cla_provider.provider", active=True)
make_recipe(
"cla_provider.outofhoursrota",
provider=provider,
start_date=datetime.datetime(2013, 12, 30).replace(tzinfo=timezone.get_current_timezone()),
end_date=datetime.datetime(2014, 1, 2, 9, 0).replace(tzinfo=timezone.get_current_timezone()),
category=category,
)
make_recipe(
"cla_provider.provider_allocation", weighted_distribution=0.5, provider=provider, category=category
)
# TODO - create a ProviderAllocation for this provider with the
# same category as the case and a positive weighted_distribution
helper = ProviderAllocationHelper()
form = ProviderAllocationForm(
case=case,
data={"provider": helper.get_suggested_provider(category).pk},
providers=helper.get_qualifying_providers(category),
)
self.assertTrue(form.is_valid())
self.assertEqual(Log.objects.count(), 0)
form.save(user)
self.assertEqual(case.provider, provider)
self.assertEqual(Log.objects.count(), 1)
self.assertIn("Assigned to ", Log.objects.first().notes)
@mock.patch("cla_provider.models.timezone.now")
@mock.patch("cla_provider.helpers.timezone.now")
def test_save_out_office_hours_saturday(self, timezone_mock, models_timezone_mock):
_mock_datetime_now_with(datetime.datetime(2014, 11, 1, 10, 30, 0), timezone_mock, models_timezone_mock)
case = make_recipe("legalaid.case")
category = case.eligibility_check.category
case.matter_type1 = make_recipe("legalaid.matter_type1", category=category)
case.matter_type2 = make_recipe("legalaid.matter_type2", category=category)
case.save()
user = make_user()
provider = make_recipe("cla_provider.provider", active=True)
in_hours_provider = make_recipe("cla_provider.provider", active=True)
make_recipe(
"cla_provider.outofhoursrota",
provider=provider,
start_date=datetime.datetime(2013, 12, 30).replace(tzinfo=timezone.get_current_timezone()),
end_date=datetime.datetime(2014, 12, 2).replace(tzinfo=timezone.get_current_timezone()),
category=category,
)
make_recipe(
"cla_provider.provider_allocation", weighted_distribution=1, provider=in_hours_provider, category=category
)
make_recipe("cla_provider.provider_allocation", weighted_distribution=0, provider=provider, category=category)
with mock.patch.object(
ProviderAllocationHelper, "_get_random_provider", return_value=in_hours_provider
) as mocked_get_random_provider:
helper = ProviderAllocationHelper()
form = ProviderAllocationForm(
case=case,
data={"provider": helper.get_suggested_provider(category).pk},
providers=helper.get_qualifying_providers(category),
)
self.assertEqual(mocked_get_random_provider.call_count, 0)
self.assertTrue(form.is_valid())
self.assertEqual(Log.objects.count(), 0)
form.save(user)
self.assertEqual(case.provider, provider)
self.assertEqual(Log.objects.count(), 1)
self.assertIn("Assigned to ", Log.objects.first().notes)
@mock.patch("cla_provider.models.timezone.now")
@mock.patch("cla_provider.helpers.timezone.now")
def test_save_out_office_hours_no_valid_provider(self, timezone_mock, models_timezone_mock):
_mock_datetime_now_with(
datetime.datetime(datetime.date.today().year, 1, 1, 8, 59, 0), timezone_mock, models_timezone_mock
)
case = make_recipe("legalaid.case")
category = case.eligibility_check.category
case.matter_type1 = make_recipe("legalaid.matter_type1", category=category)
case.matter_type2 = make_recipe("legalaid.matter_type2", category=category)
case.save()
provider = make_recipe("cla_provider.pr
|
alen-alex/aqi_service
|
aqi_service/util/log_utils.py
|
Python
|
apache-2.0
| 2,106
| 0.001899
|
# -*- coding: utf-8 -*-
import os
import logging
import datetime
def get_logger(directory, name):
"""
"""
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
date_handler = DateFileHandler(directory, name)
fmt_str = '%(asctime)s %(process)d %(module)s.%(funcName)s.%(lineno)d %(levelname)s : %(message)s'
    fmt = logging.Formatter(fmt_str, datefmt='%Y-%m-%d %H:%M:%S')
date_handler.setFormatter(fmt)
logger.addHandler(date_handler)
return logger
class DateFileHandler(logging.StreamHandler):
"""
    Logging handler that writes to one file per date, rolling over when the date changes
"""
def __init__(self, directory, log_name='', mode='a'):
self.directory = directory
self.log_name = log_name
self.mode = mode
self.last_date = None
logging.StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
logging.StreamHandler.close(self)
self.stream = None
finally:
self.release()
def gen_file_name(self):
self.last_date = datetime.datetime.now().date()
log_directory = '%s/%04d-%02d' % (self.directory, self.last_date.year, self.last_date.month)
os.system("mkdir -p %s" % log_directory)
log_file = '%s/%s.%s.log' % (log_directory, self.last_date.day, self.log_name)
return log_file
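    # e.g. directory='/var/log/aqi', log_name='fetch' on 2020-03-05 yields
    # /var/log/aqi/2020-03/5.fetch.log (the month directory is mkdir -p'ed)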
def _open(self):
log_file = self.gen_file_name()
stream = open(log_file, self.mode)
return stream
def should_roll(self):
date = datetime.datetime.now().date()
if date == self.last_date:
return False
else:
return True
def emit(self, record):
"""
Emit a record.
"""
if self.should_roll():
self.close()
if self.stream is None:
self.stream = self._open()
logging.StreamHandler.emit(self, record)
|
jawilson/home-assistant
|
tests/components/vacuum/common.py
|
Python
|
apache-2.0
| 6,031
| 0.001326
|
"""Collection of helper methods.
All contained methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.vacuum import (
ATTR_FAN_SPEED,
ATTR_PARAMS,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_SEND_COMMAND,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_START_PAUSE,
SERVICE_STOP,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.loader import bind_hass
@bind_hass
def turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum on."""
hass.add_job(async_turn_on, hass, entity_id)
async def async_turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum on."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
    await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
@bind_hass
def turn_off(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum off."""
hass.add_job(async_turn_off, hass, entity_id)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
@bind_hass
def toggle(hass, entity_id=ENTITY_MATCH_ALL):
"""Toggle all or specified vacuum."""
hass.add_job(async_toggle, hass, entity_id)
async def async_toggle(hass, entity_id=ENTITY_MATCH_ALL):
"""Toggle all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data, blocking=True)
@bind_hass
def locate(hass, entity_id=ENTITY_MATCH_ALL):
"""Locate all or specified vacuum."""
hass.add_job(async_locate, hass, entity_id)
async def async_locate(hass, entity_id=ENTITY_MATCH_ALL):
"""Locate all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_LOCATE, data, blocking=True)
@bind_hass
def clean_spot(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to perform a spot clean-up."""
hass.add_job(async_clean_spot, hass, entity_id)
async def async_clean_spot(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to perform a spot clean-up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, data, blocking=True)
@bind_hass
def return_to_base(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to return to base."""
hass.add_job(async_return_to_base, hass, entity_id)
async def async_return_to_base(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to return to base."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, data, blocking=True)
@bind_hass
def start_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or pause the current task."""
hass.add_job(async_start_pause, hass, entity_id)
async def async_start_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_START_PAUSE, data, blocking=True)
@bind_hass
def start(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or resume the current task."""
hass.add_job(async_start, hass, entity_id)
async def async_start(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or resume the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_START, data, blocking=True)
@bind_hass
def pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or the specified vacuum to pause the current task."""
hass.add_job(async_pause, hass, entity_id)
async def async_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or the specified vacuum to pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_PAUSE, data, blocking=True)
@bind_hass
def stop(hass, entity_id=ENTITY_MATCH_ALL):
"""Stop all or specified vacuum."""
hass.add_job(async_stop, hass, entity_id)
async def async_stop(hass, entity_id=ENTITY_MATCH_ALL):
"""Stop all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_STOP, data, blocking=True)
@bind_hass
def set_fan_speed(hass, fan_speed, entity_id=ENTITY_MATCH_ALL):
"""Set fan speed for all or specified vacuum."""
hass.add_job(async_set_fan_speed, hass, fan_speed, entity_id)
async def async_set_fan_speed(hass, fan_speed, entity_id=ENTITY_MATCH_ALL):
"""Set fan speed for all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_FAN_SPEED] = fan_speed
await hass.services.async_call(DOMAIN, SERVICE_SET_FAN_SPEED, data, blocking=True)
@bind_hass
def send_command(hass, command, params=None, entity_id=ENTITY_MATCH_ALL):
"""Send command to all or specified vacuum."""
hass.add_job(async_send_command, hass, command, params, entity_id)
async def async_send_command(hass, command, params=None, entity_id=ENTITY_MATCH_ALL):
"""Send command to all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_COMMAND] = command
if params is not None:
data[ATTR_PARAMS] = params
await hass.services.async_call(DOMAIN, SERVICE_SEND_COMMAND, data, blocking=True)
|
Richard-Mathie/cassandra_benchmark
|
vendor/github.com/datastax/python-driver/tests/integration/cqlengine/statements/test_where_clause.py
|
Python
|
apache-2.0
| 1,642
| 0.000609
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import six
from cassandra.cqlengine.operators import EqualsOperator
from cassandra.cqlengine.statements import StatementException, WhereClause
class TestWhereClause(unittest.TestCase):
def test_operator_check(self):
""" tests that creating a where statement with a non BaseWhereOperator object fails """
        with self.assertRaises(StatementException):
WhereClause('a', 'b', 'c')
def test_where_clause_rendering(self):
""" tests that where clauses are rendered properly """
wc = WhereClause('a', EqualsOperator(), 'c')
wc.set_context_id(5)
self.assertEqual('"a" = %(5)s', six.text_type(wc), six.text_type(wc))
self.assertEqual('"a" = %(5)s', str(wc), type(wc))
def test_equality_method(self):
""" tests that 2 identical where clauses evaluate as == """
wc1 = WhereClause('a', EqualsOperator(), 'c')
wc2 = WhereClause('a', EqualsOperator(), 'c')
assert wc1 == wc2
|
downpoured/lnzscript
|
lnzscript/util/python/zipfile_wrapper.py
|
Python
|
gpl-3.0
| 3,191
| 0.024444
|
'''
File.zipExtract(strZipfile, strDirectory) // returns # of extracted files or false on error
File.zipList() //returns entries
File.zipList(true) //returns only files, not folders
File.zipCreate(strDirectory)
'''
import os
import zipfile
from io import BytesIO
def zipfile_list( filename, includeFolders=False):
try:
zf = zipfile.ZipFile( filename )
except IOError: return False
except zipfile.BadZipFile: return False
except: return False
namelist = zf.namelist()
if not includeFolders: namelist = [x for x in namelist if not x.endswith( '/' )]
return namelist
#http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/465649
def zipfile_extract( filename, dir ):
try:
zf = zipfile.ZipFile( filename )
except IOError: return False
except zipfile.BadZipFile: return False
except: return False
namelist = zf.namelist()
dirlist = [x for x in namelist if x.endswith( '/' )]
filelist = [x for x in namelist if not x.endswith( '/' )]
# make base
pushd = os.getcwd()
if not os.path.isdir( dir ):
os.mkdir( dir )
os.chdir( dir )
# create directory structure
dirlist.sort()
for dirs in dirlist:
dirs = dirs.split( '/' )
prefix = ''
for dir in dirs:
dirname = os.path.join( prefix, dir )
if dir and not os.path.isdir( dirname ):
os.mkdir( dirname )
prefix = dirname
# extract files
err = False
count = 0
try:
for filename in filelist:
out = open( filename, 'wb' )
            buffer = BytesIO( zf.read( filename ))
buflen = 2 ** 20
datum = buffer.read( buflen )
            while datum:
                out.write( datum )
datum = buffer.read( buflen )
out.close()
count+=1
except:
err = True
finally:
os.chdir( pushd )
if err: return False
else: return count
def zipfile_create(strDirectory, strFilename):
    try:
        zf = zipfile.ZipFile( strFilename, 'w', zipfile.ZIP_DEFLATED)
    except: return False
    err = False
    pushd = os.getcwd()
    os.chdir(strDirectory)
    # add contents to the archive, storing paths relative to strDirectory
    try:
        for dirpath, dirnames, filenames in os.walk(strDirectory):
            for filename in filenames:
                fullpath = os.path.join(dirpath, filename)
                zf.write(fullpath, os.path.relpath(fullpath, strDirectory))
    except:
        err = True
    finally:
        zf.close()
        os.chdir( pushd )
    return not err
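# Illustrative helper (an addition, not part of the original module): a quick
# round-trip of the wrappers above; 'sample.zip' and 'outdir' are hypothetical.
def _demo_zip_roundtrip(strZipfile='sample.zip', strOutDir='outdir'):
    names = zipfile_list(strZipfile)                # all entries, or False on error
    if names is False:
        return False
    count = zipfile_extract(strZipfile, strOutDir)  # extracted file count, or False
    return names, count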
if __name__=='__main__':
testf = r'C:\Documents and Settings\bfisher\My Documents\winrhythm.zip'
#~ testf = r'C:\Documents and Settings\bfisher\My Documents\web hosting.txt'
z = zipfile.ZipFile(testf, 'r')
print(z.namelist())
strDirectory = r'C:\Documents and Settings\bfisher\My Documents\lnzbinary'
os.chdir(strDirectory)
for dirpath, dirnames, filenames in os.walk(strDirectory):
for filename in filenames:
        if dirpath.startswith(strDirectory):
            print(dirpath + '/' + filename)
#~ z = zipfile.ZipFile('test.zip','w', zipfile.ZIP_DEFLATED)
#~ z.write('test.txt')
#~ z.close()
|
MrHalfman/gabbler
|
social/views.py
|
Python
|
gpl-2.0
| 7,760
| 0.001289
|
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, JsonResponse
import re
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from core.models import User
from social.models import Gab, ModerationReport, Regab, GabOpinion, UserRelationships, Notifications
from django.template.loader import render_to_string
from django.core.mail import send_mail as django_send_mail
import urllib2
import json
def catch_video_link(gab):
youtube_link = re.search(r'(https?\:\/\/)?(www\.youtube\.com|youtu\.?be)\/[^ ]+', gab)
if youtube_link:
embed_link = re.search(r'(https?\:\/\/)?www\.youtube\.com\/embed\/[^ ]+', youtube_link.group())
if embed_link:
return youtube_link.group()
else:
id_video = youtube_link.group().split("=")[1]
id_video = re.search(r'[^ =&]+', id_video)
if id_video:
return "http://www.youtube.com/embed/" + id_video.group()
return False
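# Example (illustrative): catch_video_link("see https://www.youtube.com/watch?v=abc123")
# returns "http://www.youtube.com/embed/abc123"; links already in embed form
# are passed through unchanged.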
def catch_photo_link(gab):
photo_link = re.search(r'(https?\:\/\/)?[$\-./+!*(),\w]+/[\w-]+\.(png|jpg|gif)', gab)
if photo_link:
return photo_link.group()
return False
def catch_gifid(text):
giphy_request = re.findall(r'g/(([a-zA-Z0-9]+(\+[a-zA-Z0-9]+)*))', text)
if giphy_request:
get_parameters = "+".join(str(id[0]) for id in giphy_request)
url = "http://api.giphy.com/v1/gifs/search?q=" + get_parameters + "&limit=1&api_key=l41lICEpoxH594Kly"
req = urllib2.Request(url, headers={'User-Agent' : "Magic Browser"})
response = urllib2.urlopen(req)
if response:
decoded_json = json.loads(response.read())
if decoded_json["data"]:
return decoded_json["data"][0]["id"]
return False
def send_mail(user, title, html_body, text, type):
if not getattr(user.mail_notifications, type):
return False
mail_content = {
"mail_title": title,
"mail_body": html_body
}
html_content = render_to_string("mail_template.html", mail_content)
string_content = text
django_send_mail(
"Welcome!",
string_content,
"gabbler.noreply@gmail.com",
[user.email],
fail_silently=True,
html_message=html_content
)
@login_required
def post_gab(request):
text = request.POST.get("text")
gab = Gab(
user=request.user,
text=text
)
gif = catch_gifid(text)
video = catch_video_link(text)
picture = catch_photo_link(text)
if gif:
gab.gif_id = gif
if video:
gab.video = video
if picture:
gab.picture = picture
regex = re.compile('(@\w+)')
userlist = regex.findall(text)
gab.save()
notifications_bulk = []
for uname in userlist:
try:
user = User.objects.get(username=uname[1:])
text = "%s mentioned your name in a gab." % request.user.username
notifications_bulk.append(Notifications(
user=user,
text=text,
link="/gab/%d" % gab.pk
))
send_mail(user, "You have been mentioned on Gabbler.", text, text, "citation")
except User.DoesNotExist:
pass
Notifications.objects.bulk_create(notifications_bulk)
return HttpResponseRedirect("/")
@login_required
def delete_gab(request, gab_pk):
Gab.objects.filter(pk=gab_pk).delete()
return HttpResponseRedirect("/")
@csrf_exempt
@login_required
def report_gab(request, gab_pk):
gab = Gab.objects.get(pk=gab_pk)
ModerationReport.objects.create(
by=request.user,
gab=gab,
reason=request.GET.get("reason")
)
return JsonResponse({"success": True})
@staff_member_required
@login_required
def moderation_reports(request):
reports = ModerationReport.objects.filter(processed=False)
return render(request, "admin/moderation_reports.html", locals())
@staff_member_required
@login_required
def moderation_reports_processed(request, report_pk):
report = ModerationReport.objects.get(pk=report_pk)
report.processed = True
report.save()
    return HttpResponseRedirect("/admin/reports/")
@login_required
def regab(request, gab_pk):
gab = Gab.objects.get(pk=gab_pk)
regab, created = Regab.objects.get_or_create(
gab=gab,
user=request.user
)
regabbed = created
if not created:
regab.delete()
else:
text = "%s regabbed your gab." % request.user.username
Notifications.objects.create(
user=gab.user,
text=text,
link="/gab/%d" % regab.pk
)
send_mail(gab.user, "One of your gabs has been regabbed.", text, text, "regab")
return JsonResponse({
"success": True,
"regabbed": regabbed,
"regabs": gab.regabs.count()
})
@login_required
def like(request, gab_pk):
gab = Gab.objects.get(pk=gab_pk)
opinion, created = GabOpinion.objects.get_or_create(
user=request.user,
gab=gab
)
response = {
"success": True
}
if not created and opinion.like is True:
opinion.delete()
else:
opinion.like = True
opinion.save()
response['liking'] = True
text = "%s liked your gab." % request.user.username
if gab.user != request.user:
Notifications.objects.create(
user=gab.user,
text=text,
link="/gab/%d" % gab.pk
)
send_mail(gab.user, "One of your gabs has been liked.", text, text, "like")
response['likes'] = gab.likes.count()
response['dislikes'] = gab.dislikes.count()
return JsonResponse(response)
@login_required
def dislike(request, gab_pk):
gab = Gab.objects.get(pk=gab_pk)
opinion, created = GabOpinion.objects.get_or_create(
user=request.user,
gab=gab
)
response = {
"success": True
}
if not created and opinion.like is False:
opinion.delete()
else:
opinion.like = False
opinion.save()
response['disliking'] = True
response['dislikes'] = gab.dislikes.count()
response['likes'] = gab.likes.count()
return JsonResponse(response)
@login_required
def follow(request, user_pk):
usr = User.objects.get(pk=user_pk)
relationship, created = UserRelationships.objects.get_or_create(
user=request.user,
following=usr
)
if not created:
relationship.delete()
else:
Notifications.objects.create(
user=usr,
text="%s followed you." % request.user.username,
link="/user/%s" % request.user.username
)
return HttpResponseRedirect("/user/%s" % usr.username)
@login_required
def mark_notifications_asread(request):
request.user.unread_notifications.update(read=True)
return JsonResponse({"success": True})
@login_required
def search(request, query):
words = query.split(" ")
gabs = list()
users = list()
for word in words:
gabs += list(Gab.objects.filter(text__icontains=word))
users += list(User.objects.filter(username__icontains=word))
return render(request, "search.html", locals())
@login_required
def getGabs(request, page=0):
gabs_per_page = 10
min = int(page) * gabs_per_page
max = min + gabs_per_page
gabs = request.user.gabsfeed[min:max]
return render(request, "skeletons/gabs_list.html", {"gabs": gabs})
@login_required
def getGab(request, pk):
return render(request, "social/gab.html", {"gab": Gab.objects.get(pk=pk)})
|
MattBlack85/django-query-logger
|
query_logger/settings.py
|
Python
|
mit
| 981
| 0.002039
|
QUERY_LOGGING = {
'formatters': {
'wrapper_verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'wrapper_simple': {
'format': '%(levelname)s %(message)s'
},
'wrapper_json': {
'()': 'jsonlogger.JsonFormatter',
            'format': '%(levelname)s %(asctime)s %(name)s %(module)s %(lineno)d '
                      '%(process)d %(thread)d %(message)s'
},
'wrapper_simple_json': {
            '()': 'jsonlogger.JsonFormatter',
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console_wrapper': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
            'formatter': 'wrapper_verbose',
}
},
'loggers': {
'query_logger.wrapper': {
'handlers': ['console_wrapper'],
'level': 'DEBUG',
'propagate': True
}
}
}
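# Hedged usage sketch (an addition): these fragments are meant to be merged
# into a dictConfig-style Django LOGGING setting, roughly like so.
#   LOGGING = {'version': 1, 'disable_existing_loggers': False}
#   for section in ('formatters', 'handlers', 'loggers'):
#       LOGGING.setdefault(section, {}).update(QUERY_LOGGING[section])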
|
redmoo-info/proddict
|
ru/books.py
|
Python
|
mit
| 8,821
| 0.000141
|
# -*- coding: utf-8 -*-
"""'Books' part of product categories dictionary.
Must hold subcategories of 'Books'
category in the form of python dictionary data type.
"""
books = {('books', 'книги'): {
('audiobooks', 'аудиокниги'): {
('biography', 'биографии'): {},
('business, economics', 'бизнес, экономика'): {},
('kids', 'детское'): {},
('fiction, literature', 'художественная литература'): {},
('health, fitness', 'здоровье, фитнес'): {},
('history', 'история'): {},
('humor', 'юмор'): {},
('parenting, family', 'семья, воспитание'): {},
('philosophy', 'философия'): {},
('poetry', 'поэзия'): {},
('psychology', 'психология'): {},
('religion, spirituality', 'религия, духовность'): {},
('science, medicine', 'наука, медицина'): {},
('self-help', 'самопомощь'): {},
('travel', 'путешествия'): {},
('other', 'другое'): {},
},
('kids', 'детское'): {
('ABCs, numbers', 'алфавит, числа'): {},
('activity, coloring books', 'раскраски, игры'): {},
('bedtime', 'перед сном'): {},
('classics', 'классика'): {},
('fairy tales, myths', 'сказки, мифы'): {},
('fiction', 'художественная литература'): {},
('geography', 'география'): {},
('history', 'история'): {},
('humor', 'юмор'): {},
('learning to read', 'учимся читать'): {},
('nursery rhymes', 'стишки'): {},
('picture books', 'иллюстрированные книги'): {},
('poetry', 'поэзия'): {},
('pop-up, movable', '3D, подвижные'): {},
('religion', 'религия'): {},
('science, nature', 'наука, природа'): {},
('sports', 'спорт'): {},
('foreign language', 'иностранный язык'): {},
('other', 'другое'): {},
},
('poetry', 'поэзия'): {},
('cookbooks', 'кулинарные книги'): {},
('comic books', 'комиксы'): {},
('fiction', 'художественная литература'): {
('action, adventure', 'приключения'): {},
('classics', 'классика'): {},
('drama', 'драма'): {},
('fantasy', 'фэнтези'): {},
('folklore, mythology', 'фольклор, мифология'): {},
('historical', 'историческое'): {},
('horror', 'ужасы'): {},
('humor', 'юмор'): {},
('military', 'военное'): {},
('mystery, thriller', 'мистерия, триллер'): {},
('religious', 'религиозное'): {},
('romance', 'романтичное'): {},
('science fiction', 'научная фантастика'): {},
('foreign language', 'иностранный язык'): {},
('other', 'другое'): {},
},
('non-fiction', 'научно-популярные'): {
        ('antiques, collectibles', 'антиквариат, коллекционное'): {},
        ('architecture', 'архитектура'): {},
('art', 'живопись, искусство'): {},
('photography', 'фотография'): {},
('biography', 'биографии'): {},
('business, economics', 'бизнес, экономика'): {},
('craft', 'рукоделие, ремесло'): {},
('food, cooking', 'продукты, поваренные'): {},
('design', 'дизайн'): {},
('flora, fauna', 'флора, фауна'): {},
('computers, internet', 'компьютер, интернет'): {},
('family, relationships', 'семья, отношения'): {},
('games, puzzles', 'игры, пазлы'): {},
('health, fitness', 'здоровье, фитнес'): {},
('history', 'история'): {},
('hobbies', 'хобби'): {},
('home, garden', 'дом, сад'): {},
('humor', 'юмор'): {},
('law', 'право'): {},
('medical', 'медицина'): {},
('military, war', 'военное'): {},
('movies, tv', 'фильмы, тв'): {},
('music', 'музыка'): {},
('outdoor, nature', 'на природе'): {},
('performing arts', 'исполнительное искусство'): {},
('pets', 'домашние животные'): {},
('philosophy', 'философия'): {},
('politics', 'политика'): {},
('psychology', 'психология'): {},
('reference', 'справочники'): {},
('religion, spirituality', 'религия, духовность'): {},
('science, tech', 'наука, технология'): {},
('self-help', 'самопомощь'): {},
('social sciences', 'общественные науки'): {},
('sports, recreation', 'спорт, активный отдых'): {},
('teaching, education', 'обучение, образование'): {},
('transport', 'транспорт'): {},
('travel', 'путешествия'): {},
('other', 'другое'): {},
},
('school textbooks', 'школьные учебники'): {
('biology', 'биология'): {},
('business, economics', 'бизнес, экономика'): {},
('computers, internet', 'компьютер, интернет'): {},
('chemistry', 'химия'): {},
('english, grammar', 'английский'): {},
('foreign language', 'иностранный язык'): {},
('geography', 'география'): {},
('history', 'история'): {},
('humanities', 'гуманитарные науки'): {},
('math', 'математика'): {},
('music', 'музыка'): {},
('performing arts', 'исполнительное искусство'): {},
('philosophy', 'философия'): {},
('physics', 'физика'): {},
('psychology', 'психология'): {},
('reference', 'справочники'): {},
        ('religion', 'религия'): {},
('science, tech', 'наука, технология'): {},
('social sciences', 'общественные науки'): {},
('sports', 'спорт'): {},
('other', 'другое'): {},
},
('textbooks', 'учебники'): {
('accounting', 'бухгалтерия'): {},
        ('architecture, design', 'архитектура, дизайн'): {},
('art, photography', 'искусство, фотография'): {},
('biology', 'биология'): {},
('business, economics', 'бизнес, экономика'): {},
('computers, internet', 'компьютер, интернет'): {},
('chemistry', 'химия'): {},
('engineering', 'инжиниринг, технологии'): {},
('english, grammar', 'английский'): {},
('foreign language', 'иностранный язык'): {},
('gardening, landscaping', 'ландшафт, садоводство'): {},
('genetics', 'генетика'): {},
('geography', 'география'): {},
('history', 'история'): {},
('humanities', 'гуманитарные науки'): {},
('linguistics', 'лингвистика'): {},
('law', 'право'): {},
('management', 'менеджмент, управление'): {},
('math', 'математика'): {},
('medicine', 'медицина'): {},
('marketing', 'маркетинг'): {},
('music', 'музыка'): {},
('performing arts', 'исполнительное искусство'): {},
('philosophy', 'философия'): {},
('physics', 'физика'): {},
('political science', 'политические науки'): {},
('psychology', 'психология'): {},
('reference', 'справочники'): {},
('religion', 'религия'): {},
('science, tech', 'наука, технология'): {},
('social sciences', 'общественные науки'): {},
('sports', 'спорт'): {},
('teaching, education', 'обучение, образование'): {},
('other', 'другое'): {},
},
('other', 'другое'): {},
}
}
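# Illustrative helper (an addition, not part of the original dictionary):
# walk the nested category tree, yielding the (english, russian) pair path
# from the root down to every node.
def iter_category_paths(tree=books, prefix=()):
    for pair, subtree in tree.items():
        path = prefix + (pair,)
        yield path
        for deeper in iter_category_paths(subtree, path):
            yield deeper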
|
vermouthmjl/scikit-learn
|
sklearn/learning_curve.py
|
Python
|
bsd-3-clause
| 14,601
| 0
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
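# Hedged usage sketch (an addition; assumes scikit-learn's bundled iris data):
#   from sklearn.datasets import load_iris
#   from sklearn.tree import DecisionTreeClassifier
#   X, y = load_iris(return_X_y=True)
#   sizes, train_scores, test_scores = learning_curve(
#       DecisionTreeClassifier(), X, y, cv=5)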
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
    n_max_required_samples = np.max(train_sizes_abs)
|
markflyhigh/incubator-beam
|
sdks/python/apache_beam/examples/snippets/transforms/element_wise/keys_test.py
|
Python
|
apache-2.0
| 1,774
| 0.002274
|
# coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from apache_beam.examples.snippets.transforms.element_wise.keys import *
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
@mock.patch('apache_beam.Pipeline', TestPipeline)
# pylint: disable=line-too-long
@mock.patch('apache_beam.examples.snippets.transforms.element_wise.keys.print', lambda elem: elem)
# pylint: enable=line-too-long
class KeysTest(unittest.TestCase):
def __init__(self, methodName):
super(KeysTest, self).__init__(methodName)
# [START icons]
icons = [
'🍓',
'🥕',
'🍆',
        '🍅',
        '🥔',
]
# [END icons]
    self.icons_test = lambda actual: assert_that(actual, equal_to(icons))
def test_keys(self):
keys(self.icons_test)
if __name__ == '__main__':
unittest.main()
|
vineodd/PIMSim
|
GEM5Simulation/gem5/src/learning_gem5/part2/SimpleObject.py
|
Python
|
gpl-3.0
| 1,750
| 0.000571
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jason Lowe-Power
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power
from m5.params import *
from m5.SimObject import SimObject
class SimpleObject(SimObject):
type = 'SimpleObject'
cxx_header = "learning_gem5/part2/simple_object.hh"
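# Illustrative sketch (an addition): a gem5 config script would instantiate
# this SimObject roughly as follows; the Root setup is an assumption.
#   import m5
#   from m5.objects import *
#   root = Root(full_system=False)
#   root.simple = SimpleObject()
#   m5.instantiate()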
|
lj020326/cloudify3-plugin-test
|
plugin/tests/file_server.py
|
Python
|
apache-2.0
| 3,222
| 0
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import subprocess
import SimpleHTTPServer
import SocketServer
import os
import sys
import socket
import time
from multiprocessing import Process
from cloudify.utils import setup_logger
from cloudify import exceptions
PORT = 53229
FNULL = open(os.devnull, 'w')
logger = setup_logger('cloudify.plugin.tests.file_server')
class FileServer(object):
def __init__(self, root_path, use_subprocess=False, timeout=5):
self.root_path = root_path
self.process = Process(target=self.start_impl)
self.use_subprocess = use_subprocess
self.timeout = timeout
    def start(self):
        logger.info('Starting file server')
if self.use_subprocess:
subprocess.Popen(
[sys.executable, __file__, self.root_path],
stdin=FNULL,
stdout=FNULL,
stderr=FNULL)
else:
self.process.start()
end_time = time.time() + self.timeout
while end_time > time.time():
if self.is_alive():
                logger.info('File server is up and serving from {0}'
.format(self.root_path))
return
logger.info('File server is not responding. waiting 10ms')
time.sleep(0.1)
raise exceptions.TimeoutException('Failed starting '
'file server in {0} seconds'
.format(self.timeout))
def stop(self):
try:
logger.info('Shutting down file server')
self.process.terminate()
while self.is_alive():
logger.info('File server is still up. waiting for 10ms')
time.sleep(0.1)
logger.info('File server has shut down')
except BaseException as e:
logger.warning(str(e))
def start_impl(self):
logger.info('Starting file server and serving files from: %s',
self.root_path)
os.chdir(self.root_path)
class TCPServer(SocketServer.TCPServer):
allow_reuse_address = True
httpd = TCPServer(('0.0.0.0', PORT),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.serve_forever()
@staticmethod
def is_alive():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('localhost', PORT))
s.close()
return True
except socket.error:
return False
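# Hedged usage sketch (an addition): serving test fixtures over HTTP;
# '/tmp/fixtures' is a hypothetical path.
#   fs = FileServer('/tmp/fixtures')
#   fs.start()
#   try:
#       pass  # fetch http://localhost:53229/<file> here
#   finally:
#       fs.stop()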
if __name__ == '__main__':
FileServer(sys.argv[1]).start_impl()
|
autopower/thermeq3
|
support/del_dev.py
|
Python
|
gpl-3.0
| 972
| 0.001029
|
import socket
import base64
def read_lines():
global client_socket
lines_buffer = ""
data = True
while data:
try:
data = client_socket.recv(4096)
lines_buffer += data
except socket.timeout:
break
return lines_buffer
print("thermeq3 delete device from MAX!Cube\n")
# please edit max cube address
max_ip = "192.168.0.200"
# please edit device for deletion
dev_id = "DEAD01"
print("Deleting device id: " + dev_id + "from MAX!Cube with IP: " + max_ip)
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.settimeout(2)
client_socket.connect((max_ip, 62910))
result = read_lines()
dev_id_plain = bytearray.fromhex(dev_id).decode()
message = "t:01,1," + base64.b64encode(dev_id_plain) + "\r\n"
client_socket.sendall(message)
# result must be "A:"
print("Command issued. Please read result. Result must begin with A:\n----------")
print(read_lines())
client_socket.close()
|
AndrewNeudegg/CIMC
|
run.py
|
Python
|
mit
| 65
| 0.015385
|
from CIMC import app
if __name__ == '__main__':
    app.run()
|
abelcarreras/aiida_extensions
|
plugins/jobs/lammps/md.py
|
Python
|
mit
| 9,997
| 0.003201
|
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.utils import classproperty
from potentials import LammpsPotential
import numpy as np
def generate_LAMMPS_structure(structure):
import numpy as np
types = [site.kind_name for site in structure.sites]
type_index_unique = np.unique(types, return_index=True)[1]
count_index_unique = np.diff(np.append(type_index_unique, [len(types)]))
atom_index = []
for i, index in enumerate(count_index_unique):
atom_index += [i for j in range(index)]
masses = [site.mass for site in structure.kinds]
positions = [site.position for site in structure.sites]
number_of_atoms = len(positions)
lammps_data_file = 'Generated using dynaphopy\n\n'
lammps_data_file += '{0} atoms\n\n'.format(number_of_atoms)
lammps_data_file += '{0} atom types\n\n'.format(len(masses))
cell = np.array(structure.cell)
a = np.linalg.norm(cell[0])
b = np.linalg.norm(cell[1])
c = np.linalg.norm(cell[2])
    alpha = np.arccos(np.dot(cell[1], cell[2])/(c*b))
gamma = np.arccos(np.dot(cell[1], cell[0])/(a*b))
beta = np.arccos(np.dot(cell[2], cell[0])/(a*c))
xhi = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
yhi = np.sqrt(pow(b,2)- pow(xy,2))
yz = (b*c*np.cos(alpha)-xy * xz)/yhi
zhi = np.sqrt(pow(c,2)-pow(xz,2)-pow(yz,2))
    xhi = xhi + max(0.0, xy, xz, xy+xz)
    yhi = yhi + max(0.0, yz)
lammps_data_file += '\n{0:20.10f} {1:20.10f} xlo xhi\n'.format(0, xhi)
lammps_data_file += '{0:20.10f} {1:20.10f} ylo yhi\n'.format(0, yhi)
lammps_data_file += '{0:20.10f} {1:20.10f} zlo zhi\n'.format(0, zhi)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f} xy xz yz\n\n'.format(xy, xz, yz)
lammps_data_file += 'Masses\n\n'
for i, mass in enumerate(masses):
lammps_data_file += '{0} {1:20.10f} \n'.format(i+1, mass)
lammps_data_file += '\nAtoms\n\n'
for i, row in enumerate(positions):
lammps_data_file += '{0} {1} {2:20.10f} {3:20.10f} {4:20.10f}\n'.format(i+1, atom_index[i]+1, row[0],row[1],row[2])
return lammps_data_file
def generate_LAMMPS_input(parameters,
potential_obj,
structure_file='potential.pot',
trajectory_file='trajectory.lammpstr'):
random_number = np.random.randint(10000000)
names_str = ' '.join(potential_obj._names)
lammps_input_file = 'units metal\n'
lammps_input_file += 'boundary p p p\n'
lammps_input_file += 'box tilt large\n'
lammps_input_file += 'atom_style atomic\n'
lammps_input_file += 'read_data {}\n'.format(structure_file)
lammps_input_file += potential_obj.get_input_potential_lines()
lammps_input_file += 'neighbor 0.3 bin\n'
lammps_input_file += 'neigh_modify every 1 delay 0 check no\n'
lammps_input_file += 'timestep {}\n'.format(parameters.dict.timestep)
lammps_input_file += 'thermo_style custom step etotal temp vol press\n'
lammps_input_file += 'thermo 1000\n'
lammps_input_file += 'velocity all create {0} {1} dist gaussian mom yes\n'.format(parameters.dict.temperature, random_number)
lammps_input_file += 'velocity all scale {}\n'.format(parameters.dict.temperature)
lammps_input_file += 'fix int all nvt temp {0} {0} {1}\n'.format(parameters.dict.temperature, parameters.dict.thermostat_variable)
lammps_input_file += 'run {}\n'.format(parameters.dict.equilibrium_steps)
lammps_input_file += 'reset_timestep 0\n'
lammps_input_file += 'dump aiida all custom {0} {1} element x y z\n'.format(parameters.dict.dump_rate, trajectory_file)
lammps_input_file += 'dump_modify aiida format "%4s %16.10f %16.10f %16.10f"\n'
lammps_input_file += 'dump_modify aiida sort id\n'
lammps_input_file += 'dump_modify aiida element {}\n'.format(names_str)
lammps_input_file += 'run {}\n'.format(parameters.dict.total_steps)
return lammps_input_file
class MdCalculation(JobCalculation):
"""
A basic plugin for calculating force constants using Lammps.
Requirement: the node should be able to import phonopy
"""
def _init_internal_params(self):
super(MdCalculation, self)._init_internal_params()
self._INPUT_FILE_NAME = 'input.in'
self._INPUT_POTENTIAL = 'potential.pot'
self._INPUT_STRUCTURE = 'input.data'
self._OUTPUT_TRAJECTORY_FILE_NAME = 'trajectory.lammpstrj'
self._OUTPUT_FILE_NAME = 'log.lammps'
self._default_parser = 'lammps.md'
@classproperty
def _use_methods(cls):
"""
Additional use_* methods for the namelists class.
"""
retdict = JobCalculation._use_methods
retdict.update({
"parameters": {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'parameters',
'docstring': ("Use a node that specifies the lammps input data "
"for the namelists"),
},
"potential": {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'potential',
'docstring': ("Use a node that specifies the lammps potential "
"for the namelists"),
},
"structure": {
'valid_types': StructureData,
'additional_parameter': None,
'linkname': 'structure',
'docstring': "Use a node for the structure",
},
})
return retdict
def _prepare_for_submission(self,tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
try:
parameters_data = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this "
"calculation")
if not isinstance(parameters_data, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
potential_data = inputdict.pop(self.get_linkname('potential'))
except KeyError:
raise InputValidationError("No potential specified for this "
"calculation")
if not isinstance(potential_data, ParameterData):
raise InputValidationError("potential is not of type "
"ParameterData")
try:
structure = inputdict.pop(self.get_linkname('structure'))
except KeyError:
raise InputValidationError("no structure is specified for this calculation")
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("no code is specified for this calculation")
##############################
# END OF INITIAL INPUT CHECK #
##############################
# =================== prepare the python input files =====================
potential_object = LammpsPotential(potential_data, structure, potential_filename=self._INPUT_POTENTIAL)
structure_txt = generate_LAMMPS_structure(structure)
input_txt = generate_LAMMPS_input(parameters_data,
                                          potential_object)
|
fmount/tmux-layout
|
scripts/menu/tmuxer.py
|
Python
|
mit
| 1,794
| 0.034002
|
#!/usr/bin/env python
#
# v0.1 --
#
# fmount <francesco.pantano@linux.com>
#
#
from argparse import ArgumentParser as argparser
import libtmux
import logging
import subprocess as sub
'''
'''
class Tmuxer(object):
def __init__(self, args):
self.logger = logging.getLogger('Tmuxer')
for attr in args.keys():
setattr(self, attr, args.get(attr, None))
        self.server = libtmux.Server()
self.logger.debug("Acquiring parameters: [%s] - [%s] - [%s] [%s]" \
% (self.layout, self.layout_home, self.session, self.main_pane))
def exist_session(self, curr_sess):
if curr_sess is not None:
return True
return False
def build_command(self):
cmd = "tmux source-file " + '/'.join([self.layout_home, self.layout])
self.logger.debug("Building command %s" % cmd)
return cmd
def get_pane_by_id(self, pane, pid):
if pane._pane_id == pid:
return True
return False
def select_pane(self, session):
current_window = session.attached_window
for p in current_window.list_panes():
self.logger.debug("[#PANE PROCESSING] Found %s" % (p._pane_id))
if self.get_pane_by_id(p, self.main_pane):
return p
return None
def tmux_loadconf(self):
session = self.server.get_by_id("$" + str(self.session))
if self.exist_session(session) and self.layout is not None:
#Get the main pane where the command can be send
#pane = session.attached_pane
pane = self.select_pane(session)
cmd = self.build_command()
self.logger.debug("Sending command to main pane %s" % str(pane))
pane.send_keys(cmd, enter=True)
# MAIN FOR DEBUG PURPOSES ...
if __name__ == '__main__':
params = {"layout": "monitor", "session": 2, 'layout_home': "/home/fmount/tmux-menu-files", "main_pane": 75}
t = Tmuxer(params)
t.build_command()
t.tmux_loadconf()
|
ETShax/Coffeebot
|
pybot/botcommands.py
|
Python
|
gpl-3.0
| 1,196
| 0.01113
|
# -*- coding: UTF-8 -*-
"""
muutama esimerkki komennoista
"""
import random, time, os
from subprocess import call
# commands and their corresponding handler objects are registered in this dict
command_dict = {}
class Test:
def main(self, irc, line):
        irc.send('PRIVMSG %s :Hello World!' % line[2])
command_dict[':!test'] = Test()
class Join:
def main(self, irc, line):
        if line[0] in irc.users:
irc.send('JOIN %s' % (line[4]))
command_dict[':!join'] = Join()
class Quit:
def main(self, irc, line):
        # this command is only available to admins
if line[0] in irc.users:
irc.send('QUIT')
irc.socket.close()
irc.done = 1
command_dict[':!quit'] = Quit()
class Kahvia:
    # needs a connection to karahka
def main(self, irc, line):
os.system('ssh user@raspi "python ~/raspit/harkka/kahvitulos.py "' )
os.system('scp user@raspi:~/raspit/harkka/kahvitulos.txt .')
tulos = open('kahvitulos.txt', 'r')
for laini in tulos:
irc.send('PRIVMSG %s :%s' % (line[2], laini))
command_dict[':!kahvi'] = Kahvia()
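# Illustrative addition following the same pattern as the commands above:
# define a class with a main(self, irc, line) method and register it.
class Ping:
    def main(self, irc, line):
        irc.send('PRIVMSG %s :pong' % line[2])
command_dict[':!ping'] = Ping()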
|
phenoxim/nova
|
nova/virt/powervm/tasks/storage.py
|
Python
|
apache-2.0
| 14,010
| 0.000071
|
# Copyright 2015, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pypowervm import exceptions as pvm_exc
from pypowervm.tasks import scsi_mapper as pvm_smap
from taskflow import task
from taskflow.types import failure as task_fail
from nova import exception
from nova.virt.powervm import media
from nova.virt.powervm import mgmt
LOG = logging.getLogger(__name__)
class CreateDiskForImg(task.Task):
"""The Task to create the disk from an image in the storage."""
def __init__(self, disk_dvr, context, instance, image_meta):
"""Create the Task.
Provides the 'disk_dev_info' for other tasks. Comes from the disk_dvr
create_disk_from_image method.
:param disk_dvr: The storage driver.
:param context: The context passed into the driver method.
:param instance: The nova instance.
:param nova.objects.ImageMeta image_meta:
The metadata of the image of the instance.
"""
super(CreateDiskForImg, self).__init__(
name='create_disk_from_img', provides='disk_dev_info')
self.disk_dvr = disk_dvr
self.instance = instance
self.context = context
self.image_meta = image_meta
def execute(self):
return self.disk_dvr.create_disk_from_image(
self.context, self.instance, self.image_meta)
def revert(self, result, flow_failures):
# If there is no result, or its a direct failure, then there isn't
# anything to delete.
if result is None or isinstance(result, task_fail.Failure):
return
# Run the delete. The result is a single disk. Wrap into list
# as the method works with plural disks.
try:
self.disk_dvr.delete_disks([result])
except pvm_exc.Error:
# Don't allow revert exceptions to interrupt the revert flow.
LOG.exception("Disk deletion failed during revert. Ignoring.",
instance=self.instance)
class AttachDisk(task.Task):
"""The task to attach the disk to the instance."""
def __init__(self, disk_dvr, instance, stg_ftsk):
"""Create the Task for the attach disk to instance method.
Requires disk info through requirement of disk_dev_info (provided by
crt_disk_from_img)
:param disk_dvr: The disk driver.
:param instance: The nova instance.
:param stg_ftsk: FeedTask to defer storage connectivity operations.
"""
super(AttachDisk, self).__init__(
name='attach_disk', requires=['disk_dev_info'])
self.disk_dvr = disk_dvr
self.instance = instance
self.stg_ftsk = stg_ftsk
def execute(self, disk_dev_info):
self.disk_dvr.attach_disk(self.instance, disk_dev_info, self.stg_ftsk)
def revert(self, disk_dev_info, result, flow_failures):
try:
self.disk_dvr.detach_disk(self.instance)
except pvm_exc.Error:
# Don't allow revert exceptions to interrupt the revert flow.
LOG.exception("Disk detach failed during revert. Ignoring.",
instance=self.instance)
class DetachDisk(task.Task):
"""The task to detach the disk storage from the instance."""
def __init__(self, disk_dvr, instance):
"""Creates the Task to detach the storage adapters.
Provides the stor_adpt_mappings. A list of pypowervm
VSCSIMappings or VFCMappings (depending on the storage adapter).
:param disk_dvr: The DiskAdapter for the VM.
:param instance: The nova instance.
"""
super(DetachDisk, self).__init__(
name='detach_disk', provides='stor_adpt_mappings')
self.instance = instance
self.disk_dvr = disk_dvr
def execute(self):
return self.disk_dvr.detach_disk(self.instance)
class DeleteDisk(task.Task):
"""The task to delete the backing storage."""
def __init__(self, disk_dvr):
"""Creates the Task to delete the disk storage from the system.
Requires the stor_adpt_mappings.
:param disk_dvr: The DiskAdapter for the VM.
"""
super(DeleteDisk, self).__init__(
name='delete_disk', requires=['stor_adpt_mappings'])
self.disk_dvr = disk_dvr
def execute(self, stor_adpt_mappings):
self.disk_dvr.delete_disks(stor_adpt_mappings)
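# Hedged sketch (an addition): these Tasks are designed to be composed into a
# taskflow flow by the driver, roughly along these lines.
#   from taskflow.patterns import linear_flow
#   flow = linear_flow.Flow('destroy_storage')
#   flow.add(DetachDisk(disk_dvr, instance))
#   flow.add(DeleteDisk(disk_dvr))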
class CreateAndConnectCfgDrive(task.Task):
"""The task to create the configuration drive."""
def __init__(self, adapter, instance, injected_files,
network_info, stg_ftsk, admin_pass=None):
"""Create the Task that creates and connects the config drive.
Requires the 'mgmt_cna'
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance
:param injected_files: A list of file paths that will be injected into
the ISO.
:param network_info: The network_info from the nova spawn method.
:param stg_ftsk: FeedTask to defer storage connectivity operations.
:param admin_pass (Optional, Default None): Password to inject for the
VM.
"""
        super(CreateAndConnectCfgDrive, self).__init__(
name='cfg_drive', requires=['mgmt_cna'])
self.adapter = adapter
        self.instance = instance
        self.injected_files = injected_files
self.network_info = network_info
self.stg_ftsk = stg_ftsk
self.ad_pass = admin_pass
self.mb = None
def execute(self, mgmt_cna):
self.mb = media.ConfigDrivePowerVM(self.adapter)
self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
self.network_info, self.stg_ftsk,
admin_pass=self.ad_pass, mgmt_cna=mgmt_cna)
def revert(self, mgmt_cna, result, flow_failures):
# No media builder, nothing to do
if self.mb is None:
return
# Delete the virtual optical media. We don't care if it fails
try:
self.mb.dlt_vopt(self.instance, self.stg_ftsk)
except pvm_exc.Error:
LOG.exception('VOpt removal (as part of reversion) failed.',
instance=self.instance)
class DeleteVOpt(task.Task):
"""The task to delete the virtual optical."""
def __init__(self, adapter, instance, stg_ftsk=None):
"""Creates the Task to delete the instance's virtual optical media.
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance.
:param stg_ftsk: FeedTask to defer storage connectivity operations.
"""
super(DeleteVOpt, self).__init__(name='vopt_delete')
self.adapter = adapter
self.instance = instance
self.stg_ftsk = stg_ftsk
def execute(self):
media_builder = media.ConfigDrivePowerVM(self.adapter)
media_builder.dlt_vopt(self.instance, stg_ftsk=self.stg_ftsk)
class InstanceDiskToMgmt(task.Task):
"""The task to connect an instance's disk to the management partition."
This task will connect the instance's disk to the management partition and
discover it. We do these two pieces together because their reversion
happens in the same order.
"""
def __init__(self, disk_dvr, instance):
"""Create the Task for connecting boot disk to mgmt partition.
Provides:
stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
connected.
|
ralphbean/ansible
|
v2/ansible/plugins/connections/__init__.py
|
Python
|
gpl-3.0
| 1,862
| 0.001611
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError
# FIXME: this object should be created upfront and passed through
# the entire chain of calls to here, as there are other things
# which may want to output display/logs too
from ansible.utils.display import Display
__all__ = ['ConnectionBase']
class ConnectionBase:
'''
A base class for connections to contain common code.
'''
has_pipelining = False
    become_methods = C.BECOME_METHODS
    def __init__(self, connection_info, *args, **kwargs):
self._connection_info = connection_info
self._display = Display(verbosity=connection_info.verbosity)
def _become_method_supported(self, become_method):
        ''' Checks if the current class supports this privilege escalation method '''
if become_method in self.__class__.become_methods:
return True
raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method)
|
Tima-Is-My-Association/TIMA
|
oai_pmh/models.py
|
Python
|
lgpl-3.0
| 3,783
| 0.005551
|
from django.db import models
class TextFieldSingleLine(models.TextField):
pass
class MetadataFormat(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
prefix = TextFieldSingleLine(unique=True)
schema = models.URLField(max_length=2048)
namespace = models.URLField(max_length=2048)
def __str__(self):
return self.prefix
class Meta:
ordering = ('prefix',)
class Set(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
spec = TextFieldSingleLine(unique=True)
name = TextFieldSingleLine()
description = models.TextField(blank=True, null=True)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class Header(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
identifier = TextFieldSingleLine(unique=True)
timestamp = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(default=False)
metadata_formats = models.ManyToManyField(MetadataFormat, related_name='identifiers', blank=True)
sets = models.ManyToManyField(Set, related_name='headers', blank=True)
def __str__(self):
return self.identifier
class Meta:
ordering = ('identifier',)
class ResumptionToken(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expiration_date = models.DateTimeField()
complete_list_size = models.IntegerField(default=0)
    cursor = models.IntegerField(default=0)
token = TextFieldSingleLine(unique=True)
from_timestamp = models.DateTimeField(blank=True, null=True)
until_timestamp = models.DateTimeField(blank=True, null=True)
    metadata_prefix = models.ForeignKey(MetadataFormat, blank=True, null=True)
set_spec = models.ForeignKey(Set, blank=True, null=True)
def __str__(self):
return self.token
class Meta:
ordering = ('expiration_date',)
class DCRecord(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
header = models.OneToOneField(Header, primary_key=True)
dc_title = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:title')
dc_creator = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:creator')
dc_subject = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:subject')
dc_description = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:description')
dc_publisher = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:publisher')
dc_contributor = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:contributor')
dc_date = models.DateTimeField(auto_now=True, verbose_name=' dc:date')
dc_type = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:type')
dc_format = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:format')
dc_identifier = TextFieldSingleLine(verbose_name=' dc:identifier')
dc_source = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:source')
dc_language = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:language')
dc_relation = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:relation')
dc_coverage = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:coverage')
dc_rights = TextFieldSingleLine(blank=True, null=True, verbose_name=' dc:rights')
def __str__(self):
return str(self.header)
class Meta:
ordering = ('header',)
verbose_name = 'Dublin Core record'
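# Hedged usage sketch (an addition; the identifier is hypothetical, the
# oai_dc schema/namespace URLs are the standard ones):
#   fmt = MetadataFormat.objects.create(
#       prefix='oai_dc',
#       schema='http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
#       namespace='http://www.openarchives.org/OAI/2.0/oai_dc/')
#   hdr = Header.objects.create(identifier='oai:example.org:record/1')
#   hdr.metadata_formats.add(fmt)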
|
lino-framework/xl
|
lino_xl/lib/tim2lino/utils.py
|
Python
|
bsd-2-clause
| 12,793
| 0.001564
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Luc Saffre
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Import legacy data from TIM (basic version).
"""
import traceback
import os
from clint.textui import puts, progress
from django.conf import settings
from django.db import models
from lino.utils import AttrDict
from lino.api import dd, rt
from lino.utils import dbfreader
from lino_xl.lib.ledger.utils import DC
class TimLoader(object):
LEN_IDGEN = 6
ROOT = None
archived_tables = set()
archive_name = None
codepage = 'cp850'
# codepage = 'cp437'
# etat_registered = "C"¹
etat_registered = u"¹"
def __init__(self, dbpath, languages=None, **kwargs):
self.dbpath = dbpath
self.VENDICT = dict()
self.FINDICT = dict()
self.GROUPS = dict()
self.languages = dd.resolve_languages(
languages or dd.plugins.tim2lino.languages)
self.must_register = []
self.must_match = {}
self.duplicate_zip_codes = dict()
for k, v in kwargs.items():
assert hasattr(self, k)
setattr(self, k, v)
def finalize(self):
if len(self.duplicate_zip_codes):
for country, codes in self.duplicate_zip_codes.items():
dd.logger.warning(
"%d duplicate zip codes in %s : %s",
len(codes), country, ', '.join(codes))
if self.ROOT is None:
return
ses = rt.login(self.ROOT.username)
Journal = rt.models.ledger.Journal
dd.logger.info("Register %d vouchers", len(self.must_register))
failures = 0
for doc in progress.bar(self.must_register):
# puts("Registering {0}".format(doc))
try:
doc.register(ses)
except Exception as e:
dd.logger.warning("Failed to register %s : %s ", doc, e)
failures += 1
if failures > 100:
dd.logger.warning("Abandoned after 100 failures.")
break
# Given a string `ms` of type 'VKR940095', locate the corresponding
# movement.
dd.logger.info("Resolving %d matches", len(self.must_match))
for ms, lst in self.must_match.items():
for (voucher, matching) in lst:
if matching.pk is None:
dd.logger.warning("Ignored match %s in %s (pk is None)" % (
ms, matching))
continue
idjnl, iddoc = ms[:3], ms[3:]
try:
year, num = year_num(iddoc)
                except ValueError as e:
                    dd.logger.warning("Ignored match %s in %s (%s)" % (
                        ms, matching, e))
                    continue
try:
jnl = Journal.objects.get(ref=idjnl)
except Journal.DoesNotExist:
dd.logger.warning("Ignored match %s in %s (invalid JNL)" % (
ms, matching))
continue
qs = Movement.objects.filter(
voucher__journal=jnl, voucher__number=num,
voucher__year=year, partner__isnull=False)
if qs.count() == 0:
dd.logger.warning("Ignored match %s in %s (no movement)" % (
ms, matching))
continue
matching.match = qs[0]
matching.save()
voucher.deregister(ses)
voucher.register(ses)
def par_class(self, row):
# wer eine nationalregisternummer hat ist eine Person, selbst wenn er
# auch eine MwSt-Nummer hat.
if True: # must convert them manually
return rt.models.contacts.Company
prt = row.idprt
if prt == 'O':
return rt.models.contacts.Company
elif prt == 'L':
return rt.models.lists.List
elif prt == 'P':
return rt.models.contacts.Person
elif prt == 'F':
return rt.models.households.Household
# dd.logger.warning("Unhandled PAR->IdPrt %r",prt)
def dc2lino(self, dc):
if dc == "D":
return DC.debit
elif dc == "C":
return DC.credit
elif dc == "A":
return DC.debit
elif dc == "E":
return DC.credit
raise Exception("Invalid D/C value %r" % dc)
def create_users(self):
pass
def dbfmemo(self, s):
if s is None:
return ''
s = s.replace('\r\n', '\n')
s = s.replace(u'\xec\n', '')
# s = s.replace(u'\r\nì',' ')
# if u'ì' in s:
# raise Exception("20121121 %r contains \\xec" % s)
# it might be at the end of the string:
s = s.replace(u'ì','')
return s.strip()
def after_gen_load(self):
return
Account = rt.models.ledger.Account
sc = dict()
for k, v in dd.plugins.tim2lino.siteconfig_accounts.items():
sc[k] = Account.get_by_ref(v)
settings.SITE.site_config.update(**sc)
# func = dd.plugins.tim2lino.setup_tim2lino
# if func:
# func(self)
def decode_string(self, v):
return v
# return v.decode(self.codepage)
def babel2kw(self, tim_fld, lino_fld, row, kw):
if dd.plugins.tim2lino.use_dbf_py:
import dbf
ex = dbf.FieldMissingError
else:
ex = Exception
for i, lng in enumerate(self.languages):
try:
v = getattr(row, tim_fld + str(i + 1), '').strip()
if v:
v = self.decode_string(v)
kw[lino_fld + lng.suffix] = v
if lino_fld not in kw:
kw[lino_fld] = v
            except ex as e:
                dd.logger.info("Ignoring %s", e)
def load_jnl_alias(self, row, **kw):
vcl = None
if row.alias == 'VEN':
vat = rt.models.vat
ledger = rt.models.ledger
sales = rt.models.sales
if row.idctr == 'V':
kw.update(trade_type=vat.TradeTypes.sales)
kw.update(journal_group=ledger.JournalGroups.sales)
vcl = sales.VatProductInvoice
elif row.idctr == 'E':
kw.update(trade_type=vat.TradeTypes.purchases)
vcl = vat.VatAccountInvoice
kw.update(journal_group=ledger.JournalGroups.purchases)
else:
raise Exception("Invalid JNL->IdCtr '{0}'".format(row.idctr))
elif row.alias == 'FIN':
vat = rt.models.vat
finan = rt.models.finan
ledger = rt.models.ledger
idgen = row.idgen.strip()
kw.update(journal_group=ledger.JournalGroups.financial)
if idgen:
kw.update(account=ledger.Account.get_by_ref(idgen))
                if idgen.startswith('58'):
                    kw.update(trade_type=vat.TradeTypes.purchases)
vcl = finan.PaymentOrder
elif idgen.startswith('5'):
vcl = finan.BankStatement
else:
vcl = finan.JournalEntry
# if vcl is None:
# raise Exception("Journal type not recognized: %s" % row.idjnl)
return vcl, kw
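    # Summary of the mapping implemented above (restating the code, no new
    # rules):
    #   VEN + IdCtr 'V'  -> sales journal, sales.VatProductInvoice
    #   VEN + IdCtr 'E'  -> purchases journal, vat.VatAccountInvoice
    #   FIN, account 58* -> finan.PaymentOrder (purchases trade type)
    #   FIN, other 5*    -> finan.BankStatement
    #   FIN, otherwise   -> finan.JournalEntry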
def load_jnl(self, row, **kw):
vcl = None
        kw.update(ref=row.idjnl.strip(), name=row.libell)
kw.update(dc=self.dc2lino(row.dc))
# kw.update(seqno=self.seq2lino(row.seq.strip()))
kw.update(seqno=int(row.seq.strip()))
# kw.update(seqno=row.recno())
kw.update(auto_check_clearings=False)
vcl, kw = self.load_jnl_alias(row, **kw)
if vcl is not None:
return vcl.create_journal(**kw)
def load_dbf(self, tableName, row2obj=None):
if row2obj is None:
row2obj = getattr(self, 'load_' + tableName[-3:].lower())
fn = self.dbpath
        if self.archive_name is not None:
|
mc706/prog-strat-game
|
core/features/environment.py
|
Python
|
mit
| 1,165
| 0.003433
|
"""
This file is a pre-setup environment file to make splinter play nice with behave.
It is set up to take screenshots of the browser whenever your steps fail.
To change which browser you are using, change the string in Browser(). Note you may need to install Chromedriver for Chrome.
It is lovingly shared and free to use and modify by Ryan McDevitt (mc706.com)
"""
import datetime
import re
import os
from splinter.browser import Browser
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TODAY = datetime.date.today()
SCREENSHOT_DIRECTORY = BASE_DIR + "/screenshots/"
def before_all(context):
    context.browser = Browser('phantomjs')
context.browser.driver.maximize_window()
def after_all(context):
context.browser.quit()
context.browser = None
def after_step(context, step):
if step.status == "failed":
day = "{0.year}_{0.month}_{0.day}".format(TODAY)
name = '[{0}]-{1}'.format(day, step.name)
name = re.sub(' ', '_', name)
name = re.sub('[\\\/]', '-', name)
name = re.sub('[\"\']', '', name)
name += '.png'
context.browser.driver.save_screenshot(SCREENSHOT_DIRECTORY + name)
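# Typical invocation (an assumption, not taken from this repo's docs): run
# "behave" from the project root; a failed step then leaves a file such as
# "[2014_1_2]-my_failing_step.png" in the screenshots/ directory above.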
|
sjohannes/exaile
|
xl/metadata/mod.py
|
Python
|
gpl-2.0
| 2,441
| 0.000819
|
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from xl.metadata._base import BaseFormat
import os
try:
import ctypes
modplug = ctypes.cdll.LoadLibrary("libmodplug.so.0")
modplug.ModPlug_Load.restype = ctypes.c_void_p
modplug.ModPlug_Load.argtypes = (ctypes.c_void_p, ctypes.c_int)
modplug.ModPlug_GetName.restype = ctypes.c_char_p
modplug.ModPlug_GetName.argtypes = (ctypes.c_void_p,)
modplug.ModPlug_GetLength.restype = ctypes.c_int
modplug.ModPlug_GetLength.argtypes = (ctypes.c_void_p,)
except (ImportError, OSError):
modplug = None
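# Pinning restype/argtypes above matters on 64-bit systems: ctypes defaults
# to int return values, which would truncate the pointer from ModPlug_Load
# and garble the char* returned by ModPlug_GetName.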
class ModFormat(BaseFormat):
writable = False
def load(self):
if modplug:
data = open(self.loc, "rb").read()
f = modplug.ModPlug_Load(data, len(data))
if f:
name = modplug.ModPlug_GetName(f) or os.path.split(self.loc)[-1]
length = modplug.ModPlug_GetLength(f) / 1000.0 or -1
self.mutagen = {'title': name, '__length': length}
else:
self.mutagen = {}
def get_length(self):
try:
return self.mutagen['__length']
except KeyError:
return -1
def get_bitrate(self):
return -1
# vim: et sts=4 sw=4
|
Edzvu/Edzvu.github.io
|
M2Crypto-0.35.2/doc/conf.py
|
Python
|
mit
| 9,150
| 0.007541
|
# -*- coding: utf-8 -*-
#
# M2Crypto documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 20 11:15:12 2017.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'M2Crypto'
copyright = u'2017, Matej Cepl <mcepl@cepl.eu>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'M2Cryptodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'M2Crypto.tex', u'M2Crypto Documentation',
u'Matej Cepl \\textless{}mcepl@cepl.eu\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'm2crypto', u'M2Crypto Documentation',
[u'Matej Cepl <mcepl@cepl.eu>'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'M2Crypto', u'M2Crypto Documentation',
u'Matej Cepl <mcepl@cepl.eu>', 'M2Crypto', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'M2Crypto'
epub_author = u'Matej Cepl <mcepl@cepl.eu>'
epub_publisher = u'Matej Cepl <mcepl@cepl.eu>'
epub_copyright = u'2017, Matej Cepl <mcepl@cepl.eu>'
# The language of the text
|
AMOboxTV/AMOBox.LegoBuild
|
plugin.video.salts/scrapers/moviexk_scraper.py
|
Python
|
gpl-2.0
| 7,244
| 0.005384
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://www.moviexk.net'
class MoxieXK_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'MovieXK'
def resolve_link(self, link):
return link
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
source_url = self.get_url(video)
sources = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
if video.video_type == VIDEO_TYPES.MOVIE:
fragment = dom_parser.parse_dom(html, 'div', {'class': 'poster'})
if fragment:
movie_url = dom_parser.parse_dom(fragment[0], 'a', ret='href')
if movie_url:
url = urlparse.urljoin(self.base_url, movie_url[0])
html = self._http_get(url, cache_limit=.5)
episodes = self.__get_episodes(html)
url = self.__get_best_page(episodes)
if not url:
return sources
else:
url = urlparse.urljoin(self.base_url, url)
html = self._http_get(url, cache_limit=.5)
for match in re.finditer('''<source[^>]+src=['"]([^'"]+)([^>]+)''', html):
stream_url, extra = match.groups()
if 'video.php' in stream_url:
redir_url = self._http_get(stream_url, allow_redirect=False, method='HEAD', cache_limit=.25)
if redir_url.startswith('http'): stream_url = redir_url
host = self._get_direct_hostname(stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
match = re.search('''data-res\s*=\s*["']([^"']+)''', extra)
if match:
height = re.sub('(hd|px)', '', match.group(1))
quality = scraper_utils.height_get_quality(height)
else:
quality = QUALITIES.HIGH
                stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
                source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
sources.append(source)
return sources
def __get_best_page(self, episodes):
if 'EPTRAILER' in episodes: del episodes['EPTRAILER']
if 'EPCAM' in episodes: del episodes['EPCAM']
qualities = ['EPHD1080P', 'EPHD720P', 'EPHD', 'EPFULL']
for q in qualities:
if q in episodes:
return episodes[q]
if len(episodes) > 0:
return episodes.items()[0][1]
def __get_episodes(self, html):
labels = dom_parser.parse_dom(html, 'a', {'data-type': 'watch'})
labels = [label.replace(' ', '').upper() for label in labels]
urls = dom_parser.parse_dom(html, 'a', {'data-type': 'watch'}, ret='href')
return dict(zip(labels, urls))
def get_url(self, video):
return self._default_get_url(video)
def search(self, video_type, title, year, season=''):
results = []
search_url = urlparse.urljoin(self.base_url, '/search/')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=1)
for fragment in dom_parser.parse_dom(html, 'div', {'class': 'inner'}):
name = dom_parser.parse_dom(fragment, 'div', {'class': 'name'})
if name:
match = re.search('href="([^"]+)[^>]+>(.*?)</a>', name[0])
if match:
match_url, match_title_year = match.groups()
if 'tv-series' in match_url and video_type == VIDEO_TYPES.MOVIE: continue
match_title_year = re.sub('</?[^>]*>', '', match_title_year)
match_title_year = re.sub('[Ww]atch\s+[Mm]ovie\s*', '', match_title_year)
match_title_year = match_title_year.replace('’', "'")
match = re.search('(.*?)\s+\((\d{4})[^)]*\)$', match_title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = match_title_year
match_year = ''
if not match_year:
year_span = dom_parser.parse_dom(fragment, 'span', {'class': 'year'})
if year_span:
year_text = dom_parser.parse_dom(year_span[0], 'a')
if year_text:
match_year = year_text[0].strip()
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'url': scraper_utils.pathify_url(match_url), 'year': match_year}
results.append(result)
return results
def _get_episode_url(self, show_url, video):
url = urlparse.urljoin(self.base_url, show_url)
html = self._http_get(url, cache_limit=24)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'poster'})
if fragment:
show_url = dom_parser.parse_dom(fragment[0], 'a', ret='href')
if show_url:
episode_pattern = '<a[^>]+href="([^"]+)[^>]+>[Ee][Pp]\s*[Ss]0*%s-E?p?0*%s\s*<' % (video.season, video.episode)
return self._default_get_episode_url(show_url[0], video, episode_pattern)
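    # For season 1, episode 2 the pattern above matches link text such as
    # "Ep S1-E2" or "EP S01-Ep02" (illustrative strings, not sampled from
    # the site).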
|
google/differentiable-atomistic-potentials
|
dap/tests/test_tf_neighborlist.py
|
Python
|
apache-2.0
| 10,324
| 0.005231
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the tensorflow neighborlist module.
pydoc:dap.tf.neighborlist
"""
import numpy as np
import tensorflow as tf
from ase.build import bulk
from ase.neighborlist import NeighborList
# import sys
# sys.path.insert(0, '.')
# from .ase_nl import NeighborList
from dap.tf.neighborlist import (get_distances, get_neighbors_oneway)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class TestNeighborlist(tf.test.TestCase):
"""Tests comparing the TF version and ASE neighborlist implementations.
"""
def test_basic(self):
"""Basic neighborlist test in TF"""
a = 3.6
Rc = a / np.sqrt(2) / 2
atoms = bulk('Cu', 'fcc', a=a)
nl = NeighborList(
[Rc] * len(atoms), skin=0.01, self_interaction=False, bothways=True)
nl.update(atoms)
distances = get_distances({
'cutoff_radius': 2 * Rc
}, atoms.positions, atoms.cell, np.ones((len(atoms), 1)))
mask = (distances <= 2 * Rc) & (distances > 0)
tf_nneighbors = tf.reduce_sum(tf.cast(mask, tf.int32), axis=[1, 2])
with self.test_session():
for i, atom in enumerate(atoms):
inds, disps = nl.get_neighbors(i)
ase_nneighbors = len(inds)
self.assertEqual(ase_nneighbors, tf_nneighbors.eval()[i])
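  # The check above (reused below): build an all-pairs distance tensor, mask
  # it to 0 < d <= 2 * Rc, and reduce-sum the mask to get per-atom neighbor
  # counts comparable to ASE's NeighborList.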
def test_structure_repeats(self):
'Check several structures and repeats for consistency with ase.'
for repeat in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 3), (4, 1,
1)):
for structure in ('fcc', 'bcc', 'sc', 'hcp', 'diamond'):
a = 3.6
# Float tolerances are tricky. The 0.01 in the next line is important.
        # This test fails without it due to subtle differences in computed
# positions.
Rc = 2 * a + 0.01
        atoms = bulk('Cu', structure, a=a).repeat(repeat)
nl = NeighborList(
[Rc] * len(atoms), skin=0.0, self_interaction=False, bothways=True)
nl.update(atoms)
distances = get_distances({
'cutoff_radius': 2 * Rc
}, atoms.positions, atoms.cell, np.ones((len(atoms), 1)))
mask = (distances <= 2 * Rc) & (distances > 0)
tf_nneighbors = tf.reduce_sum(tf.cast(mask, tf.int32), axis=[1, 2])
with self.test_session():
for i, atom in enumerate(atoms):
inds, disps = nl.get_neighbors(i)
ase_nneighbors = len(inds)
self.assertEqual(ase_nneighbors, tf_nneighbors.eval()[i])
# These are the indices of each neighbor in the atom list.
tf_inds = tf.where(mask[i])[:, 0].eval()
self.assertCountEqual(inds, tf_inds)
def test_atom_types(self):
"""Tests if the neighbor indices agree with ase.
This is important to find the
chemical element associated with a specific neighbor.
"""
a = 3.6
Rc = a / np.sqrt(2) / 2 + 0.01
atoms = bulk('Cu', 'fcc', a=a).repeat((3, 1, 1))
atoms[1].symbol = 'Au'
nl = NeighborList(
[Rc] * len(atoms), skin=0.01, self_interaction=False, bothways=True)
nl.update(atoms)
nns = [nl.get_neighbors(i) for i in range(len(atoms))]
ase_nau = [np.sum(atoms.numbers[inds] == 79) for inds, offs in nns]
au_mask = tf.convert_to_tensor(atoms.numbers == 79, tf.int32)
distances = get_distances({
'cutoff_radius': 2 * Rc
}, atoms.positions, atoms.cell)
mask = (distances <= (2 * Rc)) & (distances > 0)
nau = tf.reduce_sum(tf.cast(mask, tf.int32) * au_mask[:, None], [1, 2])
with self.test_session():
self.assertTrue(np.all(ase_nau == nau.eval()))
class TestOneWayNeighborlist(tf.test.TestCase):
"""These tests are a pain.
The actual neighbors are pretty sensitive to the unit cell, and it is hard to
get exact agreement on the number of neighbors.
"""
def test0(self):
import warnings
warnings.filterwarnings('ignore')
a = 3.6
Rc = 5
atoms = bulk('Cu', 'bcc', a=a).repeat((1, 1, 1))
atoms.rattle(0.02)
nl = NeighborList(
[Rc] * len(atoms), skin=0.0, self_interaction=False, bothways=False)
nl.update(atoms)
inds, dists, N = get_neighbors_oneway(
atoms.positions, atoms.cell, 2 * Rc, skin=0.0)
with self.test_session() as sess:
inds, dists, N = sess.run([inds, dists, N])
for i in range(len(atoms)):
ase_inds, ase_offs = nl.get_neighbors(i)
these_inds = np.array([x[1] for x in inds if x[0] == i])
these_offs = N[np.where(inds[:, 0] == i)]
self.assertAllClose(ase_inds, these_inds)
self.assertAllClose(ase_offs, these_offs)
def test_molecules(self):
"""Tests oneway list on a bunch of molecules.
These are in large unit cells, so practically they don't have periodic
boundary conditions.
"""
from ase.build import molecule
from ase.collections import g2
Rc = 2.0
pos = tf.placeholder(tf.float64, [None, 3])
cell = tf.placeholder(tf.float64, [3, 3])
inds, dists, N = get_neighbors_oneway(pos, cell, 2 * Rc, skin=0.0)
with self.test_session() as sess:
for mlc in g2.names:
atoms = molecule(mlc)
atoms.set_cell((50.0, 50.0, 50.0))
atoms.center()
if len(atoms) < 2:
continue
nl = NeighborList(
[Rc] * len(atoms), skin=0.0, bothways=False, self_interaction=0)
nl.update(atoms)
_inds, _N = sess.run(
[inds, N], feed_dict={
pos: atoms.positions,
cell: atoms.cell
})
for i in range(len(atoms)):
ase_inds, ase_offs = nl.get_neighbors(i)
these_inds = np.array([x[1] for x in _inds if x[0] == i])
these_offs = _N[np.where(_inds[:, 0] == i)]
# Check indices are the same
self.assertAllClose(ase_inds, these_inds)
# Check offsets are the same
if ase_offs.shape[0] > 0:
self.assertAllClose(ase_offs, these_offs)
def test_structure_repeats(self):
'Check several structures and repeats for consistency with ase.'
import warnings
warnings.filterwarnings('ignore')
import numpy as np
np.set_printoptions(precision=3, suppress=True)
a = 3.6
Rc = 2 * a + 0.01
pos = tf.placeholder(tf.float64, [None, 3])
cell = tf.placeholder(tf.float64, [3, 3])
inds, dists, N = get_neighbors_oneway(pos, cell, 2 * Rc, skin=0.0)
for repeat in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 3)):
for structure in ('fcc', 'bcc', 'sc', 'hcp', 'diamond'):
print('\n', structure, repeat, '\n')
print('============================\n')
atoms = bulk('Cu', structure, a=a).repeat(repeat)
atoms.rattle(0.02)
nl = NeighborList(
[Rc] * len(atoms), skin=0.0, self_interaction=False, bothways=False)
nl.update(atoms)
with tf.Session() as sess:
_inds, _dists, _N = sess.run(
[inds, dists, N],
feed_dict={
pos: atoms.positions,
cell: atoms.cell
})
for i in range(len(atoms)):
ase_inds, ase_offs = nl.get_neighbors(i)
these_inds = np.array([x[1] for x in _inds if x[0] == i])
these_offs = np.array(
[offset for x, offset in zip(_inds, _N) if x[0] == i])
# Check indices are the same
#print('Indices are equal: ', np.all(ase_inds == these_inds))
#print(ase_inds)
#print(these_inds)
self.assertAllClose(ase_inds, these_inds)
          # Check offsets are the same
|
rohitsinha54/Learning-Python
|
algorithms/symbal.py
|
Python
|
mit
| 889
| 0.00225
|
#!/usr/bin/env python
"""symbal.py: Check for braces to be balanced"""
from stack import Stack
__author__ = 'Rohit Sinha'
def match_braces(opener, closer):
return "({[".index(opener) == ")}]".inde
|
x(closer)
def brace_checker(brace_string):
stack = Stack()
isbal = True
index = 0
while index < len(brace_string) and isbal:
if brace_string[index] in "({[":
stack.push(brace_string[index])
else:
if stack.isEmpty():
isbal = False
else:
opener = stack.pop()
if not match_braces(opener, brace_string[index]):
isbal = False
index += 1
    if stack.isEmpty() and isbal:
        return True
else:
return False
# check the above code
if __name__ == '__main__':
print(brace_checker("([({})])"))
print(brace_checker("[({)]"))
|
cecep-edu/edx-platform
|
lms/djangoapps/badges/models.py
|
Python
|
agpl-3.0
| 12,190
| 0.003035
|
"""
Database models for the badges app
"""
from importlib import import_module
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from lazy import lazy
from model_utils.models import TimeStampedModel
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from badges.utils import deserialize_count_specs
from config_models.models import ConfigurationModel
from xmodule.modulestore.django import modulestore
from xmodule_django.models import CourseKeyField
def validate_badge_image(image):
"""
Validates that a particular image is small enough to be a badge and square.
"""
if image.width != image.height:
raise ValidationError(_(u"The badge image must be square."))
if not image.size < (250 * 1024):
raise ValidationError(_(u"The badge image file size must be less than 250KB."))
def validate_lowercase(string):
"""
Validates that a string is lowercase.
"""
if not string.islower():
raise ValidationError(_(u"This value must be all lowercase."))
class CourseBadgesDisabledError(Exception):
"""
    Exception raised when Course Badges aren't enabled, but an attempt to fetch one is made anyway.
"""
class BadgeClass(models.Model):
"""
Specifies a badge class to be registered with a backend.
"""
slug = models.SlugField(max_length=255, validators=[validate_lowercase])
issuing_component = models.SlugField(max_length=50, default='', blank=True, validators=[validate_lowercase])
display_name = models.CharField(max_length=255)
course_id = CourseKeyField(max_length=255, blank=True, default=None)
description = models.TextField()
criteria = models.TextField()
# Mode a badge was awarded for. Included for legacy/migration purposes.
mode = models.CharField(max_length=100, default='', blank=True)
image = models.ImageField(upload_to='badge_classes', validators=[validate_badge_image])
def __unicode__(self):
return u"<Badge '{slug}' for '{issuing_component}'>".format(
slug=self.slug, issuing_component=self.issuing_component
)
@classmethod
def get_badge_class(
cls, slug, issuing_component, display_name=None, description=None, criteria=None, image_file_handle=None,
mode='', course_id=None, create=True
):
"""
Looks up a badge class by its slug, issuing component, and course_id and returns it should it exist.
If it does not exist, and create is True, creates it according to the arguments. Otherwise, returns None.
The expectation is that an XBlock or platform developer should not need to concern themselves with whether
or not a badge class has already been created, but should just feed all requirements to this function
and it will 'do the right thing'. It should be the exception, rather than the common case, that a badge class
would need to be looked up without also being created were it missing.
"""
slug = slug.lower()
issuing_component = issuing_component.lower()
if course_id and not modulestore().get_course(course_id).issue_badges:
raise CourseBadgesDisabledError("This course does not have badges enabled.")
if not course_id:
course_id = CourseKeyField.Empty
try:
return cls.objects.get(slug=slug, issuing_component=issuing_component, course_id=course_id)
except cls.DoesNotExist:
if not create:
return None
badge_class = cls(
slug=slug,
issuing_component=issuing_component,
display_name=display_name,
course_id=course_id,
mode=mode,
description=description,
criteria=criteria,
)
badge_class.image.save(image_file_handle.name, image_file_handle)
badge_class.full_clean()
badge_class.save()
return badge_class
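    # A hypothetical call (sketch; argument values are illustrative, not
    # taken from this codebase):
    #   BadgeClass.get_badge_class(
    #       slug='course_complete', issuing_component='openedx__course',
    #       display_name='Course Complete', description='...', criteria='...',
    #       image_file_handle=png_file, course_id=course_key)
    # returns an existing matching badge class, or creates, validates and
    # saves a new one.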
@lazy
def backend(self):
"""
Loads the badging backend.
"""
module, klass = settings.BADGING_BACKEND.rsplit('.', 1)
module = import_module(module)
return getattr(module, klass)()
def get_for_user(self, user):
"""
Get the assertion for this badge class for this user, if it has been awarded.
"""
return self.badgeassertion_set.filter(user=user)
def award(self, user, evidence_url=None):
"""
Contacts the backend to have a badge assertion created for this badge class for this user.
"""
return self.backend.award(self, user, evidence_url=evidence_url)
def save(self, **kwargs):
"""
Slugs must always be lowercase.
"""
self.slug = self.slug and self.slug.lower()
self.issuing_component = self.issuing_component and self.issuing_component.lower()
super(BadgeClass, self).save(**kwargs)
class Meta(object):
app_label = "badges"
unique_together = (('slug', 'issuing_component', 'course_id'),)
verbose_name_plural = "Badge Classes"
class BadgeAssertion(TimeStampedModel):
"""
Tracks badges on our side of the badge baking transaction
"""
user = models.ForeignKey(User)
badge_class = models.ForeignKey(BadgeClass)
data = JSONField()
backend = models.CharField(max_length=50)
image_url = models.URLField()
assertion_url = models.URLField()
def __unicode__(self):
return u"<{username} Badge Assertion for {slug} for {issuing_component}".format(
username=self.user.username, slug=self.badge_class.slug,
issuing_component=self.badge_class.issuing_component,
)
@classmethod
def assertions_for_user(cls, user, course_id=None):
"""
Get all assertions for a user, optionally constrained to a course.
"""
if course_id:
return cls.objects.filter(user=user, badge_class__course_id=course_id)
return cls.objects.filter(user=user)
class Meta(object):
app_label = "badges"
# Abstract model doesn't index this, so we have to.
BadgeAssertion._meta.get_field('created').db_index = True # pylint: disable=protected-access
class CourseCompleteImageConfiguration(models.Model):
"""
Contains the icon configuration for badges for a specific course mode.
"""
mode = models.CharField(
max_length=125,
help_text=_(u'The course mode for this badge image. For example, "verified" or "honor".'),
unique=True,
)
icon = models.ImageField(
# Actual max is 256KB, but need overhead for badge baking. This should be more than enough.
help_text=_(
u"Badge images must be square PNG files. The file size should be under 250KB."
),
upload_to='course_complete_badges',
validators=[validate_badge_image]
)
default = models.BooleanField(
help_text=_(
u"Set this value to True if you want this image to be the default image for any course modes "
u"that do not have a specified badge image. You can have only one default image."
),
default=False,
)
def __unicode__(self):
return u"<CourseCompleteImageConfiguration for '{mode}'{default}>".format(
mode=self.mode,
default=u" (default)" if self.default else u''
)
def clean(self):
"""
Make sure there's not more than one default.
"""
# pylint: disable=no-member
if self.default and CourseCompleteImageConfiguration.objects.filter(default=True).exclude(id=self.id):
raise ValidationError(_(u"There can be only one default image."))
@classmethod
def image_for_mode(cls, mode):
"""
Get the image for a particular mode.
"""
try:
return cls.objects.get(mode=mode).icon
except cls.DoesNotExist:
            # Fall back to default, if there is one.
|
damorim/compilers-cin
|
antigo/ap3/CymbolCheckerVisitor.py
|
Python
|
mit
| 1,390
| 0.039568
|
from antlr4 import *
from autogen.CymbolParser import CymbolParser
from autogen.CymbolVisitor import CymbolVisitor
class Type:
VOID = "void"
INT = "int"
class CymbolCheckerVisitor(CymbolVisitor):
id_values = {}
def visitIntExpr(self, ctx:CymbolParser.IntExprContext):
print("visting "+Type.INT)
return Typ
|
e.INT
def visitVarDecl(self, ctx:CymbolParser.VarDeclContext):
var_name = ctx.ID().getText()
        tyype = ctx.tyype().getText()
print("tyype = " + tyype)
if (tyype == Type.VOID):
result = Type.VOID
print("Mensagem de erro 1...")
exit(1)
else:
if ctx.expr() != None:
init = ctx.expr().accept(self)
print("init = " + init)
if init != tyype:
print("Mensagem de erro 2...")
exit(2)
result = tyype
self.id_values[var_name] = tyype
print("saved variable " + var_name + " of type " + tyype)
return result
def visitAddSubExpr(self, ctx:CymbolParser.AddSubExprContext):
left = ctx.expr()[0].accept(self)
right = ctx.expr()[1].accept(self)
if left == Type.INT and right == Type.INT:
result = Type.INT
else:
            result = Type.VOID
            print("Error message 3...")
exit()
print("addition or subtraction of " + left + " " + right + " that results in a " + result)
return result
def aggregateResult(self, aggregate:Type, next_result:Type):
return next_result if next_result != None else aggregate
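# A hypothetical driver for this visitor (sketch; stream and start-rule names
# are assumptions, not taken from this repo):
#   from antlr4 import FileStream, CommonTokenStream
#   from autogen.CymbolLexer import CymbolLexer
#   parser = CymbolParser(CommonTokenStream(CymbolLexer(FileStream('prog.cymbol'))))
#   tree = parser.<start rule>()  # rule name depends on the Cymbol grammar
#   CymbolCheckerVisitor().visit(tree)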
|
hagleitn/Openstack-Devstack2
|
devstack/components/quantum.py
|
Python
|
apache-2.0
| 8,662
| 0.001039
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from devstack import cfg
from devstack import component as comp
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.components import db
LOG = logging.getLogger("devstack.components.quantum")
# Openvswitch special settings
VSWITCH_PLUGIN = 'openvswitch'
V_PROVIDER = "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
# Config files (some only modified if running as openvswitch)
PLUGIN_CONF = "plugins.ini"
QUANTUM_CONF = 'quantum.conf'
PLUGIN_LOC = ['etc']
AGENT_CONF = 'ovs_quantum_plugin.ini'
AGENT_LOC = ["etc", "quantum", "plugins", "openvswitch"]
AGENT_BIN_LOC = ["quantum", "plugins", "openvswitch", 'agent']
CONFIG_FILES = [PLUGIN_CONF, AGENT_CONF]
# This db will be dropped and created
DB_NAME = 'ovs_quantum'
# Opensvswitch bridge setup/teardown/name commands
OVS_BRIDGE_DEL = ['ovs-vsctl', '--no-wait', '--', '--if-exists', 'del-br', '%OVS_BRIDGE%']
OVS_BRIDGE_ADD = ['ovs-vsctl', '--no-wait', 'add-br', '%OVS_BRIDGE%']
OVS_BRIDGE_EXTERN_ID = ['ovs-vsctl', '--no-wait', 'br-set-external-id', '%OVS_BRIDGE%',
                        'bridge-id', '%OVS_EXTERNAL_ID%']
OVS_BRIDGE_CMDS = [OVS_BRIDGE_DEL, OVS_BRIDGE_ADD, OVS_BRIDGE_EXTERN_ID]
# Subdirs of the downloaded
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
# What to start (only if openvswitch enabled)
APP_Q_SERVER = 'quantum-server'
APP_Q_AGENT = 'ovs_quantum_agent.py'
APP_OPTIONS = {
APP_Q_SERVER: ["%QUANTUM_CONFIG_FILE%"],
APP_Q_AGENT: ["%OVS_CONFIG_FILE%", "-v"],
}
class QuantumUninstaller(comp.PkgUninstallComponent):
def __init__(self, *args, **kargs):
comp.PkgUninstallComponent.__init__(self, *args, **kargs)
class QuantumInstaller(comp.PkgInstallComponent):
def __init__(self, *args, **kargs):
comp.PkgInstallComponent.__init__(self, *args, **kargs)
self.q_vswitch_agent = False
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
self.q_vswitch_agent = True
self.q_vswitch_service = True
def _get_download_locations(self):
places = list()
places.append({
'uri': ("git", "quantum_repo"),
'branch': ("git", "quantum_branch"),
})
return places
def known_options(self):
return set(['no-ovs-db-init', 'no-ovs-bridge-init'])
def _get_config_files(self):
return list(CONFIG_FILES)
def _get_target_config_name(self, config_fn):
if config_fn == PLUGIN_CONF:
tgt_loc = [self.app_dir] + PLUGIN_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
elif config_fn == AGENT_CONF:
tgt_loc = [self.app_dir] + AGENT_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
else:
return comp.PkgInstallComponent._get_target_config_name(self, config_fn)
def _config_adjust(self, contents, config_fn):
if config_fn == PLUGIN_CONF and self.q_vswitch_service:
# Need to fix the "Quantum plugin provider module"
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
provider = config.get("PLUGIN", "provider")
if provider != V_PROVIDER:
config.set("PLUGIN", "provider", V_PROVIDER)
with io.BytesIO() as outputstream:
config.write(outputstream)
outputstream.flush()
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
elif config_fn == AGENT_CONF and self.q_vswitch_agent:
# Need to adjust the sql connection
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
db_dsn = config.get("DATABASE", "sql_connection")
if db_dsn:
generated_dsn = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
if generated_dsn != db_dsn:
config.set("DATABASE", "sql_connection", generated_dsn)
with io.BytesIO() as outputstream:
config.write(outputstream)
outputstream.flush()
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
else:
return comp.PkgInstallComponent._config_adjust(self, contents, config_fn)
def _setup_bridge(self):
if not self.q_vswitch_agent or \
'no-ovs-bridge-init' in self.options:
return
bridge = self.cfg.getdefaulted("quantum", "ovs_bridge", 'br-int')
LOG.info("Fixing up ovs bridge named %s.", bridge)
external_id = self.cfg.getdefaulted("quantum", 'ovs_bridge_external_name', bridge)
params = dict()
params['OVS_BRIDGE'] = bridge
params['OVS_EXTERNAL_ID'] = external_id
cmds = list()
for cmd_templ in OVS_BRIDGE_CMDS:
cmds.append({
'cmd': cmd_templ,
'run_as_root': True,
})
utils.execute_template(*cmds, params=params)
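        # With the defaults above this runs, as root (assuming ovs_bridge and
        # ovs_bridge_external_name are left unset):
        #   ovs-vsctl --no-wait -- --if-exists del-br br-int
        #   ovs-vsctl --no-wait add-br br-int
        #   ovs-vsctl --no-wait br-set-external-id br-int bridge-id br-int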
def post_install(self):
comp.PkgInstallComponent.post_install(self)
self._setup_db()
self._setup_bridge()
def _setup_db(self):
if not self.q_vswitch_service or \
'no-ovs-db-init' in self.options:
return
LOG.info("Fixing up database named %s.", DB_NAME)
db.drop_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
db.create_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
def _get_source_config(self, config_fn):
if config_fn == PLUGIN_CONF:
srcloc = [self.app_dir] + PLUGIN_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
elif config_fn == AGENT_CONF:
srcloc = [self.app_dir] + AGENT_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
else:
return comp.PkgInstallComponent._get_source_config(self, config_fn)
class QuantumRuntime(comp.ProgramRuntime):
def __init__(self, *args, **kargs):
comp.ProgramRuntime.__init__(self, *args, **kargs)
self.q_vswitch_agent = False
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
# Default to on if not specified
self.q_vswitch_agent = True
self.q_vswitch_service = True
def _get_apps_to_start(self):
app_list = comp.ProgramRuntime._get_apps_to_start(self)
if self.q_vswitch_service:
app_list.append({
'name': APP_Q_SERVER,
'path': sh.joinpths(self.app_dir, BIN_DIR, APP_Q_SERVER),
})
if self.q_vswitch_agent:
full_pth = [self.app_dir] + AGENT_BIN_LOC + [APP_Q_AGENT]
app_list.append({
'name': APP_Q_AGENT,
'path': sh.joinpths(*full_pth)
})
return app_list
def _get_app_options(self, app_name):
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/sortedcontainers/tests/test_stress_sortedlistwithkey.py
|
Python
|
mit
| 8,149
| 0.005522
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from sys import hexversion
import copy
import bisect
import random
from .context import sortedcontainers
from sortedcontainers import SortedListWithKey
from nose.tools import raises
from functools import wraps
class SortedList(SortedListWithKey):
pass
if hexversion < 0x03000000:
from itertools import izip as zip
range = xrange
random.seed(0)
actions = []
def frange(start, stop, step):
while start < stop:
yield start
start += step
class actor:
def __init__(self, count):
self._count = count
def __call__(self, func):
actions.extend([func] * self._count)
return func
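# So e.g. "@actor(3)" would register the decorated function three times in
# ``actions``, making it three times as likely to be drawn in test_stress()
# below; every action in this module uses a weight of 1.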
def not_empty(func):
@wraps(func)
def wrapper(slt):
if len(slt) < 100:
stress_update(slt)
func(slt)
return wrapper
@actor(1)
def stress_clear(slt):
if random.randrange(100) < 10:
slt.clear()
else:
values = list(slt)
slt.clear()
slt.update(values[:int(len(values) / 2)])
@actor(1)
def stress_add(slt):
if random.randrange(100) < 10:
slt.clear()
slt.add(random.random())
@actor(1)
def stress_update(slt):
slt.update((random.random() for rpt in range(350)))
@actor(1)
@not_empty
def stress_contains(slt):
if random.randrange(100) < 10:
slt.clear()
assert 0 not in slt
else:
val = slt[random.randrange(len(slt))]
assert val in slt
assert 1 not in slt
@actor(1)
@not_empty
def stress_discard(slt):
val = slt[random.randrange(len(slt))]
slt.discard(val)
@actor(1)
def stress_discard2(slt):
if random.randrange(100) < 10:
slt.clear()
slt.discard(random.random())
@actor(1)
def stress_remove(slt):
if len(slt) > 0:
val = slt[random.randrange(len(slt))]
slt.remove(val)
try:
slt.remove(1)
assert False
except ValueError:
pass
try:
slt.remove(-1)
assert False
except ValueError:
pass
@actor(1)
@not_empty
def stress_delitem(slt):
del slt[random.randrange(len(slt))]
@actor(1)
def stress_getitem(slt):
if len(slt) > 0:
pos = random.randrange(len(slt))
assert slt[pos] == list(slt)[pos]
try:
slt[-(slt._len + 5)]
assert False
except IndexError:
pass
try:
slt[slt._len + 5]
assert False
except IndexError:
pass
else:
try:
slt[0]
assert False
except IndexError:
pass
@actor(1)
@not_empty
def stress_setitem(slt):
pos = random.randrange(len(slt))
slt[pos] = slt[pos]
@actor(1)
@not_empty
def stress_setitem2(slt):
pos = random.randrange(int(len(slt) / 100)) * 100
slt[pos] = slt[pos]
@actor(1)
@not_empty
def stress_getset_slice(slt):
start, stop = sorted(random.randrange(len(slt)) for rpt in range(2))
step = random.choice([-3, -2, -1, 1, 1, 1, 1, 1, 2, 3])
lst = slt[start:stop:step]
assert all(lst[pos - 1] <= lst[pos] for pos in range(1, len(lst)))
slt[start:stop:step] = lst
@actor(1)
@not_empty
def stress_delitem_slice(slt):
start, stop = sorted(random.randrange(len(slt)) for rpt in range(2))
step = random.choice([-3, -2, -1, 1, 1, 1, 1, 1, 2, 3])
del slt[start:stop:step]
@actor(1)
def stress_iter(slt):
itr1 = iter(slt)
itr2 = (slt[pos] for pos in range(len(slt)))
assert all(tup[0] == tup[1] for tup in zip(itr1, itr2))
@actor(1)
def stress_reversed(slt):
itr = reversed(list(reversed(slt)))
assert all(tup[0] == tup[1] for tup in zip(slt, itr))
@actor(1)
def stress_bisect_left(slt):
values = list(slt)
value = random.random()
values.sort()
assert bisect.bisect_left(values, value) == slt.bisect_left(value)
@actor(1)
def stress_bisect(slt):
values = list(slt)
value = random.random()
values.sort()
assert bisect.bisect(values, value) == slt.bisect(value)
@actor(1)
def stress_bisect_right(slt):
values = list(slt)
value = random.random()
values.sort()
assert bisect.bisect_right(values, value) == slt.bisect_right(value)
@actor(1)
@not_empty
def stress_dups(slt):
pos = min(random.randrange(len(slt)), 300)
val = slt[pos]
for rpt in range(pos):
slt.add(val)
@actor(1)
@not_empty
def stress_count(slt):
values = list(slt)
val = slt[random.randrange(len(slt))]
assert slt.count(val) == values.count(val)
@actor(1)
def stress_append(slt):
if random.randrange(100) < 10:
slt.clear()
if len(slt) == 0:
slt.append(random.random())
else:
slt.append(slt[-1])
@actor(1)
def stress_extend(slt):
if random.randrange(100) < 10:
slt.clear()
if len(slt) == 0:
slt.extend(float(val) / 1000 for val in range(1000))
else:
slt.extend(frange(slt[-1], 1, 0.001))
@actor(1)
@not_empty
def stress_insert(slt):
slt.insert(0, slt[0])
slt.insert(-(len(slt) + 10), slt[0])
slt.insert(len(slt), slt[-1])
slt.insert(len(slt) + 10, slt[-1])
pos = random.randrange(len(slt))
slt.insert(pos, slt[pos])
@actor(1)
def stress_insert2(slt):
if random.randrange(100) < 10:
slt.clear()
if len(slt) == 0:
slt.insert(0, random.random())
else:
values = list(slt)[:250]
for val in values:
slt.insert(slt.index(val), val)
@actor(1)
@not_empty
def stress_pop(slt):
pos = random.randrange(len(slt)) + 1
assert slt[-pos] == slt.pop(-pos)
@actor(1)
@not_empty
def stress_index(slt):
values = set(slt)
slt.clear()
slt.update(values)
pos = random.randrange(len(slt))
assert slt.index(slt[pos]) == pos
@actor(1)
@not_empty
def stress_index2(slt):
values = list(slt)[:3] * 200
slt = SortedList(values)
for idx, val in enumerate(slt):
assert slt.index(val, idx) == idx
@actor(1)
def stress_mul(slt):
values = list(slt)
mult = random.randrange(10)
values *= mult
values.sort()
assert (slt * mult) == values
@actor(1)
def stress_imul(slt):
mult = random.randrange(10)
slt *= mult
@actor(1)
@not_empty
def stress_reversed(slt):
itr = reversed(slt)
pos = random.randrange(1, len(slt))
for rpt in range(pos):
val = next(itr)
assert val == slt[-pos]
@actor(1)
@not_empty
def stress_eq(slt):
values = []
assert not (values == slt)
@actor(1) # Disabled!!!
@not_empty
def stress_lt(slt):
values = list(slt) # Doesn't work with nose!
assert not (values < slt)
values = SortedList(value - 1 for value in values)
assert values < slt
values = []
assert values < slt
assert not (slt < values)
def test_stress(repeat=1000):
slt = SortedList((random.random() for rpt in range(1000)))
for rpt in range(repeat):
action = random.choice(actions)
action(slt)
slt._check()
while len(slt) > 2000:
# Shorten the sortedlist. This maintains the "jaggedness"
# of the sublists which helps coverage.
pos = random.randrange(len(slt._maxes))
del slt._maxes[pos]
del slt._keys[pos]
del slt._lists[pos]
slt._len = sum(len(sublist) for sublist in slt._lists)
slt._index = []
slt._check()
slt._check()
stress_update(slt)
while len(slt) > 0:
pos = random.randrange(len(slt))
del slt[pos]
slt._check()
if __name__ == '__main__':
import sys
    from datetime import datetime
start = datetime.now()
print('Python', sys.version_info)
try:
num = int(sys.argv[1])
print('Setting iterations to', num)
except:
print('Setting iterations to 1000 (default)')
num = 1000
try:
pea = int(sys.argv[2])
random.seed(pea)
print('Setting seed to', pea)
except:
print('Setting seed to 0 (default)')
random.seed(0)
try:
test_stress(num)
except:
        raise
    finally:
print('Exiting after', (datetime.now() - start))
|
elbow-jason/flask-simple-alchemy
|
tests/test_relator.py
|
Python
|
mit
| 3,028
| 0.003633
|
from flask_simple_alchemy import Relator
from testers import db, app, FakeTable, OtherTable
this_table = Relator(db)
this_table.add('FakeTable')
this_table.add('OtherTable', foreign_key='uuid')
class ThirdTable(db.Model, this_table.HasOneToOneWith.FakeTable):
__tablename__ = 'thirdtable'
id = db.Column(db.Integer, primary_key=True)
elf = db.Column(db.Boolean(False))
monkey = db.Column(db.String, default='yep')
def test_Relator_setattrs():
this_table = Relator(db)
this_table.add('FakeTable')
this_table.add('OtherTable', foreign_key='uuid')
assert this_table.HasForeignKeyOf
assert this_table.HasOneToOneWith
assert this_table.HasManyToOneWith
assert this_table.HasForeignKeyOf.FakeTable
assert this_table.HasOneToOneWith.FakeTable
assert this_table.HasManyToOneWith.FakeTable
assert this_table.HasForeignKeyOf.OtherTable
assert this_table.HasOneToOneWith.OtherTable
assert this_table.HasManyToOneWith.OtherTable
def test_Realtor_relationship():
    assert ThirdTable.faketable_id
assert ThirdTable.faketable
with app.app_context():
fk = FakeTable()
fk.unique_name = 'gggg'
db.session.add(fk)
db.session.commit()
saved = FakeTable.query.filter_by(unique_name='gggg').first()
tt = ThirdTable()
tt.faketable_id = saved.id
db.session.add(tt)
db.session.commit()
saved2 = ThirdTable.query.filter_by(monkey='yep').first()
assert saved
        assert tt
        assert saved2
def test_Realtor_relationship_again():
this_table = Relator(db)
this_table.add('FakeTable')
this_table.add('OtherTable', foreign_key='uuid', relation_name='OtherTableUUID1')
class FourthTable(db.Model, this_table.HasManyToOneWith.OtherTableUUID1):
__tablename__ = 'fourthtable'
id = db.Column(db.Integer, primary_key=True)
assert FourthTable.othertable_uuid
assert FourthTable.othertable
def test_Realtor_relation_name():
this_table = Relator(db)
this_table.add('FakeTable')
this_table.add('OtherTable')
this_table.add('OtherTable', foreign_key='uuid', relation_name="OtherTableUUID")
class SixthTable(db.Model, this_table.HasManyToOneWith.OtherTable):
__tablename__ = 'sixthtable'
id = db.Column(db.Integer, primary_key=True)
class FifthTable(db.Model, this_table.HasManyToOneWith.OtherTableUUID):
__tablename__ = 'fifthtable'
id = db.Column(db.Integer, primary_key=True)
assert SixthTable.othertable_id
assert SixthTable.othertable
assert FifthTable.othertable_uuid
assert FifthTable.othertable
def test_database_creation():
this_table = Relator(db)
this_table.add('FakeTable')
this_table.add('OtherTable', foreign_key='uuid')
#class ThirdTable(db.Model, this_table.HasOneToOneWith.FakeTable):
# __tablename__ = 'thirdtable'
# id = db.Column(db.Integer, primary_key=True)
db.drop_all()
db.create_all()
db.drop_all()
|
creditbit/electrum-creditbit-server
|
run_electrum_creditbit_server.py
|
Python
|
agpl-3.0
| 11,252
| 0.003022
|
#!/usr/bin/env python
# Copyright(C) 2012 thomasv@gitorious
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
import argparse
import ConfigParser
import logging
import socket
import sys
import time
import threading
import json
import os
import imp
if os.path.dirname(os.path.realpath(__file__)) == os.getcwd():
imp.load_module('electrumcreditbitserver', *imp.find_module('src'))
from electrumcreditbitserver import storage, networks, utils
from electrumcreditbitserver.processor import Dispatcher, print_log
from electrumcreditbitserver.server_processor import ServerProcessor
from electrumcreditbitserver.blockchain_processor import BlockchainProcessor
from electrumcreditbitserver.stratum_tcp import TcpServer
from electrumcreditbitserver.stratum_http import HttpServer
logging.basicConfig()
if sys.maxsize <= 2**32:
print "Warning: it looks like you are using a 32bit system
|
. You may experience crashes caused by mmap"
if os.getuid() == 0:
print "Do not run this program as root!"
print "Run the install script to create a non-privileged user."
sys.exit()
def attempt_read_config(config, filename):
    try:
        with open(filename, 'r') as f:
config.readfp(f)
except IOError:
pass
def load_banner(config):
try:
with open(config.get('server', 'banner_file'), 'r') as f:
config.set('server', 'banner', f.read())
except IOError:
pass
def setup_network_params(config):
type = config.get('network', 'type')
params = networks.params.get(type)
utils.PUBKEY_ADDRESS = int(params.get('pubkey_address'))
utils.SCRIPT_ADDRESS = int(params.get('script_address'))
storage.GENESIS_HASH = params.get('genesis_hash')
if config.has_option('network', 'pubkey_address'):
utils.PUBKEY_ADDRESS = config.getint('network', 'pubkey_address')
if config.has_option('network', 'script_address'):
utils.SCRIPT_ADDRESS = config.getint('network', 'script_address')
if config.has_option('network', 'genesis_hash'):
storage.GENESIS_HASH = config.get('network', 'genesis_hash')
def create_config(filename=None):
config = ConfigParser.ConfigParser()
# set some defaults, which will be overwritten by the config file
config.add_section('server')
config.set('server', 'banner', 'Welcome to Creditbit Electrum!')
config.set('server', 'banner_file', '/etc/electrum-creditbit.banner')
config.set('server', 'host', 'localhost')
config.set('server', 'electrum_rpc_port', '8002')
config.set('server', 'report_host', '')
config.set('server', 'stratum_tcp_port', '50001')
config.set('server', 'stratum_http_port', '8081')
config.set('server', 'stratum_tcp_ssl_port', '50002')
config.set('server', 'stratum_http_ssl_port', '8082')
config.set('server', 'report_stratum_tcp_port', '50001')
config.set('server', 'report_stratum_http_port', '8081')
config.set('server', 'report_stratum_tcp_ssl_port', '50002')
config.set('server', 'report_stratum_http_ssl_port', '8082')
config.set('server', 'ssl_certfile', '')
config.set('server', 'ssl_keyfile', '')
config.set('server', 'irc', 'no')
config.set('server', 'irc_nick', '')
config.set('server', 'coin', 'creditbit')
config.set('server', 'logfile', '/var/log/electrum-creditbit.log')
config.set('server', 'donation_address', '')
config.set('server', 'max_subscriptions', '10000')
config.add_section('leveldb')
config.set('leveldb', 'path', '/dev/shm/electrum-creditbit_db')
config.set('leveldb', 'pruning_limit', '100')
config.set('leveldb', 'utxo_cache', str(64*1024*1024))
config.set('leveldb', 'hist_cache', str(128*1024*1024))
config.set('leveldb', 'addr_cache', str(16*1024*1024))
config.set('leveldb', 'profiler', 'no')
# set network parameters
config.add_section('network')
config.set('network', 'type', 'creditbit_main')
# try to find the config file in the default paths
if not filename:
for path in ('/etc/', ''):
filename = path + 'electrum-creditbit.conf'
if os.path.isfile(filename):
break
if not os.path.isfile(filename):
print 'could not find electrum configuration file "%s"' % filename
sys.exit(1)
attempt_read_config(config, filename)
load_banner(config)
return config
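# Note the precedence: the hard-coded defaults above are applied first, then
# attempt_read_config() lets the config file (an explicit ``filename`` or the
# first electrum-creditbit.conf found) overwrite any of them.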
def run_rpc_command(params, electrum_rpc_port):
cmd = params[0]
import xmlrpclib
server = xmlrpclib.ServerProxy('http://localhost:%d' % electrum_rpc_port)
func = getattr(server, cmd)
r = func(*params[1:])
if cmd == 'sessions':
now = time.time()
print 'type address sub version time'
for item in r:
print '%4s %21s %3s %7s %.2f' % (item.get('name'),
item.get('address'),
item.get('subscriptions'),
item.get('version'),
(now - item.get('time')),
)
else:
print json.dumps(r, indent=4, sort_keys=True)
def cmd_banner_update():
load_banner(dispatcher.shared.config)
return True
def cmd_getinfo():
return {
'blocks': chain_proc.storage.height,
'peers': len(server_proc.peers),
'sessions': len(dispatcher.request_dispatcher.get_sessions()),
'watched': len(chain_proc.watched_addresses),
'cached': len(chain_proc.history_cache),
}
def cmd_sessions():
return map(lambda s: {"time": s.time,
"name": s.name,
"address": s.address,
"version": s.version,
"subscriptions": len(s.subscriptions)},
dispatcher.request_dispatcher.get_sessions())
def cmd_numsessions():
return len(dispatcher.request_dispatcher.get_sessions())
def cmd_peers():
return server_proc.peers.keys()
def cmd_numpeers():
return len(server_proc.peers)
def cmd_debug(s):
import traceback
    from guppy import hpy
hp = hpy()
if s:
try:
result = str(eval(s))
        except Exception:
err_lines = traceback.format_exc().splitlines()
result = '%s | %s' % (err_lines[-3], err_lines[-1])
return result
def get_port(config, name):
try:
return config.getint('server', name)
    except (ConfigParser.Error, ValueError):
return None
# global
shared = None
chain_proc = None
server_proc = None
dispatcher = None
transports = []
def start_server(config):
global shared, chain_proc, server_proc, dispatcher
logfile = config.get('server', 'logfile')
utils.init_logger(logfile)
host = config.get('server', 'host')
stratum_tcp_port = get_port(config, 'stratum_tcp_port')
stratum_http_port = get_port(config, 'stratum_http_port')
stratum_tcp_ssl_port = get_port(config, 'stratum_tcp_ssl_port')
stratum_http_ssl_port = get_port(config, 'stratum_http_ssl_port')
ssl_certfile = config.get('server', 'ssl_certfile')
ssl_keyfile = config.get('server', 'ssl_keyfile')
setup_network_params(config)
    if ssl_certfile == '' or ssl_keyfile == '':
stratum_tcp_ssl_port = None
stratum_http_ssl_port = None
print_log("Starting Electrum server on", host)
# Create hub
dispatcher = Dispatcher(config)
shared = dispatcher.shared
# handle termination signals
import sig
|
fhirschmann/penchy
|
penchy/jobs/tools.py
|
Python
|
mit
| 2,762
| 0.002172
|
"""
This module provides tools.
.. moduleauthor:: Felix Mueller
.. moduleauthor:: Pascal Wittmann
:copyright: PenchY Developers 2011-2012, see AUTHORS
:license: MIT License, see LICENSE
"""
import os.path
from penchy.jobs.elements import Tool
from penchy.jobs.hooks import Hook
from penchy.jobs.typecheck import Types
from penchy.maven import MavenDependency
class Tamiflex(Tool):
"""
This tool implements the play-out agent of tamiflex. The play-out agent has no
configuration options. For general usage information visit the
`tamiflex homepage <http://code.google.com/p/tamiflex/>`_.
Outputs:
- ``reflection_log``: log file of all uses of the reflection API
- ``classfolder``: folder of all classes that were used (including generated)
""
|
"
_POA = MavenDependency(
'de.tu_darmstadt.penchy',
'poa',
'2.0.0.0',
'http://mvn.0x0b.de',
checksum='df4418bed92205e4f27135bbf077895bd4c8c652')
DEPENDENCIES = set((_POA,))
outputs = Types(('reflection_log', list, str),
('classfolder', list, str))
def __init__(self, name=None):
"""
:param name: descriptive name of this tool
:type name: str
"""
super(Tamiflex, self).__init__(name)
self.hooks.extend([
Hook(teardown=lambda: self.out['reflection_log']
.append(os.path.abspath('out/refl.log'))),
Hook(teardown=lambda: self.out['classfolder']
.append(os.path.abspath('out/')))])
@property
def arguments(self):
return ["-javaagent:%s" % Tamiflex._POA.filename]
class HProf(Tool):
"""
This tool implements the hprof agent. Valid
options can be obtained with the command::
java -agentlib:hprof=help
For example: The instruction::
t = tools.HProf('heap=dump')
extends the commandline of the jvm about::
-agentlib:hprof=heap=dump
Outputs:
- ``hprof``: HProf output, i.e. the path to the java.hprof.txt file
"""
DEPENDENCIES = set()
outputs = Types(('hprof', list, str))
def __init__(self, option, name=None):
"""
:param option: the argument for hprof
:param name: descriptive name of this tool
:type name: str
"""
super(HProf, self).__init__(name)
        # always chooses the right file, because a new directory
        # is generated for each invocation
self.hooks.append(Hook(teardown=lambda: self.out['hprof']
.append(os.path.abspath('java.hprof.txt'))))
self.option = option
@property
def arguments(self):
return ["-agentlib:hprof={0}".format(self.option)]
|
USGSDenverPychron/pychron
|
docs/user_guide/operation/scripts/examples/helix/extraction/felix_co2.py
|
Python
|
apache-2.0
| 2,035
| 0.010319
|
'''
eqtime: 30
'''
def main():
info('Felix CO2 analysis')
gosub('felix:WaitForCO2Access')
gosub('felix:PrepareForCO2Analysis')
set_motor('beam',beam_diameter)
if analysis_type=='blank':
info('is blank. not heating')
'''
sleep cumulative time to account for blank
during a multiple position analysis
'''
close(description='Bone to Turbo')
close('A')
close('C')
open('F')
numPositions=len(position)
sleep(duration*max(1,numPositions))
else:
'''
        this is the most generic way to move and fire the laser
position is always a list even if only one hole is specified
'''
enable()
for p_i in position:
'''
            position the laser at p_i; p_i can be a hole number or an (x,y) pair
'''
move_to_position(p_i)
sleep(5)
close(description='Bone to Turbo')
do_extraction()
            if disable_between_positions:
end_extract()
end_extract()
disable()
info('cleaning gas {} seconds'.format(cleanup))
sleep(cleanup)
def do_extraction():
if ramp_rate>0:
'''
style 1.
'''
# begin_interval(duration)
        # info('ramping to {} at {} {}/s'.format(extract_value, ramp_rate, extract_units))
# ramp(setpoint=extract_value, rate=ramp_rate)
# complete_interval()
'''
style 2.
'''
elapsed=ramp(setpoint=extract_value, rate=ramp_rate)
pelapsed=execute_pattern(pattern)
        sleep(max(0, duration - elapsed - pelapsed))  # never sleep a negative remainder
else:
begin_interval(duration)
info('set extract to {}'.format(extract_value))
extract(extract_value)
sleep(2)
if pattern:
info('executing pattern {}'.format(pattern))
execute_pattern(pattern)
complete_interval()
|
nohona/cron-crm
|
usr/local/certbot/acme/acme/jose/b64.py
|
Python
|
gpl-3.0
| 1,523
| 0
|
"""JOSE Base64.
`JOSE Base64`_ is defined as:
- URL-safe Base64
- padding stripped
.. _`JOSE Base64`:
https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-37#appendix-C
.. Do NOT try to call this module "base64", as it will "shadow" the
standard library.
"""
import base64
import six
def b64encode(data):
"""JOSE Base64 encode.
:param data: Data to be encoded.
:type data: `bytes`
:returns: JOSE Base64 string.
:rtype: bytes
:raises TypeError: if `data` is of incorrect type
"""
if not isinstance(data, six.binary_type):
raise TypeError('argument should be {0}'.format(six.binary_type))
return base64.urlsafe_b64encode(data).rstrip(b'=')
def b64decode(data):
"""JOSE Base64 decode.
:param data: Base64 string to be decoded. If it's unicode, then
only ASCII characters are allowed.
:type data: `bytes` or `unicode`
:returns: Decoded data.
:rtype: bytes
    :raises TypeError: if input is of incorrect type
:raises ValueError: if input is unicode with non-ASCII characters
"""
if isinstance(data, six.string_types):
try:
data = data.encode('ascii')
except UnicodeEncodeError:
raise ValueError(
'unicode argument should contain only ASCII characters')
elif not isinstance(data, six.binary_type):
raise TypeError('argument should be a str or unicode')
return base64.urlsafe_b64decode(data + b'=' * (4 - (len(data) % 4)))
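# Round-trip sketch (illustrative, not part of the original module) showing the
# stripped padding described above; one '=' is re-added on decode:
#
#     >>> b64encode(b'\xff\xfb')
#     b'__s'
#     >>> b64decode(b'__s')
#     b'\xff\xfb'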
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/parcats/_meta.py
|
Python
|
mit
| 435
| 0
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="parcats", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
derv82/wifite2
|
wifite/attack/pmkid.py
|
Python
|
gpl-2.0
| 7,652
| 0.003921
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..model.attack import Attack
from ..config import Configuration
from ..tools.hashcat import HcxDumpTool, HcxPcapTool, Hashcat
from ..util.color import Color
from ..util.timer import Timer
from ..model.pmkid_result import CrackResultPMKID
from threading import Thread
import os
import time
import re
class AttackPMKID(Attack):
def __init__(self, target):
super(AttackPMKID, self).__init__(target)
self.crack_result = None
self.success = False
self.pcapng_file = Configuration.temp('pmkid.pcapng')
def get_existing_pmkid_file(self, bssid):
'''
Load PMKID Hash from a previously-captured hash in ./hs/
Returns:
The hashcat hash (hash*bssid*station*essid) if found.
None if not found.
'''
if not os.path.exists(Configuration.wpa_handshake_dir):
return None
bssid = bssid.lower().replace(':', '')
        file_re = re.compile(r'.*pmkid_.*\.16800')
for filename in os.listdir(Configuration.wpa_handshake_dir):
pmkid_filename = os.path.join(Configuration.wpa_handshake_dir, filename)
if not os.path.isfile(pmkid_filename):
continue
if not re.match(file_re, pmkid_filename):
continue
with open(pmkid_filename, 'r') as pmkid_handle:
pmkid_hash = pmkid_handle.read().strip()
if pmkid_hash.count('*') < 3:
continue
existing_bssid = pmkid_hash.split('*')[1].lower().replace(':', '')
if existing_bssid == bssid:
return pmkid_filename
return None
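    # For reference, a hashcat 16800 line has four '*'-separated fields
    # (PMKID hash, AP MAC, station MAC, hex-encoded ESSID), which is what the
    # count('*') < 3 sanity check above relies on. The values below are made up:
    #
    #     line = '5265aa9ccde9e1ba8a1544c09bd9c04c*112233445566*665544332211*6d796e6574'
    #     pmkid, ap_mac, station_mac, essid_hex = line.split('*')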
def run(self):
'''
Performs PMKID attack, if possible.
1) Captures PMKID hash (or re-uses existing hash if found).
2) Cracks the hash.
Returns:
True if handshake is captured. False otherwise.
'''
from ..util.process import Process
# Check that we have all hashcat programs
dependencies = [
Hashcat.dependency_name,
HcxDumpTool.dependency_name,
HcxPcapTool.dependency_name
]
missing_deps = [dep for dep in dependencies if not Process.exists(dep)]
if len(missing_deps) > 0:
Color.pl('{!} Skipping PMKID attack, missing required tools: {O}%s{W}' % ', '.join(missing_deps))
return False
pmkid_file = None
        if not Configuration.ignore_old_handshakes:
            # Load existing PMKID hash from filesystem
pmkid_file = self.get_existing_pmkid_file(self.target.bssid)
if pmkid_file is not None:
Color.pattack('PMKID', self.target, 'CAPTURE',
'Loaded {C}existing{W} PMKID hash: {C}%s{W}\n' % pmkid_file)
if pmkid_file is None:
# Capture hash from live target.
pmkid_file = self.capture_pmkid()
if pmkid_file is None:
return False # No hash found.
# Crack it.
try:
self.success = self.crack_pmkid_file(pmkid_file)
except KeyboardInterrupt:
Color.pl('\n{!} {R}Failed to crack PMKID: {O}Cracking interrupted by user{W}')
self.success = False
return False
return True # Even if we don't crack it, capturing a PMKID is 'successful'
def capture_pmkid(self):
        '''
        Captures a PMKID hash from the live target: runs hcxdumptool in a
        background thread and polls hcxpcaptool output of the .pcapng file.
        Returns:
            Path to the saved PMKID hash file (str) if captured, otherwise None.
        '''
self.keep_capturing = True
self.timer = Timer(Configuration.pmkid_timeout)
# Start hcxdumptool
t = Thread(target=self.dumptool_thread)
t.start()
# Repeatedly run pcaptool & check output for hash for self.target.essid
pmkid_hash = None
pcaptool = HcxPcapTool(self.target)
while self.timer.remaining() > 0:
pmkid_hash = pcaptool.get_pmkid_hash(self.pcapng_file)
if pmkid_hash is not None:
break # Got PMKID
Color.pattack('PMKID', self.target, 'CAPTURE',
'Waiting for PMKID ({C}%s{W})' % str(self.timer))
time.sleep(1)
self.keep_capturing = False
if pmkid_hash is None:
Color.pattack('PMKID', self.target, 'CAPTURE',
'{R}Failed{O} to capture PMKID\n')
Color.pl('')
return None # No hash found.
Color.clear_entire_line()
Color.pattack('PMKID', self.target, 'CAPTURE', '{G}Captured PMKID{W}')
pmkid_file = self.save_pmkid(pmkid_hash)
return pmkid_file
def crack_pmkid_file(self, pmkid_file):
'''
        Runs hashcat against the file containing the PMKID hash (*.16800).
If cracked, saves results in self.crack_result
Returns:
True if cracked, False otherwise.
'''
# Check that wordlist exists before cracking.
if Configuration.wordlist is None:
Color.pl('\n{!} {O}Not cracking PMKID ' +
'because there is no {R}wordlist{O} (re-run with {C}--dict{O})')
# TODO: Uncomment once --crack is updated to support recracking PMKIDs.
#Color.pl('{!} {O}Run Wifite with the {R}--crack{O} and {R}--dict{O} options to try again.')
key = None
else:
Color.clear_entire_line()
Color.pattack('PMKID', self.target, 'CRACK', 'Cracking PMKID using {C}%s{W} ...\n' % Configuration.wordlist)
key = Hashcat.crack_pmkid(pmkid_file)
if key is None:
# Failed to crack.
if Configuration.wordlist is not None:
Color.clear_entire_line()
Color.pattack('PMKID', self.target, '{R}CRACK',
'{R}Failed {O}Passphrase not found in dictionary.\n')
return False
else:
# Successfully cracked.
Color.clear_entire_line()
Color.pattack('PMKID', self.target, 'CRACKED', '{C}Key: {G}%s{W}' % key)
self.crack_result = CrackResultPMKID(self.target.bssid, self.target.essid,
pmkid_file, key)
Color.pl('\n')
self.crack_result.dump()
return True
def dumptool_thread(self):
        '''Runs hashcat's hcxdumptool until it dies or `keep_capturing == False`'''
dumptool = HcxDumpTool(self.target, self.pcapng_file)
# Let the dump tool run until we have the hash.
while self.keep_capturing and dumptool.poll() is None:
time.sleep(0.5)
        dumptool.interrupt()
def save_pmkid(self, pmkid_hash):
'''Saves a copy of the pmkid (handshake) to hs/ directory.'''
# Create handshake dir
if not os.path.exists(Configuration.wpa_handshake_dir):
os.makedirs(Configuration.wpa_handshake_dir)
# Generate filesystem-safe filename from bssid, essid and date
essid_safe = re.sub('[^a-zA-Z0-9]', '', self.target.essid)
bssid_safe = self.target.bssid.replace(':', '-')
date = time.strftime('%Y-%m-%dT%H-%M-%S')
pmkid_file = 'pmkid_%s_%s_%s.16800' % (essid_safe, bssid_safe, date)
pmkid_file = os.path.join(Configuration.wpa_handshake_dir, pmkid_file)
Color.p('\n{+} Saving copy of {C}PMKID Hash{W} to {C}%s{W} ' % pmkid_file)
with open(pmkid_file, 'w') as pmkid_handle:
pmkid_handle.write(pmkid_hash)
pmkid_handle.write('\n')
return pmkid_file
|
mojodna/debian-mapnik
|
plugins/input/templates/helloworld/build.py
|
Python
|
lgpl-2.1
| 2,425
| 0.002474
|
#!/usr/bin/env python
# Mapnik uses the build tool SCons.
# This python file is run to compile a plugin
# It must be called from the main 'SConstruct' file like:
# SConscript('path/to/this/file.py')
# see docs at: http://www.scons.org/wiki/SConscript()
import os
# Give this plugin a name
# here this happens to be the same as the directory
PLUGIN_NAME = 'hello'
# Here we pull from the SCons environment exported from the main instance
Import ('env')
# the below install details are also pulled from the
# main SConstruct file where configuration happens
# plugins can go anywhere, and be registered in custom locations by Mapnik
# but the standard location is '/usr/local/lib/mapnik/input'
install_dest = env['MAPNIK_INPUT_PLUGINS_DEST']
# clone the environment here
# so that if we modify the env in this file
# those changes do not pollute other builds later on...
plugin_env = env.Clone()
# Add the cpp files that need to be compiled
plugin_sources = Split(
"""
%(PLUGIN_NAME)s_datasource.cpp
%(PLUGIN_NAME)s_featureset.cpp
""" % locals()
)
# Add any external libraries this plugin should
# directly link to
libraries = []  # eg: libraries = ['libfoo']
libraries.append('mapnik')
# link libicuuc, but ICU_LIB_NAME is used because custom builds of icu can
# have different library names like osx which offers /usr/lib/libicucore.dylib
libraries.append(env['ICU_LIB_NAME'])
TARGET = plugin_env.SharedLibrary(
# the name of the target to build, eg 'sqlite.input'
'../%s' % PLUGIN_NAME,
# prefix - normally none used
SHLIBPREFIX='',
# extension, mapnik expects '.input'
SHLIBSUFFIX='.input',
# list of source files to compile
source=plugin_sources,
# libraries to link to
LIBS=libraries,
# any custom linkflags, eg. LDFLAGS
# in this case CUSTOM_LDFLAGS comes
# from Mapnik's main SConstruct file
# and can be removed here if you do
# not need it
LINKFLAGS=env.get('CUSTOM_LDFLAGS')
)
# if 'uninstall' is not passed on the command line
# then we actually create the install targets that
# scons will install if 'install' is passed as an arg
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(install_dest, TARGET)
env.Alias('install', install_dest)
|
markgw/jazzparser
|
lib/nltk/sem/glue.py
|
Python
|
gpl-3.0
| 27,061
| 0.008647
|
# Natural Language Toolkit: Glue Semantics
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2010 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import os
import nltk
from nltk.internals import Counter
from nltk.parse import *
from nltk.parse import MaltParser
from nltk.corpus import brown
from nltk.tag import *
import logic
import drt
import linearlogic
SPEC_SEMTYPES = {'a' : 'ex_quant',
'an' : 'ex_quant',
'every' : 'univ_quant',
'the' : 'def_art',
'no' : 'no_quant',
'default' : 'ex_quant'}
OPTIONAL_RELATIONSHIPS = ['nmod', 'vmod', 'punct']
class GlueFormula(object):
def __init__(self, meaning, glue, indices=None):
if not indices:
indices = set()
if isinstance(meaning, str):
self.meaning = logic.LogicParser().parse(meaning)
elif isinstance(meaning, logic.Expression):
self.meaning = meaning
else:
raise RuntimeError, 'Meaning term neither string or expression: %s, %s' % (meaning, meaning.__class__)
if isinstance(glue, str):
self.glue = linearlogic.LinearLogicParser().parse(glue)
elif isinstance(glue, linearlogic.Expression):
self.glue = glue
else:
raise RuntimeError, 'Glue term neither string or expression: %s, %s' % (glue, glue.__class__)
self.indices = indices
def applyto(self, arg):
""" self = (\\x.(walk x), (subj -o f))
arg = (john , subj)
returns ((walk john), f)
"""
if self.indices & arg.indices: # if the sets are NOT disjoint
raise linearlogic.LinearLogicApplicationException, "'%s' applied to '%s'. Indices are not disjoint." % (self, arg)
else: # if the sets ARE disjoint
return_indices = (self.indices | arg.indices)
try:
return_glue = linearlogic.ApplicationExpression(self.glue, arg.glue, arg.indices)
except linearlogic.LinearLogicApplicationException:
raise linearlogic.LinearLogicApplicationException, "'%s' applied to '%s'" % (self.simplify(), arg.simplify())
arg_meaning_abstracted = arg.meaning
if return_indices:
for dep in self.glue.simplify().antecedent.dependencies[::-1]: # if self.glue is (A -o B), dep is in A.dependencies
arg_meaning_abstracted = self.make_LambdaExpression(logic.Variable('v%s' % dep),
arg_meaning_abstracted)
return_meaning = self.meaning.applyto(arg_meaning_abstracted)
return self.__class__(return_meaning, return_glue, return_indices)
def make_VariableExpression(self, name):
return logic.VariableExpression(name)
def make_LambdaExpression(self, variable, term):
return logic.LambdaExpression(variable, term)
def lambda_abstract(self, other):
assert isinstance(other, GlueFormula)
assert isinstance(other.meaning, logic.AbstractVariableExpression)
return self.__class__(self.make_LambdaExpression(other.meaning.variable,
self.meaning),
linearlogic.ImpExpression(other.glue, self.glue))
def compile(self, counter=None):
"""From Iddo Lev's PhD Dissertation p108-109"""
if not counter:
counter = Counter()
(compiled_glue, new_forms) = self.glue.simplify().compile_pos(counter, self.__class__)
return new_forms + [self.__class__(self.meaning, compiled_glue, set([counter.get()]))]
def simplify(self):
return self.__class__(self.meaning.simplify(), self.glue.simplify(), self.indices)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.meaning == other.meaning and self.glue == other.glue
def __str__(self):
assert isinstance(self.indices, set)
accum = '%s : %s' % (self.meaning, self.glue)
if self.indices:
accum += ' : {' + ', '.join([str(index) for index in self.indices]) + '}'
return accum
def __repr__(self):
return str(self)
class GlueDict(dict):
def __init__(self, filename):
self.filename = filename
self.read_file()
def read_file(self, empty_first=True):
if empty_first:
self.clear()
try:
f = nltk.data.find(
os.path.join('grammars', 'sample_grammars', self.filename))
# if f is a ZipFilePathPointer or a FileSystemPathPointer
# then we need a little extra massaging
if hasattr(f, 'open'):
f = f.open()
except LookupError, e:
try:
f = open(self.filename)
except LookupError:
raise e
lines = f.readlines()
f.close()
for line in lines: # example: 'n : (\\x.(<word> x), (v-or))'
# lambdacalc -^ linear logic -^
line = line.strip() # remove trailing newline
if not len(line): continue # skip empty lines
if line[0] == '#': continue # skip commented out lines
parts = line.split(' : ', 2) # ['verb', '(\\x.(<word> x), ( subj -o f ))', '[subj]']
glue_formulas = []
parenCount = 0
tuple_start = 0
tuple_comma = 0
relationships = None
if len(parts) > 1:
for (i, c) in enumerate(parts[1]):
if c == '(':
if parenCount == 0: # if it's the first '(' of a tuple
tuple_start = i+1 # then save the index
parenCount += 1
elif c == ')':
parenCount -= 1
if parenCount == 0: # if it's the last ')' of a tuple
meaning_term = parts[1][tuple_start:tuple_comma] # '\\x.(<word> x)'
glue_term = parts[1][tuple_comma+1:i] # '(v-r)'
glue_formulas.append([meaning_term, glue_term]) # add the GlueFormula to the list
elif c == ',':
if parenCount == 1: # if it's a comma separating the parts of the tuple
tuple_comma = i # then save the index
                    elif c == '#': # skip comments at the ends of lines
if parenCount != 0: # if the line hasn't parsed correctly so far
raise RuntimeError, 'Formula syntax is incorrect for entry ' + line
break # break to the next line
if len(parts) > 2: #if there is a relationship entry at the end
relStart = parts[2].index('[')+1
relEnd = parts[2].index(']')
if relStart == relEnd:
relationships = frozenset()
else:
relationships = frozenset([r.strip() for r in parts[2][relStart:relEnd].split(',')])
try:
startInheritance = parts[0].index('(')
endInheritance = parts[0].index(')')
sem = parts[0][:startInheritance].strip()
supertype = parts[0][startInheritance+1:endInheritance]
except:
sem = parts[0].strip()
supertype = None
if sem not in self:
self[sem] = {}
if relationships is None: #if not specified for a specific relationship set
#add all relationship entries for parents
if supertype:
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/preferences_v30_rc1.py
|
Python
|
mit
| 3,447
| 0.00029
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PreferencesV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'locale': 'str'
}
attribute_map = {
'locale': 'locale'
}
def __init__(self, locale=None): # noqa: E501
"""PreferencesV30Rc1 - a model defined in Swagger""" # noqa: E501
self._locale = None
self.discriminator = None
if locale is not None:
self.locale = locale
@property
def locale(self):
"""Gets the locale of this PreferencesV30Rc1. # noqa: E501
:return: The locale of this PreferencesV30Rc1. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this PreferencesV30Rc1.
:param locale: The locale of this PreferencesV30Rc1. # noqa: E501
:type: str
"""
allowed_values = ["AR", "CS", "DE", "EN", "ES", "FR", "IT", "JA", "KO", "PT", "RU", "ZH_CN", "ZH_TW", "XX"] # noqa: E501
if locale not in allowed_values:
raise ValueError(
"Invalid value for `locale` ({0}), must be one of {1}" # noqa: E501
.format(locale, allowed_values)
)
        self._locale = locale
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PreferencesV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PreferencesV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
hmedina/KaSaAn
|
KaSaAn/functions/observable_coplotter.py
|
Python
|
mit
| 6,362
| 0.003458
|
#! /usr/bin/env python3
import ast
import glob
import warnings
import numpy as np
from typing import List, Tuple
from .observable_plotter import observable_file_reader
from .numerical_sort import numerical_sort
def _find_data_files(pattern: str) -> List[str]:
"""Find files that match the pattern."""
file_list = glob.glob(pattern)
if not file_list:
raise ValueError('No files found matching: ' + str(pattern))
sorted_file_list = sorted(file_list, key=numerical_sort)
return sorted_file_list
def _multi_data_axis_annotator(co_plot_axis, file_data_list: List[Tuple[List[str], np.array, str]],
coplot_index: int, coplot_name: str, coplot_expression: str,
diff_toggle: bool = False, log_x: bool = False, log_y: bool = False,
omit_legend: bool = False):
"""Annotate the provided axis."""
legend_entries = []
for file_data in file_data_list:
legend_data, numeric_data, file_name = file_data
# find indexes to plot:
if coplot_index:
var_index = coplot_index - 1
data_y = numeric_data[:, var_index]
legend_entry = legend_data[var_index]
elif coplot_name:
try:
var_index = legend_data.index(coplot_name)
except ValueError as ve:
raise ValueError('Requested variable name not found in variable list; available options are:\n' +
' | '.join(legend_data)) from ve
            data_y = numeric_data[:, var_index]
legend_entry = legend_data[var_index]
elif coplot_expression:
            # to render an algebraic expression, we create an abstract syntax tree,
# then replace the tokens that are strings found in the legend with
# the name of the data array, with proper indexing; finally
# the new tree can be executed (with fixed linenos & indents).
# TokenTransformer is declared here so it can include in its scope
# the legend_data values; this avoids more extensive subclassing
class TokenTransformer(ast.NodeTransformer):
"""Swap column names for the indexed-array."""
def visit_Constant(self, node, obs_list=legend_data):
"""Transform node if it's a string found in the legend data."""
if isinstance(node.value, str):
if node.value in obs_list:
return ast.copy_location(
ast.Subscript(
value=ast.Name(id='numeric_data', ctx=ast.Load()),
slice=ast.Tuple(elts=[ast.Slice(), ast.Constant(value=obs_list.index(node.value))],
ctx=ast.Load()),
ctx=ast.Load()), node)
else:
raise ValueError('Error: <{}> not found in observables: {}'.format(node.value, obs_list))
else:
return node
my_ast = ast.parse(coplot_expression, mode='eval')
TokenTransformer().visit(my_ast)
data_y = eval(compile(ast.fix_missing_locations(my_ast), '<string>', 'eval'))
legend_entry = coplot_expression
else:
raise ValueError('Function requires a variable index, a variable name, or an expression of variables.')
data_x = numeric_data[:, 0]
if diff_toggle:
d_t = np.diff(data_x)
d_v = np.diff(data_y)
if np.any(d_t == 0.0):
raise ValueError('Time difference of zero found in file ' + file_name)
data_y = d_v / d_t
data_x = data_x[1:]
legend_entries.append(legend_entry)
if len(data_x) < 1000:
plot_draw_style = 'steps-post'
else:
plot_draw_style = 'default'
co_plot_axis.plot(data_x, data_y, label=legend_entry, drawstyle=plot_draw_style)
co_plot_axis.set_xlabel('Time')
# if plotting a time differential, adjust Y-axis label
if diff_toggle:
co_plot_axis.set_ylabel(r'$\frac{\Delta \mathrm{x}}{\Delta t}$', rotation='horizontal')
else:
co_plot_axis.set_ylabel('Value')
    # if the requested index yielded a single observable name (i.e. all tracks
    # plot the same observable), title with it and legend with the file names;
    # otherwise legend with the variable names that resulted from the request
if len(set(legend_entries)) == 1:
co_plot_axis.set_title(legend_entries[0])
if not omit_legend:
co_plot_axis.legend([item[2] for item in file_data_list])
else:
if not omit_legend:
co_plot_axis.legend()
# adjust plot scales
if log_x:
co_plot_axis.set_xscale('log')
if log_y:
co_plot_axis.set_yscale('log')
return co_plot_axis
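# Standalone sketch of the AST rewriting trick used above (names and data are
# made up; the real code builds the Subscript nodes explicitly instead of
# re-parsing, but the effect is the same):
#
#     import ast
#     import numpy as np
#     legend_data = ['time', 'A', 'B']
#     numeric_data = np.array([[0.0, 1.0, 2.0],
#                              [1.0, 3.0, 4.0]])
#     class Demo(ast.NodeTransformer):
#         def visit_Constant(self, node):
#             if isinstance(node.value, str) and node.value in legend_data:
#                 idx = legend_data.index(node.value)
#                 new = ast.parse('numeric_data[:, %d]' % idx, mode='eval').body
#                 return ast.copy_location(new, node)
#             return node
#     tree = Demo().visit(ast.parse("'A' + 'B'", mode='eval'))
#     print(eval(compile(ast.fix_missing_locations(tree), '<string>', 'eval')))
#     # -> [3. 7.]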
def observable_coplot_axis_annotator(target_axis, file_pattern: str,
variable_index: int, variable_name: str, variable_expr: str,
differential_toggle: bool = False,
log_axis_x: bool = False, log_axis_y: bool = False, no_legend: bool = False):
"""See file under `KaSaAn.scripts` for usage."""
file_names = _find_data_files(file_pattern)
file_data_list = []
for file_name in file_names:
legend_data, numeric_data = observable_file_reader(file_name)
if numeric_data.shape[0] <= 1:
warnings.warn('Only one time point in file ' + file_name)
file_data_list.append((legend_data, numeric_data, file_name))
if not variable_index and not variable_name and not variable_expr:
raise ValueError('Function requires the index of a variable,'
' a name for one, or an expression of variables found in the observable file.')
_multi_data_axis_annotator(co_plot_axis=target_axis, file_data_list=file_data_list,
coplot_index=variable_index, coplot_name=variable_name, coplot_expression=variable_expr,
diff_toggle=differential_toggle, log_x=log_axis_x, log_y=log_axis_y,
omit_legend=no_legend)
return target_axis
|
srange/SU2
|
TestCases/parallel_regression.py
|
Python
|
lgpl-2.1
| 66,999
| 0.018254
|
#!/usr/bin/env python
## \file parallel_regression.py
# \brief Python script for automated regression testing of SU2 examples
# \author A. Aranake, A. Campos, T. Economon, T. Lukaczyk, S. Padron
# \version 6.2.0 "Falcon"
#
# The current SU2 release has been coordinated by the
# SU2 International Developers Society <www.su2devsociety.org>
# with selected contributions from the open-source community.
#
# The main research teams contributing to the current release are:
# - Prof. Juan J. Alonso's group at Stanford University.
# - Prof. Piero Colonna's group at Delft University of Technology.
# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# - Prof. Alberto Guardone's group at Polytechnic University of Milan.
# - Prof. Rafael Palacios' group at Imperial College London.
# - Prof. Vincent Terrapon's group at the University of Liege.
# - Prof. Edwin van der Weide's group at the University of Twente.
# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics.
#
# Copyright 2012-2019, Francisco D. Palacios, Thomas D. Economon,
# Tim Albring, and the SU2 contributors.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# make print(*args) function available in PY2.6+, doesn't work on PY < 2.6
from __future__ import print_function
import sys
from TestCase import TestCase
def main():
'''This program runs SU2 and ensures that the output matches specified values.
This will be used to do checks when code is pushed to github
to make sure nothing is broken. '''
test_list = []
##########################
### Compressible Euler ###
##########################
# Channel
channel = TestCase('channel')
channel.cfg_dir = "euler/channel"
channel.cfg_file = "inv_channel_RK.cfg"
channel.test_iter = 20
channel.test_vals = [-2.652944, 2.813720, 0.033489, 0.002890] #last 4 columns
channel.su2_exec = "parallel_computation.py -f"
channel.timeout = 1600
channel.tol = 0.00001
test_list.append(channel)
# NACA0012
naca0012 = TestCase('naca0012')
naca0012.cfg_dir = "euler/naca0012"
naca0012.cfg_file = "inv_NACA0012_Roe.cfg"
naca0012.test_iter = 20
naca0012.test_vals = [-4.080618, -3.586817, 0.337090, 0.022611] #last 4 columns
naca0012.su2_exec = "parallel_computation.py -f"
naca0012.timeout = 1600
naca0012.tol = 0.00001
test_list.append(naca0012)
# Supersonic wedge
wedge = TestCase('wedge')
wedge.cfg_dir = "euler/wedge"
wedge.cfg_file = "inv_wedge_HLLC.cfg"
wedge.test_iter = 20
wedge.test_vals = [-0.816407, 4.925831, -0.251950, 0.044386] #last 4 columns
wedge.su2_exec = "parallel_computation.py -f"
wedge.timeout = 1600
wedge.tol = 0.00001
test_list.append(wedge)
# ONERA M6 Wing
oneram6 = TestCase('oneram6')
oneram6.cfg_dir = "euler/oneram6"
oneram6.cfg_file = "inv_ONERAM6.cfg"
oneram6.test_iter = 10
oneram6.test_vals = [-10.392429, -9.840519, 0.282580, 0.012694] #last 4 columns
oneram6.su2_exec = "parallel_computation.py -f"
oneram6.timeout = 3200
oneram6.tol = 0.00001
test_list.append(oneram6)
# Fixed CL NACA0012
fixedCL_naca0012 = TestCase('fixedcl_naca0012')
fixedCL_naca0012.cfg_dir = "fixed_cl/naca0012"
fixedCL_naca0012.cfg_file = "inv_NACA0012.cfg"
fixedCL_naca0012.test_iter = 100
fixedCL_naca0012.test_vals = [-2.474140, 2.927471, 0.290169, 0.019080] #last 4 columns
fixedCL_naca0012.su2_exec = "parallel_computation.py -f"
fixedCL_naca0012.timeout = 1600
fixedCL_naca0012.tol = 0.00001
test_list.append(fixedCL_naca0012)
# Polar sweep of the inviscid NACA0012
polar_naca0012 = TestCase('polar_naca0012')
polar_naca0012.cfg_dir = "polar/naca0012"
polar_naca0012.cfg_file = "inv_NACA0012.cfg"
polar_naca0012.polar = True
polar_naca0012.test_iter = 10
polar_naca0012.test_vals = [-1.301350, 4.133308, -0.002728, 0.008768] #last 4 columns
polar_naca0012.su2_exec = "compute_polar.py -i 11"
polar_naca0012.timeout = 1600
polar_naca0012.tol = 0.00001
test_list.append(polar_naca0012)
# HYPERSONIC FLOW PAST BLUNT BODY
bluntbody = TestCase('bluntbody')
bluntbody.cfg_dir = "euler/bluntbody"
bluntbody.cfg_file = "blunt.cfg"
bluntbody.test_iter = 20
bluntbody.test_vals = [0.626808, 7.014695, -0.000000, 1.648026] #last 4 columns
bluntbody.su2_exec = "parallel_computation.py -f"
bluntbody.timeout = 1600
bluntbody.tol = 0.00001
test_list.append(bluntbody)
##########################
### Compressible N-S ###
##########################
# Laminar flat plate
flatplate = TestCase('flatplate')
flatplate.cfg_dir = "navierstokes/flatplate"
flatplate.cfg_file = "lam_flatplate.cfg"
flatplate.test_iter = 20
flatplate.test_vals = [-4.648252, 0.813253, -0.130643, 0.024357] #last 4 columns
flatplate.su2_exec = "parallel_computation.py -f"
flatplate.timeout = 1600
flatplate.tol = 0.00001
test_list.append(flatplate)
# Laminar cylinder (steady)
cylinder = TestCase('cylinder')
cylinder.cfg_dir = "navierstokes/cylinder"
cylinder.cfg_file = "lam_cylinder.cfg"
cylinder.test_iter = 25
cylinder.test_vals = [-6.759137, -1.291223, 0.107133, 0.853339] #last 4 columns
cylinder.su2_exec = "parallel_computation.py -f"
cylinder.timeout = 1600
cylinder.tol = 0.00001
test_list.append(cylinder)
# Laminar cylinder (low Mach correction)
cylinder_lowmach = TestCase('cylinder_lowmach')
cylinder_lowmach.cfg_dir = "navierstokes/cylinder"
cylinder_lowmach.cfg_file = "cylinder_lowmach.cfg"
cylinder_lowmach.test_iter = 25
cylinder_lowmach.test_vals = [-6.870761, -1.408778, -0.228736, 112.418622] #last 4 columns
cylinder_lowmach.su2_exec = "parallel_computation.py -f"
cylinder_lowmach.timeout = 1600
cylinder_lowmach.tol = 0.00001
test_list.append(cylinder_lowmach)
# 2D Poiseuille flow (body force driven with periodic inlet / outlet)
poiseuille = TestCase('poiseuille')
poiseuille.cfg_dir = "navierstokes/poiseuille"
poiseuille.cfg_file = "lam_poiseuille.cfg"
poiseuille.test_iter = 10
poiseuille.test_vals = [-5.050864, 0.648220, 0.000349, 13.639525] #last 4 columns
poiseuille.su2_exec = "parallel_computation.py -f"
poiseuille.timeout = 1600
poiseuille.tol = 0.001
test_list.append(poiseuille)
# 2D Poiseuille flow (inlet profile file)
poiseuille_profile = TestCase('poiseuille_profile')
poiseuille_profile.cfg_dir = "navierstokes/poiseuille"
poiseuille_profile.cfg_file = "profile_poiseuille.cfg"
poiseuille_profile.test_iter = 10
poiseuille_profile.test_vals = [-12.493460, -7.672043, -0.000000, 2.085796] #last 4 columns
poiseuille_profile.su2_exec = "parallel_computation.py -f"
poiseuille_profile.timeout = 1600
poiseuille_profile.tol = 0.00001
test_list.append(poiseuille_profile)
##########################
### Compressible RANS ###
##########################
# RAE2822 SA
rae2822_sa = TestCase('rae2822_sa')
rae2822_sa.cfg_dir = "rans/rae2822"
rae2822_sa.cfg_file = "turb_SA_RAE2822.cfg"
rae2822_sa.test_iter = 20
rae28
|
our-iot-project-org/pingow-web-service
|
src/blog/settings.py
|
Python
|
mit
| 3,829
| 0.001567
|
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their
values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR = "/Users/jmitch/desktop/blog/src/"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['tienduong.pythonanywhere.com', '127.0.0.1', '10.169.3.13', '172.20.10.5', '172.20.10.10']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party
'crispy_forms',
'markdown_deux',
'pagedown',
'rest_framework',
'django_tables2',
# local apps
'comments',
'posts',
'pingow_api',
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
LOGIN_URL = "/login/"
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
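# With the example BASE_DIR above ("/Users/jmitch/desktop/blog/src"),
# STATIC_ROOT resolves to "/Users/jmitch/desktop/blog/static_cdn" and
# MEDIA_ROOT to "/Users/jmitch/desktop/blog/media_cdn", i.e. siblings of src/.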
|
jackliusr/scrapy-crawlers
|
crawlers/crawlers/spiders/jd_hardware.py
|
Python
|
apache-2.0
| 2,190
| 0.018265
|
import scrapy
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from crawlers.items import JdItem
from scrapy.http import TextResponse,FormRequest,Request
import json
class JdHardwareSpider(scrapy.Spider):
name = 'jd-hardware'
allowed_domains = ['jd.com','p.3.cn']
start_urls = ['http://list.jd.com/737-1277-934-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
'http://list.jd.com/737-1277-3979-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
'http://list.jd.com/737-1277-6974-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
'http://list.jd.com/737-1277-900-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
'http://list.jd.com/737-1277-1295-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
'http://list.jd.com/737-1277-6975-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
'http://list.jd.com/737-1277-4934-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html',
                  'http://list.jd.com/737-1277-5004-0-0-0-0-0-0-0-1-1-1-1-1-72-4137-0.html'
]
def parse(self,response):
sel = Selector(response)
urls = sel.xpath("//a[contains(@href, 'item.jd.com/')]/@href").extract()
for url in urls:
itemid = url[url.rindex('/')+1:-5]
yield Request(url, callback=self.parse_item)
def parse_item(self, response):
sel = Selector(response)
i = JdItem()
url = response.url
itemid = url[url.rindex('/')+1:-5]
i['name'] = sel.xpath("//div[@id='name']/h1/text()").extract()
i['description'] = sel.xpath("//div[@id='product-detail-1']/ul").extract()
i['category'] = sel.xpath("//div[@class='breadcrumb']/span/a/text()").extract()[1]
i['image_urls'] = sel.xpath("//div[@id='spec-n1']/img/@src").extract()
request= Request("http://p.3.cn/prices/get?skuid=J_%s&type=1" % itemid, callback=self.parse_price)
request.meta['item'] = i
yield request
def parse_price(self, response):
i = response.meta['item']
jsonData = json.loads(response.body)
i['price'] = jsonData[0]['p']
return i
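# Worked example of the id extraction used in parse() and parse_item() above
# (the URL is illustrative):
#
#     url = 'http://item.jd.com/1234567.html'
#     url[url.rindex('/')+1:-5]   # -> '1234567'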
|
uclapi/uclapi
|
backend/uclapi/timetable/models.py
|
Python
|
mit
| 65,300
| 0
|
from django.db import models
models.options.DEFAULT_NAMES += ('_DATABASE',)
class Weekstructure(models.Model):
setid = models.TextField(max_length=10)
weeknumber = models.BigIntegerField(null=True, blank=True)
startdate = models.DateField(primary_key=True)
description = models.TextField(null=True, max_length=80)
mappedto = models.BigIntegerField(null=True, blank=True)
class Meta:
managed = False
        db_table = '"CMIS_OWNER"."WEEKSTRUCTURE"'
_DATABASE = 'roombookings'
class WeekstructureA(models.Model):
id = models.AutoField(primary_key=True)
setid = models.TextField(max_length=10)
weeknumber = models.BigIntegerField(null=True, blank=True)
startdate = models.DateField()
description = models.TextField(max_length=80, null=True)
mappedto = models.BigIntegerField(null=True, blank=True)
class Meta:
_DATABASE = 'gencache'
class WeekstructureB(models.Model):
id = models.AutoField(primary_key=True)
setid = models.TextField(max_length=10)
weeknumber = models.BigIntegerField(null=True, blank=True)
startdate = models.DateField()
description = models.TextField(max_length=80, null=True)
mappedto = models.BigIntegerField(null=True, blank=True)
class Meta:
_DATABASE = 'gencache'
class Students(models.Model):
setid = models.TextField(max_length=10)
studentid = models.TextField(primary_key=True, max_length=12)
name = models.TextField(max_length=120)
linkcode = models.TextField(max_length=20, null=True)
courseid = models.TextField(max_length=12, null=True)
courseyear = models.BigIntegerField(null=True, blank=True)
classgroupid = models.TextField(max_length=10, null=True)
deptid = models.TextField(max_length=10)
instcode = models.TextField(max_length=10)
qtype1 = models.TextField(max_length=10)
qtype2 = models.TextField(max_length=10)
qtype3 = models.TextField(max_length=10, null=True)
regchecked = models.CharField(max_length=1, null=True)
fullypaid = models.CharField(max_length=1, null=True)
house1 = models.BigIntegerField(null=True, blank=True)
house2 = models.BigIntegerField(null=True, blank=True)
lecturerid = models.TextField(max_length=10, null=True)
optionblockid = models.BigIntegerField(null=True, blank=True)
rulesetid = models.TextField(max_length=10, null=True)
semid = models.BigIntegerField(null=True, blank=True)
instid = models.BigIntegerField(null=True, blank=True)
isflipflop = models.CharField(max_length=1, null=True)
crsver = models.BigIntegerField(null=True, blank=True)
finalyear = models.CharField(max_length=1, null=True)
newcourseid = models.TextField(max_length=12, null=True)
lastsemrank = models.BigIntegerField(null=True, blank=True)
acadstanding = models.BigIntegerField(null=True, blank=True)
isdeferred = models.CharField(max_length=1, null=True)
oldcourseid = models.TextField(max_length=12, null=True)
oldcourseyear = models.BigIntegerField(null=True, blank=True)
oldsemid = models.BigIntegerField(null=True, blank=True)
oldacadstanding = models.BigIntegerField(null=True, blank=True)
acaddone = models.BigIntegerField(null=True, blank=True)
semrank = models.BigIntegerField(null=True, blank=True)
oldsemrank = models.BigIntegerField(null=True, blank=True)
custate = models.BigIntegerField(null=True, blank=True)
oldcustate = models.BigIntegerField(null=True, blank=True)
progstoskip = models.BigIntegerField(null=True, blank=True)
adjacadstanding = models.BigIntegerField(null=True, blank=True)
ema = models.CharField(max_length=1, null=True)
emaid = models.TextField(max_length=12, null=True)
dob = models.DateField(null=True)
class Meta:
managed = False
db_table = '"CMIS_OWNER"."STUDENTS"'
_DATABASE = 'roombookings'
class StudentsA(models.Model):
id = models.AutoField(primary_key=True)
setid = models.TextField(max_length=10)
studentid = models.TextField(max_length=12)
name = models.TextField(max_length=120)
linkcode = models.TextField(max_length=20, null=True)
courseid = models.TextField(max_length=12, null=True)
courseyear = models.BigIntegerField(null=True, blank=True)
classgroupid = models.TextField(max_length=10, null=True)
deptid = models.TextField(max_length=10)
instcode = models.TextField(max_length=10)
qtype1 = models.TextField(max_length=10)
qtype2 = models.TextField(max_length=10)
qtype3 = models.TextField(max_length=10, null=True)
regchecked = models.CharField(max_length=1, null=True)
fullypaid = models.CharField(max_length=1, null=True)
house1 = models.BigIntegerField(null=True, blank=True)
house2 = models.BigIntegerField(null=True, blank=True)
lecturerid = models.TextField(max_length=10, null=True)
optionblockid = models.BigIntegerField(null=True, blank=True)
rulesetid = models.TextField(max_length=10, null=True)
semid = models.BigIntegerField(null=True, blank=True)
instid = models.BigIntegerField(null=True, blank=True)
isflipflop = models.CharField(max_length=1, null=True)
crsver = models.BigIntegerField(null=True, blank=True)
finalyear = models.CharField(max_length=1, null=True)
newcourseid = models.TextField(max_length=12, null=True)
lastsemrank = models.BigIntegerField(null=True, blank=True)
acadstanding = models.BigIntegerField(null=True, blank=True)
isdeferred = models.CharField(max_length=1, null=True)
oldcourseid = models.TextField(max_length=12, null=True)
oldcourseyear = models.BigIntegerField(null=True, blank=True)
oldsemid = models.BigIntegerField(null=True, blank=True)
oldacadstanding = models.BigIntegerField(null=True, blank=True)
acaddone = models.BigIntegerField(null=True, blank=True)
semrank = models.BigIntegerField(null=True, blank=True)
oldsemrank = models.BigIntegerField(null=True, blank=True)
custate = models.BigIntegerField(null=True, blank=True)
oldcustate = models.BigIntegerField(null=True, blank=True)
progstoskip = models.BigIntegerField(null=True, blank=True)
adjacadstanding = models.BigIntegerField(null=True, blank=True)
ema = models.CharField(max_length=1, null=True)
emaid = models.TextField(max_length=12, null=True)
dob = models.DateField(null=True, blank=True)
class Meta:
_DATABASE = 'gencache'
class StudentsB(models.Model):
id = models.AutoField(primary_key=True)
setid = models.TextField(max_length=10)
studentid = models.TextField(max_length=12)
name = models.TextField(max_length=120)
linkcode = models.TextField(max_length=20, null=True)
courseid = models.TextField(max_length=12, null=True)
courseyear = models.BigIntegerField(null=True, blank=True)
classgroupid = models.TextField(max_length=10, null=True)
deptid = models.TextField(max_length=10)
instcode = models.TextField(max_length=10)
qtype1 = models.TextField(max_length=10)
qtype2 = models.TextField(max_length=10)
qtype3 = models.TextField(max_length=10, null=True)
regchecked = models.CharField(max_length=1, null=True)
fullypaid = models.CharField(max_length=1, null=True)
house1 = models.BigIntegerField(null=True, blank=True)
house2 = models.BigIntegerField(null=True, blank=True)
lecturerid = models.TextField(max_length=10, null=True)
optionblockid = models.BigIntegerField(null=True, blank=True)
rulesetid = models.TextField(max_length=10, null=True)
semid = models.BigIntegerField(null=True, blank=True)
instid = models.BigIntegerField(null=True, blank=True)
isflipflop = models.CharField(max_length=1, null=True)
crsver = models.BigIntegerField(null=True, blank=True)
finalyear = models.CharField(max_length=1, null=True)
newcourseid = models.TextField(max_length=12, null=True)
lastsemrank = models.BigIntegerField(null=True, blank=True)
acadstanding = models.BigIntegerField(null=True, blank=True)
isdeferred = models.CharField(max_length=1, null=True)
oldcourseid = models.TextField(max_length=12, null=True)
oldcourseye
|
docwalter/py3status
|
py3status/modules/whoami.py
|
Python
|
bsd-3-clause
| 1,179
| 0.000848
|
# -*- coding: utf-8 -*-
"""
Display logged-in username.
Configuration parameters:
format: display format for whoami (default '{username}')
Format placeholders:
{username} display current username
Inspired by i3 FAQ:
https://faq.i3wm.org/question/1618/add-user-name-to-status-bar.1.html
@author ultrabug
SAMPLE OUTPUT
{'full_text': u'ultrabug'}
"""
from getpass import getuser
class Py3status:
"""
"""
# available configuration parameters
format = '{username}'
class Meta:
deprecated = {
'remove': [
{
'param': 'cache_timeout',
'msg': 'obsolete parameter',
},
],
}
def whoami(self):
"""
We use the getpass module to get the current user.
"""
username = '{}'.format(getuser())
return {
'cached_until': self.py3.CACHE_FOREVER,
            'full_text': self.py3.safe_format(self.format, {'username': username})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
paypal/support
|
examples/python_analytics/client/collect.py
|
Python
|
bsd-3-clause
| 2,659
| 0.000376
|
# -*- coding: utf-8 -*-
import sys
import json
import time
import uuid
import socket
import getpass
import argparse
import datetime
import platform
import socklusion
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 8888
DEFAULT_PATH = '/v1/on_import'
TIMEOUT = 5.0
INSTANCE_ID = uuid.uuid4()
IS_64BIT = sys.maxsize > 2 ** 32
HAVE_READLINE = True
try:
import readline
except ImportError:
HAVE_READLINE = False
HAVE_UCS4 = getattr(sys, 'maxunicode', 0) > 65536
TIME_INFO = {'utc': str(datetime.datetime.utcnow()),
'std_utc_offset': -time.timezone / 3600.0}
def get_python_info():
ret = {}
ret['argv'] = sys.argv
ret['bin'] = sys.executable
ret['is_64bit'] = IS_64BIT
ret['version'] = sys.version
ret['compiler'] = platform.python_compiler()
    ret['build_date'] = platform.python_build()[1]
ret['version_info'] = list(sys.version_info)
ret['have_ucs4'] = HAVE_UCS4
ret['have_readline'] = HAVE_READLINE
return ret
def get_all_info():
ret = {}
    ret['username'] = getpass.getuser()
ret['uuid'] = str(INSTANCE_ID)
ret['hostname'] = socket.gethostname()
ret['hostfqdn'] = socket.getfqdn()
ret['uname'] = platform.uname()
ret['python'] = get_python_info()
ret['time'] = TIME_INFO
return ret
def build_post_message(data, host=DEFAULT_HOST, path=DEFAULT_PATH):
msg_lines = ['POST %s HTTP/1.0' % path,
'Host: %s' % host,
'Content-Length: ' + str(len(data)),
'',
data]
msg = '\r\n'.join(msg_lines)
return msg
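# For illustration, build_post_message('{"k": 1}') produces the following
# CRLF-joined message (the blank line separates headers from body):
#
#     POST /v1/on_import HTTP/1.0
#     Host: 127.0.0.1
#     Content-Length: 8
#
#     {"k": 1}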
def send_import_analytics(host=DEFAULT_HOST, port=DEFAULT_PORT, data_dict=None,
timeout=TIMEOUT, path=DEFAULT_PATH):
if data_dict is None:
data_dict = get_all_info()
msg = build_post_message(json.dumps(data_dict), host=host, path=path)
return socklusion.send_data(msg,
host=host,
port=port,
wrap_ssl=False,
timeout=timeout,
want_response=True)
def main():
prs = argparse.ArgumentParser()
prs.add_argument('--host', default=DEFAULT_HOST)
prs.add_argument('--port', default=DEFAULT_PORT, type=int)
prs.add_argument('--path', default=DEFAULT_PATH)
prs.add_argument('--verbose', action='store_true')
args = prs.parse_args()
output = send_import_analytics(host=args.host,
port=args.port,
path=args.path)
if args.verbose:
print output
if __name__ == '__main__':
main()
|
meisamhe/GPLshared
|
Research_Projects_UTD/Data_Science/some_python.py
|
Python
|
gpl-3.0
| 988
| 0.026316
|
# 1. Strip non alphabetic
inputStr = "I may opt for a top yam for Amy, May, and Tommy."
import re
regex = re.compile('[^a-zA-Z ]')
output = regex.sub('',inputStr)
# 2. Convert letters to lower case
output = output.lower()
# 3. sort all the letters within each word
# 4. also eliminate duplicates
DicOfWords = {}
for word in output.split():
DicOfWords[''.join(sorted(word))] = 1
# 4. continue sort the list of words
outputWordList = sorted(DicOfWords.keys())
# 5. print the words with space
" ".join(outputWordList)
def Collatz(N):
    if N % 2 == 0:  # if it is even
        return N // 2
else:
return (3*N+1)
N = 6  # starting value (example; the original snippet assumed N was already set)
K = 3  # number of Collatz steps to apply (example; K was also assumed)
crrnt = N
for i in range(1,K+1):
crrnt = Collatz(crrnt)
print(crrnt)
parameters = [1.5,2,-1,-2.5,3]
attribute = [2,-1,2,0.5]
output = 0
for i in range(0, len(attribute)):
output = output + float(attribute[i])*float(parameters[i])
output = output + float(parameters[len(attribute)])
import math
score = 1/(1+math.exp(-output))
print('%.3f'%score)
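# The same computation wrapped in a function for clarity (illustrative; the
# trailing parameter is the bias term):
def logistic_score(parameters, attribute):
    # dot product of weights and features, plus the bias
    z = sum(p * a for p, a in zip(parameters, attribute)) + parameters[len(attribute)]
    return 1 / (1 + math.exp(-z))
# logistic_score(parameters, attribute) reproduces the score printed above (~0.679)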
|
DeadWisdom/inreach
|
app/ajax.py
|
Python
|
mit
| 1,366
| 0.005124
|
import json, datetime, decimal
from django.http import HttpResponse
class EnhancedJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
def default(self, o):
if isinstance(o, datetime.datetime):
return o.ctime()
elif isinstance(o, datetime.date):
return o.ctime()
elif isinstance(o, datetime.time):
return o.ctime()
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(EnhancedJSONEncoder, self).default(o)
def to_json(simple_object):
"""
Serializes the ``simple_object`` to JSON using the EnhancedJSONEncoder above.
"""
return json.dumps(simple_object, cls=EnhancedJSONEncoder)
def from_json(src):
"""
    Simply deserializes the given json ``src``, provided for consistency with ``to_json()``.
"""
return json.loads(src)
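# Usage sketch (illustrative; exact key order may vary by Python version):
#
#     >>> import datetime, decimal
#     >>> to_json({'when': datetime.date(2015, 1, 2), 'amt': decimal.Decimal('9.99')})
#     '{"when": "Fri Jan  2 00:00:00 2015", "amt": "9.99"}'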
class JsonResponse(HttpResponse):
"""
An HttpResponse class that automatically serializes the response content with JSON using
the EnhancedJSONEncoder above to deal with date/time and decimal types.
"""
def __init__(self, simple_object, status=None, content_type="application/json"):
        super(JsonResponse, self).__init__(
            to_json(simple_object), status=status, content_type=content_type)
|
grokcore/dev.lexycross
|
wordsmithed/menu.py
|
Python
|
mit
| 7,437
| 0.045717
|
'''
This will store any menus all inherited from a prototype with two functions
update and execute. Update will change display based on cursor position, while
execute will process button clicks.
'''
import pygame, ai
from pygame.locals import *
from scrabble import DISPLAYSURF, CLICK
class Menu():
def __init__(self):
self.buttons = {}
self.rect = (0, 0, 800, 600)
self.background = (255, 255, 255)
'''
Goes through all buttons and returns the name of the button, if it was clicked
'''
def execute(self, mouseX, mouseY):
if self.within(mouseX, mouseY):
theKey = ""
for key in self.buttons.keys():
if self.buttons[key].within(mouseX, mouseY):
theKey = key
if theKey != "":
CLICK.play()
return theKey
'''
Goes through and updates all buttons, redrawing them if they are hovered
'''
def update(self, mouseX, mouseY):
for button in self.buttons.values():
button.update(mouseX, mouseY)
def within(self, mouseX, mouseY):
(left, top, width, height) = self.rect
return mouseX >= left and mouseX <= left+width and mouseY >= top and mouseY <= top+height
def redraw(self):
pygame.draw.rect(DISPLAYSURF, self.background, self.rect)
for button in self.buttons.values():
button.redraw()
#==================== MAIN MENU =====================
class MainMenu(Menu):
NEW_GAME = "new"
EXIT_GAME = "exit"
TRAINING = "training"
ACHIEVEMENT = "achievement"
def __init__(self, userdata):
Menu.__init__(self)
trainerText = TextBox(["Practice your Scrabble skills with a built-in HINT",
"box, which lets you know how the AI would have played",
"your move. But you can't get ACHIEVEMENTS while training."], (400, 400),
(55, 46, 40), (255, 255, 255), horzCenter = True)
newGameText = TextBox(["Play one-on-one against Wordsmith, the Scrabble AI.",
"No hints allowed, try to beat your best score!"], (400, 400),
(55, 46, 40), (255, 255, 255), horzCenter = True)
achieveText = TextBox(self.createAchievementText(userdata), (400, 400),
(55, 46, 40), (255, 255, 255), horzCenter = True)
self.buttons[MainMenu.TRAINING] = Button("Training", (250, 135, 300, 50), trainerText)
self.buttons[MainMenu.NEW_GAME] = Button("Challenge", (250, 190, 300, 50), newGameText)
self.buttons[MainMenu.ACHIEVEMENT] = Button("Achievements", (250, 245, 300, 50), achieveText)
self.buttons[MainMenu.EXIT_GAME] = Button("Exit", (250, 300, 300, 50))
DISPLAYSURF.fill((255,255,255))
def resetAchievements(self, userdata):
self.buttons[MainMenu.ACHIEVEMENT].textBox.text = self.createAchievementText(userdata)
def createAchievementText(self, userdata):
text = []
if userdata.has_key("name"):
text.append(userdata["name"]+"'s Achievements")
else:
text.append("Guest Achievements")
        if userdata.has_key("bestScore"):
text.append("Highest Score: "+str(userdata["bestScore"]))
else:
text.append("Highest Score: 0")
if userdata.has_key("numVictories"):
            text.append("Victories: "+str(userdata["numVictories"]))
else:
text.append("Victories: 0")
if userdata.has_key("numGames"):
text.append("Games Played: "+str(userdata["numGames"]))
else:
text.append("Games Played: 0")
return text
#==================== GAME MENU =====================
class GameMenu(Menu):
PLAY_TURN = "play"
RESHUFFLE = "shuffle"
MAIN_MENU = "quit"
HINT_TURN = "hint"
def __init__(self, useHintBox = False):
Menu.__init__(self)
self.rect = (570, 300, 150, 300)
playText = TextBox(["Confirm your move,",
"returns your tiles if",
"your move is illegal."], (570, 480), (55, 46, 40), (255, 255, 255))
self.buttons[GameMenu.PLAY_TURN] = Button("PLAY", (570, 300, 150, 30), textBox = playText)
shuffleText = TextBox(["Forfeit your turn",
"and draw new tiles for",
"the next turn."], (570, 480), (55, 46, 40), (255, 255, 255))
self.buttons[GameMenu.RESHUFFLE] = Button("REDRAW", (570, 340, 150, 30), textBox = shuffleText)
if useHintBox:
hintText = TextBox(["The AI will put your",
"pieces down. Just hit",
"PLAY to confirm it."], (570, 480), (55, 46, 40), (255, 255, 255))
self.buttons[GameMenu.HINT_TURN] = Button("HINT", (570, 380, 150, 30), textBox = hintText, color = (255, 255, 100), backColor = (255, 170, 50))
self.buttons[GameMenu.MAIN_MENU] = Button("QUIT", (570, 420, 150, 30))
else:
self.buttons[GameMenu.MAIN_MENU] = Button("QUIT", (570, 380, 150, 30))
DISPLAYSURF.fill((255,255,255))
#==================== TEXT BOX ======================
class TextBox():
initialized = False
MARGIN = 21
@staticmethod
def initialize():
TextBox.FONT = pygame.font.Font('freesansbold.ttf', 18)
TextBox.initialized = True
def __init__(self, textLines, pos, color, backColor, horzCenter = False):
self.text = textLines
self.pos = pos
self.color = color
self.width = 0
self.backColor = backColor
self.horzCentered = horzCenter
if not TextBox.initialized:
TextBox.initialize()
def draw(self):
i = 0
for line in self.text:
left = self.pos[0]
top = self.pos[1] + TextBox.MARGIN * i
text = TextBox.FONT.render(line, True, self.color, self.backColor)
rect = text.get_rect()
if self.horzCentered:
rect.centerx = left
else:
rect.left = left
rect.top = top
if rect.width > self.width:
self.width = rect.width
DISPLAYSURF.blit(text, rect)
i+=1
def undraw(self):
height = TextBox.MARGIN * len(self.text)
if self.horzCentered:
rect = (self.pos[0]-self.width/2, self.pos[1], self.width, height)
else:
rect = (self.pos[0], self.pos[1], self.width, height)
pygame.draw.rect(DISPLAYSURF, self.backColor, rect)
#==================== BUTTON ========================
class Button():
BACKGROUND = (125, 125, 170)
HIGHLIGHT = (200, 200, 255)
FONT_COLOR = (55, 46, 40)
ON = "on"
OFF = "off"
initialized = False
@staticmethod
def initialize():
Button.FONT = pygame.font.Font('freesansbold.ttf', 18)
Button.initialized = True
def __init__(self, name, rect, textBox = None, color = None, backColor = None):
#Make sure the fonts are set up
if not Button.initialized:
Button.initialize()
if color == None:
color = Button.HIGHLIGHT
if backColor == None:
backColor = Button.BACKGROUND
self.name = name
self.rect = rect
self.lastDrawn = Button.OFF
self.textBox = textBox
self.color = color
self.backColor = backColor
def update(self, mouseX, mouseY):
if self.within(mouseX, mouseY):
self.draw(self.color)
self.lastDrawn = Button.ON
if self.textBox != None:
self.textBox.draw()
else:
self.draw(self.backColor)
if self.lastDrawn == Button.ON and self.textBox != None:
self.textBox.undraw()
self.lastDrawn = Button.OFF
def within(self, mouseX, mouseY):
(left, top, width, height) = self.rect
return mouseX >= left and mouseX <= left+width and mouseY >= top and mouseY <= top+height
def draw(self, backColor):
pygame.draw.rect(DISPLAYSURF, backColor, self.rect)
(left, top, width, height) = self.rect
text = Button.FONT.render(self.name, True, Button.FONT_COLOR, backColor)
rect = text.get_rect()
rect.center = (left+width/2, top+height/2)
DISPLAYSURF.blit(text, rect)
def redraw(self):
if self.lastDrawn == Button.ON:
self.draw(self.color)
elif self.lastDrawn == Button.OFF:
self.draw(self.backColor)
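# --- usage sketch (added): the hover/click pattern these classes implement.
# A hypothetical main loop would look roughly like:
#     menu = MainMenu(userdata)
#     while True:
#         for event in pygame.event.get():
#             if event.type == MOUSEBUTTONUP:
#                 choice = menu.execute(*event.pos)  # e.g. MainMenu.NEW_GAME
#         menu.update(*pygame.mouse.get_pos())       # redraws hovered buttons
#         pygame.display.update()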
|
steve9164/maze-generator
|
maze.py
|
Python
|
mit
| 6,550
| 0.003511
|
'''
Maze Generator
By Stephen Davies
Builds a maze using tree construction
'''
# Current idea:
# - Build maze and tree at the same time
# - Start at some square - say (0, 0) for now - and randomly add unused square to leaves of the tree
import random, json
from collections import namedtuple
SquareNode = namedtuple('SquareNode', ['coord', 'children'])
Coordinate = namedtuple('Coordinate', ['x', 'y'])
class MazeTree(object):
'A maze represented as a tree of squares (each with coordinates of their position in the maze)'
def __init__(self, width, height):
self.width = width
self.height = height
self.start_square = None
self.end_square = None
self.tree = None
def __str__(self):
'''
Generate a string representation of the maze by starting with a maze with all walls,
then removing walls while traversing the maze tree
'''
maze = [[' '] + self.width*['-', ' ']]
for _ in range(self.height):
maze += [
['|'] + self.width*['x', '|'],
[' '] + self.width*['-', ' ']
]
def remove_wall(coord1, coord2):
'Remove wall between neighbouring squares at coord1 and coord2'
coord = Coordinate(min(coord1.x, coord2.x), min(coord1.y, coord2.y))
if coord1.x > coord.x or coord2.x > coord.x:
maze[2*coord.y+1][2*coord.x+2] = ' '
elif coord1.y > coord.y or coord2.y > coord.y:
maze[2*coord.y+2][2*coord.x+1] = ' '
else:
                print('Error: No wall removed for pair of coords ({0.x}, {0.y}) & ({1.x}, {1.y})'.format(coord1, coord2))
def remove_walls(node):
'Remove walls between node and its children'
for child in node.children:
                remove_wall(node.coord, child.coord)
remove_walls(child)
remove_walls(self.tree)
maze[2*self.start_square.y+1][2*self.start_square.x+1] = 'S'
maze[2*self.end_square.y+1][2*self.end_square.x+1] = 'E'
return '\n'.join(''.join(row) for row in maze)
def to_json(self):
maze = []
for _ in range(self.height):
row = []
for _ in range(self.width):
sq = {
'right': True,
'left': True,
'top': True,
'bottom': True,
'start': False,
'end': False
}
row.append(sq)
maze.append(row)
def remove_wall(coord1, coord2):
'Remove wall between neighbouring squares at coord1 and coord2'
#coord = Coordinate(min(coord1.x, coord2.x), min(coord1.y, coord2.y))
if coord1.x > coord2.x:
maze[coord1.y][coord1.x]['left'] = False
maze[coord2.y][coord2.x]['right'] = False
if coord1.x < coord2.x:
maze[coord1.y][coord1.x]['right'] = False
maze[coord2.y][coord2.x]['left'] = False
if coord1.y > coord2.y:
maze[coord1.y][coord1.x]['top'] = False
maze[coord2.y][coord2.x]['bottom'] = False
if coord1.y < coord2.y:
maze[coord1.y][coord1.x]['bottom'] = False
maze[coord2.y][coord2.x]['top'] = False
def remove_walls(node):
'Remove walls between node and its children'
for child in node.children:
remove_wall(node.coord, child.coord)
remove_walls(child)
remove_walls(self.tree)
maze[self.start_square.y][self.start_square.x]['start'] = True
maze[self.end_square.y][self.end_square.x]['end'] = True
return json.dumps(maze)
def list_paths(self):
'Find all paths from the root node to leaf nodes'
def generate_directed_paths(node):
'Generate a list of directed paths from the given node to each leaf'
if node.children:
return [[node] + path for child in node.children for path in generate_directed_paths(child)]
else:
return [[node]]
return generate_directed_paths(self.tree)
@classmethod
def generate_random_maze(cls, width, height):
'Randomly generate maze by adding squares to the leaves of the maze'
def get_neighbouring_coordinates(coord):
'Get neighbouring coordinates that also lie in the rectangle'
return (
([Coordinate(coord.x-1, coord.y)] if coord.x > 0 else []) +
([Coordinate(coord.x+1, coord.y)] if coord.x < width-1 else []) +
([Coordinate(coord.x, coord.y-1)] if coord.y > 0 else []) +
([Coordinate(coord.x, coord.y+1)] if coord.y < height-1 else [])
)
maze_tree = cls(width, height)
maze_tree.start_square = Coordinate(0,0)
maze_tree.tree = SquareNode(coord=maze_tree.start_square, children=[])
tree_nodes = [maze_tree.tree]
used_squares = set([maze_tree.tree.coord])
while len(used_squares) < width*height:
# Choose a square to add to the maze, and the node it should be added to
all_choices = [(adjacent, node) for node in tree_nodes for adjacent in set(get_neighbouring_coordinates(node.coord)) - used_squares]
next_square, node = random.choice(all_choices)
# Create the new node and place it in the MazeTree
new_node = SquareNode(coord=next_square, children=[])
node.children.append(new_node)
# Record the new node and the square it occupies
tree_nodes.append(new_node)
used_squares.add(next_square)
        # Now that the maze is built, choose the longest path (breaking ties
        # randomly) and use its final leaf as the end square of the maze
paths = maze_tree.list_paths()
max_path_length = max(len(path) for path in paths)
maze_path = random.choice([path for path in paths if len(path) == max_path_length])
maze_tree.end_square = maze_path[-1].coord
return maze_tree
def print_depth(node, depth):
'Print the depth of every leaf node in the tree'
if node.children:
for child in node.children:
print_depth(child, depth+1)
else:
print('Reached leaf at depth {}'.format(depth))
if __name__ == "__main__":
mt = MazeTree.generate_random_maze(10,8)
print(mt)
print_depth(mt.tree, 0)
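    # Added: the same maze could also be exported for a front-end renderer
    # via the JSON representation built in to_json(), e.g.:
    # print(mt.to_json())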
|
CCI-MOC/moc-openstack-tools
|
setpass.py
|
Python
|
apache-2.0
| 1,945
| 0.001542
|
# Copyright 2016 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import string
import random
def random_password(size):
"""Generate a random password of length 'size'
The resulting password may contain any of:
upper or lowercase letters A-Z
the digits 0-9
valid punctuation marks defined in the 'punctuation' variable below
"""
punctuation = '#$%&!'
chars = string.ascii_letters + string.digits + punctuation
return ''.join(random.choice(chars) for _ in range(size))
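# Example (added; output varies per call): random_password(12) might return
# something like 'aB3#fQ9!x0Zt'.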
class SetpassClient:
"""Class for interacting with a Setpass server"""
def __init__(self, session, setpass_url):
self.url = setpass_url
self.session = session
    def get_token(self, userid, password, pin):
"""Add the user ID and random password to the setpass database.
Returns a token allowing the user to set their password.
"""
        body = {'password': password, 'pin': pin}
request_url = '{base}/token/{userid}'.format(base=self.url,
userid=userid)
response = self.session.put(request_url, json=body)
token = response.text
return token
def get_url(self, token):
""" Generate URL for the user to set their password """
url = "{base}?token={token}".format(base=self.url, token=token)
return url
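# --- usage sketch (added): typical flow, assuming an authenticated
# keystoneauth/requests-style session object exposing a .put() method.
# client = SetpassClient(session, 'https://setpass.example.org')
# password = random_password(16)
# token = client.get_token(user_id, password, pin='1234')
# print(client.get_url(token))  # link the user follows to set a password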
|