repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
igroen/homebrew | homebrew/homebrew.py | Python | isc | 2,053 | 0.000974 | import asyncio
import subprocess
from collections import defaultdict
from .cache import uses_cache
from .printer import print_overview
class HomeBrew:
    """Inspect installed Homebrew formulae and their mutual dependencies."""

    __slots__ = ("_installed", "_uses")

    def __init__(self):
        self.update()

    def update(self):
        """Refresh the cached list of installed packages and their uses."""
        self._installed = self._get_installed()
        self._uses = self._get_uses()

    def _get_installed(self):
        """Return the names of all installed formulae via ``brew list``."""
        result = subprocess.check_output(["brew", "list", "--formula"])
        return [r.decode() for r in result.split()]

    @uses_cache
    def _get_uses(self):
        """Map each installed package to the packages that depend on it."""
        # asyncio.wait() no longer accepts bare coroutines (deprecated in
        # 3.8, removed in 3.11), so gather them inside a coroutine instead.
        async def gather_uses():
            results = await asyncio.gather(
                *(self._get_uses_for_package(package)
                  for package in self._installed)
            )
            return dict(results)

        return asyncio.run(gather_uses())

    async def _get_uses_for_package(self, package):
        """Run ``brew uses --installed`` for one package, asynchronously.

        Returns a ``(package, [users...])`` tuple.
        """
        uses = await asyncio.create_subprocess_shell(
            f"brew uses --installed {package}",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,
        )
        stdout, _ = await uses.communicate()
        return package, stdout.decode().split()

    @property
    def installed_packages(self):
        return self._installed

    @property
    def packages_not_needed_by_other(self):
        """Packages no other installed package depends on."""
        return [key for key, val in self._uses.items() if not val]

    @property
    def packages_needed_by_other(self):
        """Mapping of package -> packages that use it (non-empty only)."""
        return {key: val for key, val in self._uses.items() if val}

    @property
    def package_dependencies(self):
        """Invert the uses mapping: user -> sorted list of its dependencies."""
        dependencies = defaultdict(list)
        for package, needed_by in self.packages_needed_by_other.items():
            for needed in needed_by:
                dependencies[needed].append(package)
        return {needed: sorted(packages) for needed, packages in dependencies.items()}

    @property
    def info(self):
        print_overview(
            self.installed_packages,
            self.packages_not_needed_by_other,
            self.packages_needed_by_other,
            self.package_dependencies,
        )
|
appleseedhq/cortex | test/IECore/Object.py | Python | bsd-3-clause | 6,383 | 0.04465 | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class TestObject( unittest.TestCase ) :
    """Unit tests for the IECore Object factory and registry machinery."""

    def setUp( self ) :
        # NOTE(review): these names appear unused by the tests below.
        self.typeNames = [
            "FloatTypedData",
            "FloatVectorData",
            "V3fVectorData"
        ]

    def testDataTypesDefinition( self ) :
        """
        Function that checks if all the Data classes from IECore were defined on __dataTypesConversion dict.
        It raises an exception if there's any problem.
        """
        import IECore

        def test(c):
            try:
                return issubclass(c, IECore.Data) and not (c is IECore.Data)
            except:
                return False

        dataClasses = filter(test, map(lambda x: getattr(IECore, x), dir(IECore)))
        notDefinedClasses = set(dataClasses).difference(IECore.getDataDerivedTypes())
        if len(notDefinedClasses) > 0:
            # Py2/py3-compatible raise syntax (was the py2-only "raise E, msg").
            raise Exception(
                "The following classes were not defined on the conversion dictionaire: " +
                ", ".join( map(str, notDefinedClasses) ) + ".\nPlease, add them on DataTraits.py")

    def testObjectCreateAndCast( self ):
        """
        Tests if all Object derived classes can be created using the factory function and
        if they can be casted to Object pointers.
        PS: Data derived objects should be casted down to Data. Because Data is casted to Object.
        """
        import IECore

        def objectDerived(c):
            try:
                return issubclass(c, IECore.Object) and not IECore.Object.isAbstractType(c.__name__)
            except:
                return False

        group = IECore.CompoundObject()
        objectClasses = filter(objectDerived, map(lambda x: getattr(IECore, x), dir(IECore)))
        notCreated = []
        notCasted = []
        for c in objectClasses:
            tId = IECore.Object.typeIdFromTypeName( c.__name__ )
            try:
                obj = IECore.Object.create(tId)
            except:
                notCreated.append(c)
            else:
                try:
                    group["object"] = obj
                except:
                    notCasted.append(c)
        errors = ""
        if len( notCreated ) :
            errors += "The following classes could not be created from Object.create() function: " + str( notCreated ) + "\n"
        if len( notCasted ):
            errors += "The following classes could not be casted to ObjectPtr:" + str( notCasted )
        if len( errors ):
            raise Exception( errors )

    def testDynamicDowncasting( self ):
        """
        Tests if python can downcast a ObjectPtr to a proper derived Object instance.
        The downcast usually works, but when the object is used on a function that requires a derived class, then
        it didn't match. This problem was solved with the INTRUSIVE_PTR_PATCH used on the bindings.
        This does not test every class on IECore, but should...
        """
        o = IECore.CompoundObject()
        o["first"] = IECore.IntData( 1 )
        t = IECore.CompoundData( { "first": o["first"] } )

    def testTypeIdToNameMapping( self ) :
        # typeName <-> typeId must round-trip for every registered type.
        for tId in IECore.TypeId.values.values() :
            if tId==IECore.TypeId.Invalid :
                continue
            if IECore.Object.isType( tId ) :
                self.assertEqual( tId, IECore.Object.typeIdFromTypeName( IECore.Object.typeNameFromTypeId( tId ) ) )

    def testCreate( self ) :
        # Both the id-based and the name-based factories must work.
        for tId in IECore.TypeId.values.values() :
            if tId==IECore.TypeId.Invalid :
                continue
            if IECore.Object.isType( tId ) and not IECore.Object.isAbstractType( tId ) :
                o = IECore.Object.create( tId )
                self.assertEqual( o.typeId(), tId )
                self.assertEqual( o.typeName(), IECore.Object.typeNameFromTypeId( tId ) )
                oo = IECore.Object.create( IECore.Object.typeNameFromTypeId( tId ) )
                self.assertEqual( oo.typeId(), tId )

    def testCopy( self ) :
        # A copy of a freshly created object must compare equal to it.
        for tId in IECore.TypeId.values.values() :
            if tId==IECore.TypeId.Invalid :
                continue
            if IECore.Object.isType( tId ) and not IECore.Object.isAbstractType( tId ) :
                o = IECore.Object.create( tId )
                oo = o.copy()
                self.assertEqual( o, oo )

    def testCopyFrom( self ) :
        i = IECore.IntData( 1 )
        ii = IECore.IntData( 2 )
        self.assertNotEqual( i, ii )
        ii.copyFrom( i )
        self.assertEqual( i, ii )
        # copyFrom between unrelated types must raise.
        f = IECore.FloatData( 1 )
        self.assertNotEqual( i, f )
        self.assertRaises( RuntimeError, IECore.curry( ii.copyFrom, f ) )
        b = IECore.BlindDataHolder()
        b.blindData()["floatData"] = IECore.FloatData( 1.0 )
        b.blindData()["intData"] = IECore.IntData( -5 )
        bb = IECore.BlindDataHolder()
        self.assertNotEqual( b, bb )
        bb.copyFrom( b )
        self.assertEqual( b, bb )

    def testHash( self ) :
        # Every concrete type must hash, consistently between the two
        # hash() overloads, and no two types may share a hash.
        allHashes = set()
        objectsCreated = 0
        for t in IECore.TypeId.names :
            o = None
            with IECore.IgnoredExceptions( RuntimeError ) :
                o = IECore.Object.create( t )
            if o is not None :
                objectsCreated += 1
                allHashes.add( str( o.hash() ) )
                h = IECore.MurmurHash()
                o.hash( h )
                self.assertEqual( h, o.hash() )
        self.assertEqual( len( allHashes ), objectsCreated )
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
heddle317/moto | moto/core/responses.py | Python | apache-2.0 | 22,064 | 0.000453 | from __future__ import unicode_literals
import datetime
import json
import logging
import re
import pytz
from moto.core.exceptions import DryRunClientError
from jinja2 import Environment, DictLoader, TemplateNotFound
import six
from six.moves.urllib.parse import parse_qs, urlparse
import xmltodict
from pkg_resources import resource_filename
from werkzeug.exceptions import HTTPException
from moto.compat import OrderedDict
from moto.core.utils import camelcase_to_underscores, method_names_from_class
log = logging.getLogger(__name__)
def _decode_dict(d):
decoded = {}
for key, value in d.items():
if isinstance(key, six.binary_type):
newkey = key.decode("utf-8")
elif isinstance(key, (list, tuple)):
newkey = []
for k in key:
if isinstance(k, six.binary_type):
newkey.append(k.decode('utf-8'))
else:
newkey.append(k)
else:
newkey = key
if isinstance(value, six.binary_type):
newvalue = value.decode("utf-8")
elif isinstance(value, (list, tuple)):
newvalue = []
for v in value:
if isinstance(v, six.binary_type):
newvalue.append(v.decode('utf-8'))
else:
newvalue.append(v)
else:
newvalue = value
decoded[newkey] = newvalue
return decoded
class DynamicDictLoader(DictLoader):
    """Dict-backed template loader whose mapping can grow at runtime.

    Note: There's a bug in jinja2 pre-2.7.3 DictLoader where caching does not work.
    Including the fixed (current) method version here to ensure performance benefit
    even for those using older jinja versions.
    """

    def get_source(self, environment, template):
        if template not in self.mapping:
            raise TemplateNotFound(template)
        source = self.mapping[template]
        # The third element is the up-to-date check used by jinja's cache.
        return source, None, lambda: source == self.mapping.get(template)

    def update(self, mapping):
        self.mapping.update(mapping)

    def contains(self, template):
        return template in self.mapping
class _TemplateEnvironmentMixin(object):
    """Mixin that lazily registers response templates in a shared jinja2
    environment, keyed by the id() of the template source string."""

    def __init__(self):
        super(_TemplateEnvironmentMixin, self).__init__()
        self.loader = DynamicDictLoader({})
        self.environment = Environment(
            loader=self.loader, autoescape=self.should_autoescape)

    @property
    def should_autoescape(self):
        # Allow for subclass to overwrite
        return False

    def contains_template(self, template_id):
        return self.loader.contains(template_id)

    def response_template(self, source):
        # NOTE(review): keyed on id(source); assumes templates are
        # module-level string constants that live for the process lifetime --
        # id() values of garbage-collected strings can be reused. Confirm.
        template_id = id(source)
        if not self.contains_template(template_id):
            self.loader.update({template_id: source})
            # Rebuild the environment so the new template is picked up.
            self.environment = Environment(loader=self.loader, autoescape=self.should_autoescape, trim_blocks=True,
                                           lstrip_blocks=True)
        return self.environment.get_template(template_id)
class BaseResponse(_TemplateEnvironmentMixin):
default_region = 'us-east-1'
region_regex = r'\.(.+?)\.amazonaws\.com'
aws_service_spec = None
@classmethod
def dispatch(cls, *args, **kwargs):
    # Create a fresh responder per request so per-request state assigned by
    # setup_class() is never shared between requests.
    return cls()._dispatch(*args, **kwargs)
def setup_class(self, request, full_url, headers):
    """Populate per-request state (body, querystring, uri, method, region,
    headers) from either a boto-style or a Flask-style request object.

    The querystring is resolved from, in order: the Flask form data, the
    URL query, a JSON body (when the service spec expects JSON), a raw
    urlencoded body, and finally the headers themselves.
    """
    querystring = {}
    if hasattr(request, 'body'):
        # Boto
        self.body = request.body
    else:
        # Flask server

        # FIXME: At least in Flask==0.10.1, request.data is an empty string
        # and the information we want is in request.form. Keeping self.body
        # definition for back-compatibility
        self.body = request.data

        querystring = {}
        for key, value in request.form.items():
            querystring[key] = [value, ]

    # Keep the undecoded body around: parse_qs below needs the raw form.
    raw_body = self.body
    if isinstance(self.body, six.binary_type):
        self.body = self.body.decode('utf-8')

    if not querystring:
        querystring.update(
            parse_qs(urlparse(full_url).query, keep_blank_values=True))
    if not querystring:
        if 'json' in request.headers.get('content-type', []) and self.aws_service_spec:
            decoded = json.loads(self.body)

            # The JSON protocol carries the action in the X-Amz-Target header.
            target = request.headers.get(
                'x-amz-target') or request.headers.get('X-Amz-Target')
            service, method = target.split('.')
            input_spec = self.aws_service_spec.input_spec(method)
            flat = flatten_json_request_body('', decoded, input_spec)
            for key, value in flat.items():
                querystring[key] = [value]
        elif self.body:
            querystring.update(parse_qs(raw_body, keep_blank_values=True))
    if not querystring:
        querystring.update(headers)

    querystring = _decode_dict(querystring)

    self.uri = full_url
    self.path = urlparse(full_url).path
    self.querystring = querystring
    self.method = request.method
    self.region = self.get_region_from_url(request, full_url)

    self.headers = request.headers
    if 'host' not in self.headers:
        self.headers['host'] = urlparse(full_url).netloc
    self.response_headers = {"server": "amazon.com"}
def get_region_from_url(self, request, full_url):
    """Extract the AWS region for this request.

    Tries, in order: the region embedded in the endpoint hostname, the
    SigV4 credential scope of the Authorization header, then the default.
    """
    match = re.search(self.region_regex, full_url)
    if match:
        region = match.group(1)
    elif 'Authorization' in request.headers:
        # Credential scope looks like "Credential=<key>/<date>/<region>/...".
        region = request.headers['Authorization'].split(",")[
            0].split("/")[2]
    else:
        region = self.default_region
    return region
def _dispatch(self, request, full_url, headers):
    # Parse the request into instance state, then route to the action method.
    self.setup_class(request, full_url, headers)
    return self.call_action()
def call_action(self):
    """Dispatch to the handler method named after the request's Action.

    The action comes from the ``Action`` query parameter or, failing that,
    the ``X-Amz-Target`` header, and is converted from CamelCase to the
    snake_case method name.  Returns a ``(status, headers, body)`` triple;
    raises NotImplementedError when no handler method exists.
    """
    headers = self.response_headers
    action = self.querystring.get('Action', [""])[0]
    if not action:  # Some services use a header for the action
        # Headers are case-insensitive. Probably a better way to do this.
        match = self.headers.get(
            'x-amz-target') or self.headers.get('X-Amz-Target')
        if match:
            action = match.split(".")[-1]

    action = camelcase_to_underscores(action)
    method_names = method_names_from_class(self.__class__)
    if action in method_names:
        method = getattr(self, action)
        try:
            response = method()
        except HTTPException as http_error:
            response = http_error.description, dict(status=http_error.code)
        if isinstance(response, six.string_types):
            # Handlers may return a bare body string => implicit 200.
            return 200, headers, response
        else:
            # ... or an explicit (body, headers) pair.
            body, new_headers = response
            status = new_headers.get('status', 200)
            headers.update(new_headers)
            # Cast status to string
            if "status" in headers:
                headers['status'] = str(headers['status'])
            return status, headers, body
    raise NotImplementedError(
        "The {0} action has not been implemented".format(action))
def _get_param(self, param_name, if_none=None):
    """Return the first querystring value for *param_name*, or *if_none*."""
    values = self.querystring.get(param_name)
    if values is None:
        return if_none
    return values[0]
def _get_int_param(self, param_name, if_none=None):
    """Return the querystring value coerced to int, or *if_none* if absent."""
    val = self._get_param(param_name)
    if val is not None:
        return int(val)
    return if_none
def _get_bool_param(self, param_name, if_none=None):
    """Return the querystring value coerced to bool.

    Only the literal strings "true"/"false" (any case) are recognised;
    any other value, or a missing parameter, yields *if_none*.
    """
    val = self._get_param(param_name)
    if val is not None:
        if val.lower() == 'true':
            return True
        elif val.lower() == 'false':
            return False
    return if_none
def _get_multi_param(self, param_prefix):
"""
Given a querystring of ?LaunchConfigurationNames.member.1=my-test-1&LaunchConfigurationNames.member.2=my-test-2
this will return ['my-test-1', 'my-test-2']
"""
if pa |
silly-wacky-3-town-toon/SOURCE-COD | otp/otpbase/Settings.py | Python | apache-2.0 | 893 | 0.00224 | import collections
import json
import os
# collections.MutableMapping moved to collections.abc in Python 3.3 and the
# old alias was removed in 3.10; resolve it compatibly for py2 and py3.
try:
    _MutableMapping = collections.abc.MutableMapping
except AttributeError:  # Python 2 has no collections.abc
    _MutableMapping = collections.MutableMapping


class Settings(_MutableMapping):
    """A dict-like settings store persisted to a JSON file.

    Every mutation is written straight back to disk, so the file always
    mirrors the in-memory state.
    """

    def __init__(self, filename):
        self.filename = filename
        self.store = {}
        self.read()

    def read(self):
        """Load the store from disk, creating the file if it is missing."""
        if os.path.exists(self.filename):
            with open(self.filename, 'r') as f:
                self.store = json.load(f)
        else:
            self.write()

    def write(self):
        """Serialise the current store to disk as pretty-printed JSON."""
        with open(self.filename, 'w') as f:
            json.dump(self.store, f, sort_keys=True, indent=2, separators=(',', ': '))

    def __setitem__(self, key, value):
        self.store[key] = value
        self.write()

    def __delitem__(self, key):
        del self.store[key]
        self.write()

    def __getitem__(self, key):
        return self.store[key]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)
|
feist/pcs | pcs_test/tools/command_env/config_http_files.py | Python | gpl-2.0 | 4,688 | 0 | import base64
import json
from pcs_test.tools.command_env.mock_node_communicator import (
place_multinode_call,
)
class FilesShortcuts:
    """Test-config shortcuts that register expected file put/remove HTTP
    calls against the mocked node communicator."""

    def __init__(self, calls):
        self.__calls = calls

    def put_files(
        self,
        node_labels=None,
        pcmk_authkey=None,
        corosync_authkey=None,
        corosync_conf=None,
        pcs_disaster_recovery_conf=None,
        pcs_settings_conf=None,
        communication_list=None,
        name="http.files.put_files",
    ):
        # pylint: disable=too-many-arguments
        """
        Create a call for the files distribution to the nodes.

        node_labels list -- create success responses from these nodes
        pcmk_authkey bytes -- content of pacemaker authkey file
        corosync_authkey bytes -- content of corosync authkey file
        corosync_conf string -- content of corosync.conf
        pcs_disaster_recovery_conf string -- content of pcs DR config
        pcs_settings_conf string -- content of pcs_settings.conf
        communication_list list -- create custom responses
        name string -- the key of this call
        """
        input_data = {}
        output_data = {}
        written_output_dict = dict(
            code="written",
            message="",
        )
        # Binary payloads are transferred base64-encoded; text configs as-is.
        if pcmk_authkey:
            file_id = "pacemaker_remote authkey"
            input_data[file_id] = dict(
                data=base64.b64encode(pcmk_authkey).decode("utf-8"),
                type="pcmk_remote_authkey",
                rewrite_existing=True,
            )
            output_data[file_id] = written_output_dict
        if corosync_authkey:
            file_id = "corosync authkey"
            input_data[file_id] = dict(
                data=base64.b64encode(corosync_authkey).decode("utf-8"),
                type="corosync_authkey",
                rewrite_existing=True,
            )
            output_data[file_id] = written_output_dict
        if corosync_conf:
            file_id = "corosync.conf"
            input_data[file_id] = dict(
                data=corosync_conf,
                type="corosync_conf",
            )
            output_data[file_id] = written_output_dict
        if pcs_disaster_recovery_conf:
            file_id = "disaster-recovery config"
            input_data[file_id] = dict(
                data=base64.b64encode(pcs_disaster_recovery_conf).decode(
                    "utf-8"
                ),
                type="pcs_disaster_recovery_conf",
                rewrite_existing=True,
            )
            output_data[file_id] = written_output_dict
        if pcs_settings_conf:
            file_id = "pcs_settings.conf"
            input_data[file_id] = dict(
                data=pcs_settings_conf,
                type="pcs_settings_conf",
                rewrite_existing=True,
            )
            output_data[file_id] = written_output_dict
        place_multinode_call(
            self.__calls,
            name,
            node_labels,
            communication_list,
            action="remote/put_file",
            param_list=[("data_json", json.dumps(input_data))],
            output=json.dumps(dict(files=output_data)),
        )

    def remove_files(
        self,
        node_labels=None,
        pcsd_settings=False,
        pcs_disaster_recovery_conf=False,
        communication_list=None,
        name="http.files.remove_files",
    ):
        """
        Create a call for removing the files on the nodes.

        node_labels list -- create success responses from these nodes
        pcsd_settings bool -- if True, remove file pcsd_settings
        pcs_disaster_recovery_conf bool -- if True, remove pcs DR config
        communication_list list -- create custom responses
        name string -- the key of this call
        """
        input_data = {}
        output_data = {}
        if pcsd_settings:
            file_id = "pcsd settings"
            input_data[file_id] = dict(type="pcsd_settings")
            output_data[file_id] = dict(
                code="deleted",
                message="",
            )
        if pcs_disaster_recovery_conf:
            file_id = "pcs disaster-recovery config"
            input_data[file_id] = dict(type="pcs_disaster_recovery_conf")
            output_data[file_id] = dict(
                code="deleted",
                message="",
            )
        place_multinode_call(
            self.__calls,
            name,
            node_labels,
            communication_list,
            action="remote/remove_file",
            param_list=[("data_json", json.dumps(input_data))],
            output=json.dumps(dict(files=output_data)),
        )
|
import imaplib
import pprint

import imaplib_connect

# Enable verbose protocol-level tracing of the IMAP conversation.
imaplib.Debug = 4

with imaplib_connect.open_connection() as c:
    c.select('INBOX', readonly=True)
    # BODY.PEEK fetches the header without setting the \Seen flag.
    typ, msg_data = c.fetch('1', '(BODY.PEEK[HEADER] FLAGS)')
    pprint.pprint(msg_data)
|
SilverWingedSeraph/pyparens | parens/eval.py | Python | mit | 4,263 | 0.000704 | # eval.py
# Python Lisp EVALuation function
import importlib
from .common import Symbol, List
from .env import global_env
def get_var(x, env):
    """Look up a variable in the env. If it's not there, look it up globally.

    Raises NameError if the name is bound in neither place.
    """
    try:
        val = env[x]
    except KeyError:
        try:
            val = globals()[x]
        except KeyError:
            # Was a bare except; only a missing key means "undefined".
            raise NameError("No variable named {}".format(x))
    return val
def dot_extraction(x, env):
    'Dot notation extraction: e.g. (. obj attr) will give obj.attr'
    if len(x) != 3:
        raise SyntaxError("Dot extraction requires " +
                          "exactly two arguments.")
    _, parent, child = x
    # Evaluate the object expression, then fetch the named attribute.
    return getattr(eval(parent, env), child)
def wants_env(f):
    """Return True if callable *f* declares a keyword-only parameter named
    ``env``, otherwise False (always a bool, never an implicit None).

    Builtins and other C callables without an introspectable signature
    count as not wanting an env.
    """
    from inspect import signature
    try:
        parameters = signature(f).parameters.values()
    except ValueError:
        return False
    return any(param.kind == param.KEYWORD_ONLY and param.name == 'env'
               for param in parameters)
def eval(x, env=global_env):
    """
    Evaluate an expression x in an env
    """
    if isinstance(x, Symbol):
        if x[0] == '"':
            # x is a string literal
            # Cut off the quotes and return it as such
            return x[1:-1]
        # OK, it's a variable.
        return get_var(x, env)
    # Maybe x isn't a list but some kind of literal
    elif not isinstance(x, List):
        # const. literal
        return x
    # OK, x is a list, but is it empty?
    elif len(x) == 0:
        return []
    # It isn't empty... maybe it's a special form.
    # Dot extraction special form
    elif x[0] == '.':
        return dot_extraction(x, env)
    # Conditional special form
    elif x[0] == 'if':
        try:
            # With an alt clause
            (_, test, conseq, alt) = x
        except ValueError:
            try:
                # Without an alt clause, defaults to False
                (_, test, conseq) = x
                alt = False
            except ValueError:
                raise SyntaxError(
                    "if requires two or three arguments" +
                    "(test, consqeuence, and optional alternative)")
        exp = (conseq if eval(test, env) else alt)
        return eval(exp, env)
    # Variable definition special form
    elif x[0] == 'define':
        try:
            (_, var, exp) = x
        except ValueError:
            raise SyntaxError(
                "define requires exactly two arguments " +
                "(the name of the variable and its value)")
        val = eval(exp, env)
        env[var] = val
        # This is not standard Lisp, but I like it
        return val
    # Import special form
    elif x[0] == 'import':
        try:
            (_, exp) = x
        except ValueError as e:
            raise SyntaxError(
                "import requires exactly 1 argument " +
                "(the name of the module). {}".format(e))
        return importlib.import_module(exp)
    else:
        # This is the default case:
        # (f arg1 arg2 .. argn)
        # or perhaps
        # (item1 item2 ... itemn)
        # Evaluate the first item, to see if it gives us back a callable
        proc = eval(x[0], env)
        # Handle the case of (item1 item2 ... itemn)
        if not callable(proc):
            # If input is of the form (item), put item in a list and we're done
            if len(x) == 1:
                return [proc]
            else:
                # If there are more elements, eval them and put them in a list
                L = [proc]
                for item in x[1:]:
                    # BUGFIX: evaluate list items in the *current* env, not
                    # the global default env, so local bindings are visible.
                    L.append(eval(item, env))
                return L
        # OK, input is of the form (f arg1 arg2 ... argn)
        args = [eval(arg, env) for arg in x[1:]]
        try:
            if wants_env(proc):
                return proc(*args, env=env)
            else:
                return proc(*args)
        except TypeError as e:
            if callable(proc):
                # Callable, but wrong number of args or something
                raise NameError(e)
            # BUGFIX: format the whole message (the old "+" left a literal
            # "{}" in the first half). Unreachable in practice, since proc
            # is known callable here, but kept for safety.
            raise NameError("Tried to call a non-callable Python object {} "
                            "(its type is {})".format(x[0], type(proc)))
|
HelloLily/hellolily | lily/utils/migrations/0016_remove_historylistitem.py | Python | agpl-3.0 | 606 | 0 | # -*- coding: utf-8 | -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the obsolete HistoryListItem model: its foreign-key fields are
    # removed first, then the model itself is deleted.

    dependencies = [
        ('utils', '0015_auto_20161121_1831'),
        ('notes', '0010_remove_polymorphic_cleanup')
    ]

    operations = [
        migrations.RemoveField(
            model_name='historylistitem',
            name='polymorphic_ctype',
        ),
        migrations.RemoveField(
            model_name='historylistitem',
            name='tenant',
        ),
        migrations.DeleteModel(
            name='HistoryListItem',
        ),
    ]
|
tgquintela/pySpatialTools | pySpatialTools/Retrieve/collectionretrievers.py | Python | mit | 13,968 | 0.001432 |
"""
CollectionRetrievers
--------------------
Wrapper of retrievers with a customed api oriented to interact properly with
spatial descriptor models.
This module contains the main class to mamage all retrievers without entering
in low-level programming.
As the FeaturesRetriever there are a requisit to have all the retrievers under
this manager, but in this case it is not the output, it is the input.
All of them are required to have the same input size.
"""
import numpy as np
from tools_retriever import create_aggretriever
from pySpatialTools.utils.selectors import Spatial_RetrieverSelector,\
format_selection
from pySpatialTools.utils.neighs_info import join_by_iss
from retrievers import BaseRetriever
inttypes = [int, np.int32, np.int64]
class RetrieverManager:
"""Retreiver of elements given other elements and only considering the
information of the non-retrivable elements.
There are different retriavables objects and different inputs.
See also
--------
pst.FeaturesManagement.FeaturesManager
"""
__name__ = 'pySpatialTools.RetrieverManager'
typeret = 'manager'
def _initialization(self):
    """Mutable globals reset."""
    ## Elements information
    self.k_perturb = 0          # number of perturbations applied
    self.retrievers = []        # managed retriever objects
    self.n_inputs = 0
    self.staticneighs = True
    self.selector = (0, 0)      # default (retriever, output) selection
    self._preferent_ret = 0     # retriever used by __iter__
def __init__(self, retrievers, selector_retriever=None):
    # Reset mutable state, then register retrievers and the selector.
    self._initialization()
    self._format_retrievers(retrievers)
    self._format_selector(selector_retriever)
def __len__(self):
    """Number of retrievers which are in this manager."""
    return len(self.retrievers)
def __getitem__(self, i_ret):
    """Get the `i_ret` retriever.

    Raises IndexError for out-of-range indices; negative indices are
    deliberately rejected.
    """
    if i_ret < 0 or i_ret >= len(self.retrievers):
        # Fixed message: this manager holds retrievers, not features
        # (old text was copy-pasted from the features manager).
        raise IndexError("Not correct index for retrievers.")
    return self.retrievers[i_ret]
def set_iter(self, ret_pre):
    """Set preferent retriever."""
    # Index (int) of the retriever that __iter__ will iterate over.
    self._preferent_ret = ret_pre
def __iter__(self):
    """It assumes preferent retriever 0."""
    ## If constant selected retriever
    if type(self._preferent_ret) == int:
        for neighs_info in self[self._preferent_ret]:
            yield neighs_info
    else:
        # NOTE(review): non-int selections are silently ignored, so
        # iteration yields nothing; presumably unimplemented -- confirm.
        pass
############################ Retriever functions ##########################
###########################################################################
def _retrieve_neighs_constant(self, i, typeret_i=None):
    """Retrieve neighbourhood under conditions of ifdistance or others
    interior parameters. Constant typeret_i assumption.

    Parameters
    ----------
    i: int
        the element id.
    typeret_i: tuple, np.ndarray or None (default)
        the selector of the retrievers we want to use.

    Returns
    -------
    neighs_info: pst.Neighs_Info
        the neighborhood information.

    """
    # Single retriever/output pair: one direct delegated call.
    typeret_i, out_ret = self.get_type_ret(i, typeret_i)
    neighs_info =\
        self.retrievers[typeret_i].retrieve_neighs(i, output=out_ret)
    return neighs_info
def _retrieve_neighs_variable(self, i, typeret_i=None):
    """Retrieve neighbourhood under conditions of ifdistance or others
    interior parameters. Variable typeret_i assumption.

    Parameters
    ----------
    i: int
        the element id.
    typeret_i: tuple, np.ndarray or None (default)
        the selector of the retrievers we want to use.

    Returns
    -------
    neighs_info: pst.Neighs_Info
        the neighborhood information.

    """
    typeret_i, out_ret = self.get_type_ret(i, typeret_i)
    # Normalise everything to parallel lists so each element can go to a
    # different retriever/output, then join the partial results.
    i = [i] if type(i) in inttypes else i
    typeret_i = [typeret_i] if type(typeret_i) != list else typeret_i
    out_ret = [out_ret] if type(out_ret) != list else out_ret
    neighs = []
    for j in range(len(typeret_i)):
        neighs_info = self.retrievers[typeret_i[j]].\
            retrieve_neighs(i[j], output=out_ret[j])
        neighs.append(neighs_info)
    neighs_info = join_by_iss(neighs)
    return neighs_info
def _retrieve_neighs_general(self, i, typeret_i=None):
    """Retrieve neighbourhood under conditions of ifdistance or others
    interior parameters. No typeret_i assumption.

    Parameters
    ----------
    i: int
        the element id.
    typeret_i: tuple, np.ndarray or None (default)
        the selector of the retrievers we want to use.

    Returns
    -------
    neighs_info: pst.Neighs_Info
        the neighborhood information.

    """
    typeret_i, out_ret = self.get_type_ret(i, typeret_i)
    # A list selector means per-element retrievers (variable case);
    # otherwise fall back to the single-retriever (constant) path.
    if type(typeret_i) == list:
        i = [i] if type(i) in inttypes else i
        neighs = []
        for j in range(len(typeret_i)):
            neighs_info = self.retrievers[typeret_i[j]].\
                retrieve_neighs(i[j], output=out_ret[j])
            neighs.append(neighs_info)
        neighs_info = join_by_iss(neighs)
    else:
        neighs_info =\
            self.retrievers[typeret_i].retrieve_neighs(i, output=out_ret)
    return neighs_info
def compute_nets(self, kret=None):
    """Compute all the possible relations if there is a common
    (homogeneous) output.

    Parameters
    ----------
    kret: int, list or None (default)
        the id of the retrievers we want to get their spatial networks

    Returns
    -------
    nets: list
        one spatial network per selected retriever.

    """
    ## Check that match conditions (TODO)
    ## Format kret
    kret = range(len(self.retrievers)) if kret is None else kret
    kret = [kret] if type(kret) == int else kret
    ## Compute
    nets = []
    for r in kret:
        nets.append(self.retrievers[r].compute_neighnets(self.selector))
    return nets
######################### Auxiliar administrative #########################
###########################################################################
def add_retrievers(self, retrievers):
    """Add new retrievers.

    Parameters
    ----------
    retrievers: list, pst.BaseRetriever
        the retrievers we want to input to that manager.

    """
    self._format_retrievers(retrievers)
def set_neighs_info(self, bool_input_idx):
    """Setting the neighs info of the retrievers.

    Parameters
    ----------
    bool_input_idx: boolean or None
        if the input is going to be indices or in the case of false,
        the whole spatial information.

    """
    # Propagate the input-format flag to every managed retriever.
    for i in range(len(self)):
        self.retrievers[i]._format_neighs_info(bool_input_idx)
def set_selector(self, selector):
    """Set a common selector in order to not depend on continuous external
    orders.

    Parameters
    ----------
    selector: tuple, np.ndarray, None or others
        the selector information to choose retriever.

    """
    self._format_selector(selector)
################################ Formatters ############### | ################
###########################################################################
def _format_retrievers(sel | f, retrievers):
"""Format the retrievers.
Parameters
----------
retrievers: list, pst.BaseRetriever
the retrievers we want to input to that manager.
"""
if type(retrievers) == list:
self.retrievers += retrievers
elif retrievers.__name__ == 'pySpatialTools.BaseRetriever':
self.retrievers.append(retrievers)
elif not(type(retrievers) == list):
raise TypeError("Incorrect type. Not retrievers list.")
## WARNING: By default it is determined by the first retriever
ret_n_inputs = [len(self.retrievers[i]) for i in range(len(self))]
assert(all([len(self.retrievers[0]) == r for r in ret_n_inputs]))
assert(all([self.retrievers[0].k_perturb == r.k_perturb
for r i |
AhmadHamzeei/Amir-Accounting | amir/setting.py | Python | gpl-3.0 | 27,685 | 0.006502 | import gtk
import os
import gobject
import class_subject
import upgrade
import database
import dbconfig
import subjects
from share import share
from helpers import get_builder, comboInsertItems
config = share.config
class Setting(gobject.GObject):
def __init__(self):
    """Build the settings window from the glade file and populate every
    widget from the current configuration."""
    gobject.GObject.__init__(self)
    self.builder = get_builder("setting")
    self.window = self.builder.get_object("window1")
    self.filechooser = self.builder.get_object("filechooser")
    self.filename = self.builder.get_object("filename")

    self.treeview = self.builder.get_object("databases-table")
    # Paths are latin text; force LTR even in RTL locales.
    self.treeview.set_direction(gtk.TEXT_DIR_LTR)
    self.liststore = gtk.ListStore(gobject.TYPE_BOOLEAN, str, str)
    if gtk.widget_get_default_direction() == gtk.TEXT_DIR_RTL :
        halign = 1
    else:
        halign = 0

    # Radio-style toggle column marking the currently active database.
    crtoggle = gtk.CellRendererToggle()
    crtoggle.set_radio(True)
    # crtoggle.set_activatable(True)
    crtoggle.connect('toggled', self.changeCurrentDb, 0)
    column = gtk.TreeViewColumn(_("Current"), crtoggle, active=0)
    column.set_alignment(halign)
    column.set_spacing(5)
    column.set_resizable(True)
    self.treeview.append_column(column)
    column = gtk.TreeViewColumn(_("Name"), gtk.CellRendererText(), text=1)
    column.set_alignment(halign)
    column.set_spacing(5)
    column.set_resizable(True)
    self.treeview.append_column(column)
    column = gtk.TreeViewColumn(_("Path"), gtk.CellRendererText(), text=2)
    column.set_alignment(halign)
    column.set_spacing(5)
    column.set_resizable(True)
    self.treeview.append_column(column)
    self.treeview.set_model(self.liststore)

    # Fill the database list; remember the iter of the active row so
    # changeCurrentDb can flip the radio toggles.
    i = 0
    for dbpath in config.dblist:
        if i == config.currentdb - 1:
            self.active_iter = self.liststore.append((True, config.dbnames[i], dbpath))
        else:
            self.liststore.append((False, config.dbnames[i], dbpath))
        i += 1

    # self.olddb = self.builder.get_object("olddb")
    # self.newdb = self.builder.get_object("newdb")
    self.infolabel = self.builder.get_object("infolabel")
    self.infolabel.set_text(config.db.dbfile)

    # Locale / date-format widgets, preset from the saved configuration.
    self.langlist = self.builder.get_object("language")
    comboInsertItems(self.langlist, config.langlist)
    self.langlist.set_active(config.localelist.index(config.locale))
    self.dateformat = self.builder.get_object("dateformat")
    comboInsertItems(self.dateformat, config.datetypes)
    self.dateformat.set_active(config.datetype)
    self.delimiter = self.builder.get_object("delimiter")
    comboInsertItems(self.delimiter, config.datedelims)
    self.delimiter.set_active(config.datedelim)
    self.dateorder = self.builder.get_object("dateorder")
    comboInsertItems(self.dateorder, [])
    for order in config.dateorders:
        self.dateorder.append_text(order[0] + " - " + order[1] + " - " + order[2])
    self.dateorder.set_active(config.dateorder)
    self.uselatin = self.builder.get_object("uselatin")
    # digittype 0 means latin digits; anything else means localized digits.
    if config.digittype == 0:
        self.uselatin.set_active(True)
    else:
        self.uselatin.set_active(False)
    self.repair_atstart = self.builder.get_object("repair_atstart")
    self.repair_atstart.set_active(config.repair_atstart)

    # Print-layout settings (margins in page units, font sizes in points).
    self.builder.get_object("topmargin").set_value(config.topmargin)
    self.builder.get_object("botmargin").set_value(config.botmargin)
    self.builder.get_object("rightmargin").set_value(config.rightmargin)
    self.builder.get_object("leftmargin").set_value(config.leftmargin)
    self.builder.get_object("namefont").set_value(config.namefont)
    self.builder.get_object("headerfont").set_value(config.headerfont)
    self.builder.get_object("contentfont").set_value(config.contentfont)
    self.builder.get_object("footerfont").set_value(config.footerfont)
    paper_size = gtk.paper_size_new_from_ppd(config.paper_ppd, config.paper_name, config.paper_width, config.paper_height)
    self.page_setup = gtk.PageSetup()
    self.page_setup.set_paper_size(paper_size)
    self.page_setup.set_orientation(config.paper_orientation)
    self.builder.get_object("papersize").set_text(config.paper_name)

    self.setup_config_tab()
    self.window.show_all()
    self.builder.connect_signals(self)
def changeCurrentDb(self, cell, path, column):
cpath = self.liststore.get_string_from_iter(self.active_iter)
if cpath != path:
iter = self.liststor | e.get_iter_from_string(path)
self.liststore.set(self.active_iter, column, False)
self.liststore.set(iter, column, True)
self.active_iter = iter
def selectDbFile(self, sender):
self.filechooser.set_action(gtk.FILE_CHOOSER_ACTION_OP | EN)
self.filechooser.set_current_folder (os.path.dirname (config.db.dbfile))
result = self.filechooser.run()
if result == gtk.RESPONSE_OK:
self.filename.set_text(self.filechooser.get_filename())
self.filechooser.hide()
# def selectOldDatabase(self, sender):
# self.filechooser.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
# result = self.filechooser.run()
# if result == gtk.RESPONSE_OK:
# self.olddb.set_text(self.filechooser.get_filename())
# self.filechooser.hide()
#
# def selectNewDatabase(self, sender):
# self.filechooser.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
# result = self.filechooser.run()
# if result == gtk.RESPONSE_OK:
# self.newdb.set_text(self.filechooser.get_filename())
# self.filechooser.hide()
def addDatabase(self, sender):
dialog = self.builder.get_object("dialog1")
dialog.set_title(_("Add Database"))
self.filename.set_text("")
result = dialog.run()
if result == 1 :
dbfile = self.filename.get_text()
if dbfile != "":
msg = ""
result = upgrade.checkInputDb(dbfile)
if result == -2:
msg = _("Can not connect to the database. The selected database file may not be a sqlite database or be corrupt.")
elif result == 0:
msg = _("The selected file is compatible with older versions of Amir. First convert it to the new version.")
if msg != "":
msgbox = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, msg)
msgbox.set_title(_("Error opening new database"))
msgbox.run()
msgbox.destroy()
else :
self.liststore.append((False, self.builder.get_object("dbname").get_text(), dbfile))
dialog.hide()
def removeDatabase(self, sender):
selection = self.treeview.get_selection()
iter = selection.get_selected()[1]
if iter != None :
msgbox = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL,
_("Are you sure to remove this database from the list?"))
msgbox.set_title(_("Are you sure?"))
result = msgbox.run()
msgbox.destroy()
if result == gtk.RESPONSE_OK :
self.liststore.remove(iter)
iter = self.liststore.get_iter_first()
if iter == None:
dbfile = os.path.join(os.path.expanduser('~'), '.amir', 'amir.sqlite')
dbname = 'amir.sqlite'
msgbox = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
_("All databases removed.\nThe default database will be opened f |
PHSCRC/boxbot | hardware/distance.py | Python | mit | 1,344 | 0.00372 |
import os, math
try:
from .adc import *
except SystemError:
from adc import *
from bisect import bisect_right, bisect_left
class InterpolatedDistance(ADC4):
def __init__(self, profiles, ad | dr, **kwargs):
super().__init__(addr, fn="distance", **kwargs)
self.__mm = []
self.__v = []
| for i in profiles:
i.sort(key=lambda x: -x[0])
repacked = tuple(zip(*i))
self.__mm.append(repacked[0])
self.__v.append(repacked[1])
def process_reading(self, reading, pin):
v = super().process_reading(reading, pin)
try:
v = v + 0.6255 * math.log(v) + 0.5249
except ValueError:
pass
index = bisect_right(self.__v[pin], v)
mm = self.__mm[pin]
return mm[min(max(index, 0), len(mm) - 1)] / 10
@classmethod
def from_files(cls, addr, *args, base_path=None, **kwargs):
profiles = []
for fn in args:
fd = open(os.path.join(base_path,fn) if base_path else fn)
lines = fd.readlines()
fd.close()
data = []
for i in lines:
line = i.strip().split(",")
data.append((int(line[0]), float(line[1])))
profiles.append(data)
return cls(profiles, addr, **kwargs)
|
WUJISHANXIA/wujishanxia | bootcamp/activities/models.py | Python | mit | 8,027 | 0 | from __future__ import unicode_literals
import json
from django.db.models.functions import TruncMonth, TruncDay
from django.db.models import Count
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import escape
@python_2_unicode_compatible
class Activity(models.Model):
FAVORITE = 'F'
LIKE = 'L'
UP_VOTE = 'U'
DOWN_VOTE = 'D'
ACTIVITY_TYPES = (
(FAVORITE, 'Favorite'),
(LIKE, 'Like'),
(UP_VOTE, 'Up Vote'),
(DOWN_VOTE, 'Down Vote'),
)
user = models.ForeignKey(User)
activity_type = models.CharField(max_length=1, choices=ACTIVITY_TYPES)
date = models.DateTimeField(auto_now_add=True)
feed = models.IntegerField(null=True, blank=True)
question = models.IntegerField(null=True, blank=True)
answer = models.IntegerField(null=True, blank=True)
class Meta:
verbose_name = 'Activity'
verbose_name_plural = 'Activities'
@staticmethod
def monthly_activity(user):
"""Static method to retrieve monthly statistical information about the
user activity.
@requires: user - Instance from the User Django model.
@returns: Two JSON arrays, the first one is dates which contains all
the dates with activity records, and the second one is
datapoints containing the sum of all the activity than had
place in every single month.
Both arrays keep the same order, so there is no need to order them.
"""
# months = ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio",
# "Julio", "Agosto", "Septiembre", "Octubre", "Noviembre", "Diciembre"]
query = Activity.objects.filter(user=user).annotate(
month=TruncMonth('date')).values('month').annotate(
c=Count('id')).values('month', 'c')
try:
dates, datapoints = zip(
*[[a['c'], str(a['month'].date())] for a in query])
return json.dumps(dates), json.dumps(datapoints)
except ValueError:
return json.dumps(0), json.dumps(0)
@staticmethod
def daily_activity(user):
"""Static method to retrieve daily statistical information about the
user activity.
@requires: user - Instance from the User Django model.
@returns: Two JSON arrays, the first one is dates which contains all
the dates with activity records, and the second one is
datapoints containing the sum of all the activity than had
place in every single day.
Both arrays keep the same order, so there is no need to order them.
"""
query = Activity.objects.filter(user=user).annotate(day=TruncDay(
'date')).values('day').annotate(c=Count('id')).values('day', 'c')
try:
dates, datapoints = zip(
*[[a['c'], str(a['day'].date())] for a in query])
return json.dumps(dates), json.dumps(datapoints)
except ValueError:
return json.dumps(0), json.dumps(0)
def __str__(self):
return self.activity_type
@python_2_unicode_compatible
class Notification(models.Model):
LIKED = 'L'
COMMENTED = 'C'
FAVORITED = 'F'
ANSWERED = 'A'
ACCEPTED_ANSWER = 'W'
EDITED_ARTICLE = 'E'
ALSO_COMMENTED = 'S'
NOTIFICATION_TYPES = (
(LIKED, 'Liked'),
(COMMENTED, 'Commented'),
(FAVORITED, 'Favorited'),
(ANSWERED, 'Answered'),
(ACCEPTED_ANSWER, 'Accepted Answer'),
(EDITED_ARTICLE, 'Edited Article'),
(ALSO_COMMENTED, 'Also Commented'),
)
_LIKED_TEMPLATE = '<a href="/{0}/">{1}</a> liked your post: <a href="/feeds/{2}/">{3}</a>' # noqa: E501
_COMMENTED_TEMPLATE = '<a href="/{0}/">{1}</a> commented on your post: <a href="/feeds/{2}/">{3}</a>' # noqa: E501
_FAVORITED_TEMPLATE = '<a href="/{0}/">{1}</a> favorited your question: <a href="/questions/{2}/">{3}</a>' # noqa: E501
_ANSWERED_TEMPLATE = '<a href="/{0}/">{1}</a> answered your question: <a href="/questions/{2}/">{3}</a>' # noqa: E501
_ACCEPTED_ANSWER_TEMPLATE = '<a href="/{0}/">{1}</a> accepted your answer: <a href="/questions/{2}/">{3}</a>' # noqa: E501
_EDITED_ARTICLE_TEMPLATE = '<a href="/{0}/">{1}</a> edited your article: <a href="/article/{2}/">{3}</a>' # noqa: E501
_ALSO_COMMENTED_TEMPLATE = '<a href="/{0}/">{1}</a> also commentend on the post: <a href="/feeds/{2}/">{3}</a>' # noqa: E501
from_user = models.ForeignKey(User, related_name='+')
to_user = models.ForeignKey(User, related_name='+')
date = models.DateTimeField(auto_now_add=True)
feed = models.ForeignKey('feeds.Feed', null=True, blank=True)
question = models.ForeignKey('questions.Question', null=True, blank=True)
answer = models.ForeignKey('questions.Answer', null=True, blank=True)
article = models.ForeignKey('articles.Article', null=True, blank=True)
notification_type = models.CharField(max_length=1,
choices=NOTIFICATION_TYPES)
is_read = models.BooleanField(default=False)
class Meta:
verbose_name = 'Notification'
verbose_name_plural = 'Notifications'
ordering = ('-date',)
def __str__(self):
if self.notification_type == self.LIKED:
return self._LIKED_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()),
self.feed.pk,
escape(self.get_summary(self.feed.post))
)
elif self.notification_type == self.COMMENTED:
return self._COMMENTED_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()),
self.feed.pk,
escape(self.get_summary(self.feed.post))
)
elif self.notification_type == self.FAVORITED:
return self._FAVORITED_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()),
self.question.pk,
escape(self.get_summary(self.question.title))
)
elif self.notification_type == self.ANSWERED:
return self._ANSWERED_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()), |
self.question.pk,
escape(self.get_summary(self.question.title))
)
elif self.notification_type == self.ACCEPTED_ANSWER:
return self._ACCEPTED_ANSWER_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()),
| self.answer.question.pk,
escape(self.get_summary(self.answer.description))
)
elif self.notification_type == self.EDITED_ARTICLE:
return self._EDITED_ARTICLE_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()),
self.article.slug,
escape(self.get_summary(self.article.title))
)
elif self.notification_type == self.ALSO_COMMENTED:
return self._ALSO_COMMENTED_TEMPLATE.format(
escape(self.from_user.username),
escape(self.from_user.profile.get_screen_name()),
self.feed.pk,
escape(self.get_summary(self.feed.post))
)
else:
return 'Ooops! Something went wrong.'
def get_summary(self, value):
summary_size = 50
if len(value) > summary_size:
return '{0}...'.format(value[:summary_size])
else:
return value
|
tsdmgz/ansible | test/runner/lib/sanity/rstcheck.py | Python | gpl-3.0 | 2,126 | 0.002352 | """Sanity test using rstcheck."""
from __future__ import absolute_import, print_function
import os
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
parse_to_dict,
find_executable,
)
from lib.config import (
SanityConfig,
)
class RstcheckTest | (SanitySingleVersion):
"""Sanity test using rstcheck."""
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: SanityResult
"""
with open('test/sanity/rstcheck/ignore-substitutions.txt', 'r') as ignore_fd:
ignore_substitutions = sorted(set(ignore_fd.read().splitlines()))
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] in ( | '.rst',))
if not paths:
return SanitySkipped(self.name)
cmd = [
'python%s' % args.python_version,
find_executable('rstcheck'),
'--report', 'warning',
'--ignore-substitutions', ','.join(ignore_substitutions),
] + paths
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stdout:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
results = [parse_to_dict(pattern, line) for line in stderr.splitlines()]
results = [SanityMessage(
message=r['message'],
path=r['path'],
line=int(r['line']),
column=0,
level=r['level'],
) for r in results]
if results:
return SanityFailure(self.name, messages=results)
return SanitySuccess(self.name)
|
siye1982/kafkey | app/main/__init__.py | Python | bsd-2-clause | 848 | 0.001238 | # -*- coding:utf-8 -*-
import socket
# 判断是否是ip:port
def | is_ip_port(ip_port):
ip_port_arr = ip_port.split(':')
if len(ip_port_arr) != 2 or isinstance(ip_port_arr[1], int):
return False
return is_internal_ip(ip_port_arr[0])
# 判断是否是内网ip
def is_internal_ip(ip):
if not check_ip(ip):
return False
ip = ip_into_int(ip)
| net_a = ip_into_int('10.255.255.255') >> 24
net_b = ip_into_int('172.31.255.255') >> 20
net_c = ip_into_int('192.168.255.255') >> 16
return ip >> 24 == net_a or ip >> 20 == net_b or ip >> 16 == net_c
# 将ip转成int
def ip_into_int(ip):
return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))
# 判断是否是ip
def check_ip(ip):
try:
socket.inet_aton(ip)
return True
except socket.error:
return False
|
nirizr/rematch | server/collab/migrations/0002_auto_20181104_0545.py | Python | gpl-3.0 | 713 | 0.002805 | # Generated by Django 2.1.2 on 2018-11-04 05:45
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('collab', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='instance',
name='created',
field=model | s.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
| migrations.AddField(
model_name='match',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
luiscape/hdx-monitor-sql-collect | app/functions/manage_queue.py | Python | mit | 1,180 | 0.010169 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Function to manage the Redis Queue. In summary
this function either
(a) starts a new queue if no jobs are pending
(b) fetches statistics from a running queue
(c) cancels a queue and starts again
This function was designed to be called directly
by the routes so that the final user knows what
is the current status of the running queues.
'''
import os
import flask
from rq import Queue
from redis import Redis
REDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR')
def getStatus(queue_id='default'):
| '''
Gets status of queue.
'''
queue = Queue(connection=Redis(host=REDIS_HOST), name=queue_id)
result = {
'message': 'Queue `{name}` has {n} jobs that are being processed.'.format(name=queue_id, n=queue.count),
'empty': queue.is_empty(),
'count': queue.count,
'ids': queue.get_job_ids()
}
return result
def cleanQueue(queue_id='default'):
'''
Cleans specified queue.
'''
queue = Queue(connection=Redis(), name=queue_id)
re | sult = {
'success': True,
'message': 'Cleaned {n} jobs successfully from `{name}`.'.format(n=queue.empty(), name=queue_id)
}
return result
|
c3cashdesk/c6sh | src/postix/core/migrations/0057_itemsupplypack_itemsupplypacklog.py | Python | agpl-3.0 | 2,025 | 0.003951 | # Generated by Django 2.1.4 on 2018-12-20 13:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0056_auto_20181207_1404'),
]
operations = [
migrations.CreateModel(
name='ItemSupplyPack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(max_length=190, unique=True)),
('amount', models.IntegerField(default=50)),
('state', models.CharField(choices=[('backoffice', 'In backoffice'), ('troubleshooter', 'With troubleshooter'), ('dissolved', 'Dissolved for other reasons'), ('used', 'Used to refill cash session')], max_length=190)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='supply_packs', to='core.Item')),
],
),
migrations.CreateModel(
name='ItemSupplyPackLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_state', models.CharField(choices=[('backoffice', 'In backoffice'), ('troubleshooter', 'With troubleshooter'), ('dissolved', 'Dissolved for other reasons'), ('used', 'Used to refill cash session')], max_length=190)),
('item_movement', models.F | oreignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='core.ItemMovement', verbose_name='Associated item movement')),
('supply_pack', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='logs', to='core.ItemSupplyPack')) | ,
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='item_supply_logs', to=settings.AUTH_USER_MODEL, verbose_name='User issuing movement')),
],
),
]
|
spulec/moto | moto/dynamodb2/models/dynamo_type.py | Python | apache-2.0 | 8,167 | 0.002204 | from moto.dynamodb2.comparisons import get_comparison_func
from moto.dynamodb2.exceptions import IncorrectDataType
from moto.dynamodb2.models.utilities import bytesize
class DDBType(object):
"""
Official documentation at https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html
"""
BINARY_SET = "BS"
NUMBER_SET = "NS"
STRING_SET = "SS"
STRING = "S"
NUMBER = "N"
MAP = "M"
LIST = "L"
BOOLEAN = "BOOL"
BINARY = "B"
NULL = "NULL"
class DDBTypeConversion(object):
_human_type_mapping = {
val: key.replace("_", " ")
for key, val in DDBType.__dict__.items()
| if key.upper() == key
}
@classmethod
def get_human_type(cls, abbreviated_type):
"""
Args:
abbreviated_type(str): An attribute of DDBType
Returns:
str: The human readable form of the DDBType.
"""
return cls._human_type_mapping.get(abbreviated_type, abbreviated_ | type)
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
if type(type_as_dict) == DynamoType:
self.type = type_as_dict.type
self.value = type_as_dict.value
else:
self.type = list(type_as_dict)[0]
self.value = list(type_as_dict.values())[0]
if self.is_list():
self.value = [DynamoType(val) for val in self.value]
elif self.is_map():
self.value = dict((k, DynamoType(v)) for k, v in self.value.items())
def filter(self, projection_expressions):
nested_projections = [
expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr
]
if self.is_map():
expressions_to_delete = []
for attr in self.value:
if (
attr not in projection_expressions
and attr not in nested_projections
):
expressions_to_delete.append(attr)
elif attr in nested_projections:
relevant_expressions = [
expr[len(attr + ".") :]
for expr in projection_expressions
if expr.startswith(attr + ".")
]
self.value[attr].filter(relevant_expressions)
for expr in expressions_to_delete:
self.value.pop(expr)
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return self.type == other.type and self.value == other.value
def __ne__(self, other):
return self.type != other.type or self.value != other.value
def __lt__(self, other):
return self.cast_value < other.cast_value
def __le__(self, other):
return self.cast_value <= other.cast_value
def __gt__(self, other):
return self.cast_value > other.cast_value
def __ge__(self, other):
return self.cast_value >= other.cast_value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
def __add__(self, other):
if self.type != other.type:
raise TypeError("Different types of operandi is not allowed.")
if self.is_number():
self_value = float(self.value) if "." in self.value else int(self.value)
other_value = float(other.value) if "." in other.value else int(other.value)
return DynamoType(
{DDBType.NUMBER: "{v}".format(v=self_value + other_value)}
)
else:
raise IncorrectDataType()
def __sub__(self, other):
if self.type != other.type:
raise TypeError("Different types of operandi is not allowed.")
if self.type == DDBType.NUMBER:
self_value = float(self.value) if "." in self.value else int(self.value)
other_value = float(other.value) if "." in other.value else int(other.value)
return DynamoType(
{DDBType.NUMBER: "{v}".format(v=self_value - other_value)}
)
else:
raise TypeError("Sum only supported for Numbers.")
def __getitem__(self, item):
if isinstance(item, str):
# If our DynamoType is a map it should be subscriptable with a key
if self.type == DDBType.MAP:
return self.value[item]
elif isinstance(item, int):
# If our DynamoType is a list is should be subscriptable with an index
if self.type == DDBType.LIST:
return self.value[item]
raise TypeError(
"This DynamoType {dt} is not subscriptable by a {it}".format(
dt=self.type, it=type(item)
)
)
def __setitem__(self, key, value):
if isinstance(key, int):
if self.is_list():
if key >= len(self.value):
# DynamoDB doesn't care you are out of box just add it to the end.
self.value.append(value)
else:
self.value[key] = value
elif isinstance(key, str):
if self.is_map():
self.value[key] = value
else:
raise NotImplementedError("No set_item for {t}".format(t=type(key)))
@property
def cast_value(self):
if self.is_number():
try:
return int(self.value)
except ValueError:
return float(self.value)
elif self.is_set():
sub_type = self.type[0]
return set([DynamoType({sub_type: v}).cast_value for v in self.value])
elif self.is_list():
return [DynamoType(v).cast_value for v in self.value]
elif self.is_map():
return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()])
else:
return self.value
def child_attr(self, key):
"""
Get Map or List children by key. str for Map, int for List.
Returns DynamoType or None.
"""
if isinstance(key, str) and self.is_map():
if key in self.value:
return DynamoType(self.value[key])
if isinstance(key, int) and self.is_list():
idx = key
if 0 <= idx < len(self.value):
return DynamoType(self.value[idx])
return None
def size(self):
if self.is_number():
value_size = len(str(self.value))
elif self.is_set():
sub_type = self.type[0]
value_size = sum([DynamoType({sub_type: v}).size() for v in self.value])
elif self.is_list():
value_size = sum([v.size() for v in self.value])
elif self.is_map():
value_size = sum(
[bytesize(k) + DynamoType(v).size() for k, v in self.value.items()]
)
elif type(self.value) == bool:
value_size = 1
else:
value_size = bytesize(self.value)
return value_size
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.cast_value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.cast_value, *range_values)
def is_number(self):
return self.type == DDBType.NUMBER
def is_set(self):
return self.type in (DDBType.STRING_SET, DDBType.NUMBER_SET, DDBType.BINARY_SET)
def is_list(self):
return self.type == DDBType.LIST
def is_map(self):
return self.type == DDBType.MAP
def same_type(self, other):
return self.type == other.type
def pop(self, key, *args, **kwargs):
if self.is_map() or self.is_list():
self.value.pop(key, *args, **kwargs)
else:
raise TypeError("pop not supported for DynamoType {t}".format(t=self.type))
|
ctgk/BayesianNetwork | test/math/test_divide.py | Python | mit | 637 | 0 | import unittest
import numpy as np
import bayesnet as bn
class TestDivide(unittest.TestCase):
def test_divide(self):
x = bn.Parame | ter(10.)
z = x / 2
self.assertEqual(z.value, 5)
z.backward()
self.assertEqual(x.grad, 0.5)
x = np.random.rand(5, 10, 3)
y = np.random.rand(10, 1)
p = bn.Parameter(y | )
z = x / p
self.assertTrue((z.value == x / y).all())
z.backward(np.ones((5, 10, 3)))
d = np.sum(-x / y ** 2, axis=0).sum(axis=1, keepdims=True)
self.assertTrue((p.grad == d).all())
if __name__ == '__main__':
unittest.main()
|
mwcraig/conda-build | conda_build/jinja_context.py | Python | bsd-3-clause | 1,276 | 0.004702 | '''
Created on Jan 16, 2014
@author: sean
'''
from __ | future__ import absolute_import, division, print_function
import json
import os
from conda.compat import PY3
from .environ import get_dict as get_environ
_setuptools_data = None
def load_setuptools(setup_file='setup.py'):
global _setuptools_data
if _setuptools_data is None:
_setuptools_data = {}
def setup(**kw):
_setuptools_data.update(kw)
import setuptools
#Add current directory to path
import sys
sys | .path.append('.')
#Patch setuptools
setuptools_setup = setuptools.setup
setuptools.setup = setup
exec(open(setup_file).read())
setuptools.setup = setuptools_setup
del sys.path[-1]
return _setuptools_data
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def context_processor():
ctx = get_environ()
environ = dict(os.environ)
environ.update(get_environ())
ctx.update(load_setuptools=load_setuptools,
load_npm=load_npm,
environ=environ)
return ctx
|
Valeureux/wezer-exchange | __unreviewed__/website_community_template/__openerp__.py | Python | agpl-3.0 | 1,583 | 0 | # -*- coding: utf-8 -*-
#
#
# Website Marketplace
# Copyright (C) 2014 Valeureux Group (<http://Valeureux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Website Community Template',
'category': 'Website',
'version': '1.0',
'description': """
""",
'author': 'Valeureux for Wezer.org',
'depends': [
| 'auth_signup',
'website',
'website_marketplace',
'website_blog'
],
'data': [
'views/assets.xml',
'views/snippets.xml',
'views/templates/404.xml',
'views/templates/layout.xml',
'views/templates/login.xml',
'views/templates/login_layout.xml',
'views/templates/reset_password.xml',
'views/templates/signup.xml',
'views/templates/signup_fields.xml',
'views/templates/register_template.xml',
'views/templates/blog_template.xml',
],
'installable': True
}
|
metacloud/molecule | test/unit/command/init/test_role.py | Python | mit | 5,090 | 0 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this per | mission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
from molecule.command.init import role
@pytest.fixture
def _command_args():
return {
'dependency_name': 'galaxy',
'driver_name': 'docker',
'lint_name': 'ansible-lint',
'provisioner_name': 'ansible',
'role_name': 'test-role',
'scenario_name': 'default',
'subcommand': __name__,
'verifier_name': 'testinfra'
}
@pytest.fixture
def _instance(_command_args):
return role.Role(_command_args)
@pytest.fixture
def _resources_folder_path():
resources_folder_path = os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
'resources')
return resources_folder_path
@pytest.fixture
def custom_template_dir(_resources_folder_path):
custom_template_dir_path = os.path.join(_resources_folder_path,
'custom_role_template')
return custom_template_dir_path
@pytest.fixture
def invalid_template_dir(_resources_folder_path):
invalid_role_template_path = os.path.join(_resources_folder_path,
'invalid_role_template')
return invalid_role_template_path
@pytest.fixture
def custom_readme_content(custom_template_dir):
readme_path = os.path.join(custom_template_dir,
'{{cookiecutter.role_name}}', 'README.md')
custom_readme_content = ""
with open(readme_path, 'r') as readme:
custom_readme_content = readme.read()
return custom_readme_content
def test_execute(temp_dir, _instance, patched_logger_info,
patched_logger_success):
_instance.execute()
msg = 'Initializing new role test-role...'
patched_logger_info.assert_called_once_with(msg)
assert os.path.isdir('./test-role')
assert os.path.isdir('./test-role/molecule/default')
assert os.path.isdir('./test-role/molecule/default/tests')
role_directory = os.path.join(temp_dir.strpath, 'test-role')
msg = 'Initialized role in {} successfully.'.format(role_directory)
patched_logger_success.assert_called_once_with(msg)
def test_execute_role_exists(temp_dir, _instance, patched_logger_critical):
_instance.execute()
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
msg = 'The directory test-role exists. Cannot create new role.'
patched_logger_critical.assert_called_once_with(msg)
def test_execute_with_custom_template(temp_dir, custom_template_dir,
custom_readme_content, _command_args):
_command_args['template'] = custom_template_dir
custom_template_instance = role.Role(_command_args)
custom_template_instance.execute()
readme_path = './test-role/README.md'
assert os.path.isfile(readme_path)
with open(readme_path, 'r') as readme:
assert readme.read() == custom_readme_content
assert os.path.isdir('./test-role/molecule/default')
assert os.path.isdir('./test-role/molecule/default/tests')
def test_execute_with_absent_template(temp_dir, _command_args,
patched_logger_critical):
incorrect_path = os.path.join("absent_template_dir")
_command_args['template'] = incorrect_path
absent_template_instance = role.Role(_command_args)
with pytest.raises(SystemExit) as e:
absent_template_instance.execute()
assert e.value.code == 1
patched_logger_critical.assert_called_once()
def test_execute_with_incorrect_template(temp_dir, invalid_template_dir,
_command_args,
patched_logger_critical):
_command_args['template'] = invalid_template_dir
invalid_template_instance = role.Role(_command_args)
with pytest.raises(SystemExit) as e:
invalid_template_instance.execute()
assert e.value.code == 1
patched_logger_critical.assert_called_once()
|
HERA-Team/librarian | alembic/env.py | Python | bsd-2-clause | 1,321 | 0.002271 | # -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD License.
"""This script is some boilerplate needed by Alembic to do its fancy database
migration stuff.
"""
# A hack so that we can get the librarian_server module.
import sys
sys.path.insert(0, '.')
from alembic import context
from logging.config import fileConfig
config = context.config
fileConfig(config.config_file_name)
from librarian_server import app, db
target_metadata = db.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode -- all we need is a URL | .
"""
url = app.config['SQLALCHEMY_DATABASE_URI']
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode -- using the actual Librarian database
connection.
"""
with db.engine.connect() | as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
CSC-IT-Center-for-Science/pouta-blueprints | migrations/versions/37s5rj356547_.py | Python | mit | 611 | 0.00982 | """empty message
Revision ID: 37s5rj356547
Revises: j4d9g3scvf4s
Create Date: 2019-02-20 16:45:34.186981
"""
# revision identifiers, used by Alembic.
revision = '37s5rj356547'
down_revision = 'j4d9g3scvf4s'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('group_quota', sa.Float(), nullable=True))
### end Alembic commands ###
def downgrade(): |
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'group_quota')
| ### end Alembic commands ###
|
deadbok/wp-passwd-reset-vestacp | change-wp-conf-secrets.py | Python | mit | 4,552 | 0.026588 | # -*- coding: utf-8 -*-
"""
Update the secrets in a Wordpress configuration file.
MIT License
Copyright (c) 2016 Martin Bo Kristensen Grønholdt
Permission is her | eby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be inc | luded in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
"""
from __future__ import print_function
import glob
import argparse
import os.path
import base64
import M2Crypto
def print_welcome():
"""
Print welcome message.
"""
print("Changing WordPress password and salt.")
def change_value(value, config, start):
"""
Change a config value with define PHP syntax.
"""
for i in range(0, len(config)):
line = config[i].replace(' ', '');
line = line.replace('\t', '');
if line.startswith(start):
config[i] = line[:len(start) - 1] + " '" + value + "');\n"
print('.', end='')
return
print('e', end='')
exit('Error changing value for: ' + start)
def generate_salt(length = 64):
return base64.b64encode(M2Crypto.m2.rand_bytes(length))
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("config_file", type=argparse.FileType('rw+'),
help="Path the WordPress configuration file to change the password and salts for.")
arg_parser.add_argument("-n", "--name", dest="name", default=None,
help="The WordPress database name.")
arg_parser.add_argument("-u", "--user", dest="user", default=None,
help="The WordPress database user.")
arg_parser.add_argument("-p", "--pass", dest="passwd", default=None,
help="The WordPress database password.")
arg_parser.add_argument("-s", "--salts", action="store_true", default=False,
help="Change the WordPress salts.")
arg_parser.add_argument("-b", "--backup", action="store_true", default=False,
help="Backup the current WordPress configuration file.")
args = arg_parser.parse_args()
print_welcome()
if args.config_file is None:
exit('Error opening configuration file.')
config = args.config_file.readlines()
if (len(config) == 0):
exit('Error, empty configuration file.')
try:
if (args.backup):
filename = args.config_file.name + '.bak'
with open(filename, 'w') as backup:
backup.write(''.join(config))
backup.close()
print('Original WordPress config saved as: ' + filename)
except Exception as ex:
exit('Error creating backup: ' + str(ex))
if args.user is not None:
if args.user != '':
print('Changing user: "' + args.user + '"', end='')
change_value(args.user, config, "define('DB_USER','")
print
if args.passwd is not None:
if (args.passwd != ''):
print('Changing password: "' + args.passwd + '"', end='')
change_value(args.passwd, config, "define('DB_PASSWORD','")
print('')
if args.name is not None:
if (args.name != ''):
print('Database name: "' + args.name + '"', end='')
change_value(args.name, config, "define('DB_NAME','")
print
if args.salts is not None:
if (args.salts):
print('Changing salts.', end='')
change_value(generate_salt(), config, "define('AUTH_KEY','")
change_value(generate_salt(), config, "define('SECURE_AUTH_KEY','")
change_value(generate_salt(), config, "define('LOGGED_IN_KEY','")
change_value(generate_salt(), config, "define('NONCE_KEY','")
change_value(generate_salt(), config, "define('AUTH_SALT','")
change_value(generate_salt(), config, "define('SECURE_AUTH_SALT','")
change_value(generate_salt(), config, "define('LOGGED_IN_SALT','")
change_value(generate_salt(), config, "define('NONCE_SALT','")
print
print('done')
args.config_file.seek(0)
args.config_file.write(''.join(config))
args.config_file.close()
if __name__ == '__main__':
main()
|
gnufede/tddgoat | tddgoat/lists/models.py | Python | mit | 87 | 0.022989 | from django.db import models
|
class Item(models.Model):
text = models.TextField() | |
jcu-eresearch/jcu.dc24.ingesterapi | jcudc24ingesterapi/schemas/tests.py | Python | bsd-3-clause | 962 | 0.006237 | import unittest
from jcudc24ingesterapi.schemas.metadata_schemas import DataEntryMetadataSchema
from jcudc24ingesterapi.schemas.data_types import *
class TestSchemas(unittest.TestCase):
def test_valid_schemas(self):
"""Test that the schema creation APIs properly validate the schemas
as they are constructed.
"""
good_schema = DataE | ntryMetadataSchema()
good_schema.addAttr(Double("attr1"))
good_schema.addAttr(String("attr2"))
bad_schema = DataEntryMetadataSchema()
self.assertRaises(ValueError, bad_schema.addAttr, str)
def test_data_types(self):
pass
class TestDataTypes(unittest.TestCase):
def test_names(self):
self.assertRaises(Value | Error, Double, "")
self.assertRaises(ValueError, Double, "dfkj dfjk")
self.assertRaises(ValueError, Double, "1dfkjdfjk")
Double("validname")
if __name__ == '__main__':
unittest.main() |
hatwar/buyback-erpnext | erpnext/setup/page/setup_wizard/setup_wizard.py | Python | agpl-3.0 | 17,916 | 0.029917 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json, copy
from frappe.utils import cstr, flt, getdate, strip
from frappe import _
from frappe.utils.file_manager import save_file
from frappe.translate import (set_default_language, get_dict,
get_lang_dict, send_translations, get_language_from_code)
from frappe.geo.country_info import get_country_info
from frappe.utils.nestedset import get_root_of
from .default_website import website_maker
import install_fixtures
from .sample_data import make_sample_data
from erpnext.accounts.utils import FiscalYearError
from erpnext.accounts.doctype.account.account import RootNotEditable
@frappe.whitelist()
def setup_account(args=None):
try:
if frappe.db.sql("select name from tabCompany"):
frappe.throw(_("Setup Already Complete!!"))
args = process_args(args)
if args.language and args.language != "english":
set_default_language(args.language)
frappe.clear_cache()
install_fixtures.install(args.get("country"))
update_user_name(args)
frappe.local.message_log = []
create_fiscal_year_and_company(args)
frappe.local.message_log = []
create_users(args)
frappe.local.message_log = []
set_defaults(args)
frappe.local.message_log = []
create_territories()
frappe.local.message_log = []
create_price_lists(args)
frappe.local.message_log = []
create_feed_and_todo()
frappe.local.message_log = []
create_email_digest()
frappe.local.message_log = []
create_letter_head(args)
frappe.local.message_log = []
create_taxes(args)
frappe.local.message_log = []
create_items(args)
frappe.local.message_log = []
create_customers(args)
frappe.local.message_log = []
create_suppliers(args)
frappe.local.message_log = []
frappe.db.set_default('desktop:home_page', 'desktop')
website_maker(args.company_name.strip(), args.company_tagline, args.name)
create_logo(args)
frappe.db.commit()
login_as_first_user(args)
frappe.db.commit()
frappe.clear_cache()
if args.get("add_sample_data"):
try:
make_sample_data()
frappe.clear_cache()
except FiscalYearError:
pass
except:
if args:
traceback = frappe.get_traceback()
for hook in frappe.get_hooks("setup_wizard_exception"):
frappe.get_attr(hook)(traceback, args)
raise
else:
for hook in frappe.get_hooks("setup_wizard_success"):
frappe.get_attr(hook)(args)
def process_args(args):
if not args:
args = frappe.local.form_dict
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
# strip the whitespace
for key, value in args.items():
| if isinstance(value, basestring):
args[key] = strip(value)
return args
def update_user_name(args):
if args.get("email"):
ar | gs['name'] = args.get("email")
_mute_emails, frappe.flags.mute_emails = frappe.flags.mute_emails, True
doc = frappe.get_doc({
"doctype":"User",
"email": args.get("email"),
"first_name": args.get("first_name"),
"last_name": args.get("last_name")
})
doc.flags.no_welcome_mail = True
doc.insert()
frappe.flags.mute_emails = _mute_emails
from frappe.auth import _update_password
_update_password(args.get("email"), args.get("password"))
else:
args['name'] = frappe.session.user
# Update User
if not args.get('last_name') or args.get('last_name')=='None':
args['last_name'] = None
frappe.db.sql("""update `tabUser` SET first_name=%(first_name)s,
last_name=%(last_name)s WHERE name=%(name)s""", args)
if args.get("attach_user"):
attach_user = args.get("attach_user").split(",")
if len(attach_user)==3:
filename, filetype, content = attach_user
fileurl = save_file(filename, content, "User", args.get("name"), decode=True).file_url
frappe.db.set_value("User", args.get("name"), "user_image", fileurl)
add_all_roles_to(args.get("name"))
def create_fiscal_year_and_company(args):
curr_fiscal_year = get_fy_details(args.get('fy_start_date'), args.get('fy_end_date'))
frappe.get_doc({
"doctype":"Fiscal Year",
'year': curr_fiscal_year,
'year_start_date': args.get('fy_start_date'),
'year_end_date': args.get('fy_end_date'),
}).insert()
# Company
frappe.get_doc({
"doctype":"Company",
'domain': args.get("industry"),
'company_name':args.get('company_name').strip(),
'abbr':args.get('company_abbr'),
'default_currency':args.get('currency'),
'country': args.get('country'),
'chart_of_accounts': args.get(('chart_of_accounts')),
}).insert()
# Bank Account
args["curr_fiscal_year"] = curr_fiscal_year
def create_price_lists(args):
for pl_type, pl_name in (("Selling", _("Standard Selling")), ("Buying", _("Standard Buying"))):
frappe.get_doc({
"doctype": "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": 1 if pl_type == "Buying" else 0,
"selling": 1 if pl_type == "Selling" else 0,
"currency": args["currency"]
}).insert()
def set_defaults(args):
# enable default currency
frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)
global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
global_defaults.update({
'current_fiscal_year': args.curr_fiscal_year,
'default_currency': args.get('currency'),
'default_company':args.get('company_name').strip(),
"country": args.get("country"),
})
global_defaults.save()
number_format = get_country_info(args.get("country")).get("number_format", "#,###.##")
# replace these as float number formats, as they have 0 precision
# and are currency number formats and not for floats
if number_format=="#.###":
number_format = "#.###,##"
elif number_format=="#,###":
number_format = "#,###.##"
system_settings = frappe.get_doc("System Settings", "System Settings")
system_settings.update({
"language": args.get("language"),
"time_zone": args.get("timezone"),
"float_precision": 3,
"email_footer_address": args.get("company"),
'date_format': frappe.db.get_value("Country", args.get("country"), "date_format"),
'number_format': number_format,
'enable_scheduler': 1 if not frappe.flags.in_test else 0
})
system_settings.save()
accounts_settings = frappe.get_doc("Accounts Settings")
accounts_settings.auto_accounting_for_stock = 1
accounts_settings.save()
stock_settings = frappe.get_doc("Stock Settings")
stock_settings.item_naming_by = "Item Code"
stock_settings.valuation_method = "FIFO"
stock_settings.stock_uom = _("Nos")
stock_settings.auto_indent = 1
stock_settings.auto_insert_price_list_rate_if_missing = 1
stock_settings.automatically_set_serial_nos_based_on_fifo = 1
stock_settings.save()
selling_settings = frappe.get_doc("Selling Settings")
selling_settings.cust_master_name = "Customer Name"
selling_settings.so_required = "No"
selling_settings.dn_required = "No"
selling_settings.save()
buying_settings = frappe.get_doc("Buying Settings")
buying_settings.supp_master_name = "Supplier Name"
buying_settings.po_required = "No"
buying_settings.pr_required = "No"
buying_settings.maintain_same_rate = 1
buying_settings.save()
notification_control = frappe.get_doc("Notification Control")
notification_control.quotation = 1
notification_control.sales_invoice = 1
notification_control.purchase_order = 1
notification_control.save()
hr_settings = frappe.get_doc("HR Settings")
hr_settings.emp_created_by = "Naming Series"
hr_settings.save()
def create_feed_and_todo():
"""update Activity feed and create todo for creation of item, customer, vendor"""
frappe.get_doc({
"doctype": "Feed",
"feed_type": "Comment",
"subject": "ERPNext Setup Complete!"
}).insert(ignore_permissions=True)
def create_email_digest():
from frappe.utils.user import get_system_managers
system_managers = get_system_managers(only_name=True)
if not system_managers:
return
companies = frappe.db.sql_list("select name FROM `tabCompany`")
for company in companies:
if not frappe.db.exists("Email Digest", "Default Weekly Digest - " + company):
edigest = frappe.get_doc({
"doctype": "Email Digest",
"name": "Default Weekly Digest - " + company,
" |
christophelec/github3.py | tests/integration/test_notifications.py | Python | bsd-3-clause | 1,512 | 0 | """Integration test for Notifications."""
import github3
from .helper import IntegrationHelper
class TestThread(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_subscription(self):
"""Show that a user can retrieve notifications for repository"""
self.token_log | in()
cassette_name = self.cassette_name("subscription")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all=True))
assert len(threads) > 0
thread = threads[0]
assert isinstance(thread, github3.notifications.Thread)
assert isinstance(thread.subscription(),
| github3.notifications.Subscription)
class TestSubscription(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_set(self):
"""Show that user can successful set subscription"""
self.token_login()
cassette_name = self.cassette_name("set")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all='true'))
assert len(threads) > 0
subscription = threads[0].subscription()
assert subscription.set(True, False) is None
assert isinstance(subscription, github3.notifications.Subscription)
|
zhengbomo/python_practice | project/Lagou/Downloader.py | Python | mit | 2,671 | 0.001139 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import shutil
import hashlib
import urllib
import urllib2
class Downloader(object):
def __init__(self):
pass
@staticmethod
def get_html(url, data, folder):
file_url = url + data
file_url = file_url.encode('UTF-8')
file_url = urllib.quote(file_url)
content = Downloader.__get_file_content(file_url, folder)
if not content:
content = Downloader.__download(url, data)
if content:
# 保存到文件
Downloader.__cache_url(file_url, content, folder)
return False, content
else:
return True, content
@staticmethod
def move_file(url, data, src, dst):
file_url = url + data
file_url = file_url.encode('UTF-8')
file_url = urllib.quote(file_url)
src = Downloader.__get_file_path(file_url, src)
dst = Downloader.__get_file_path(file_url, dst)
shutil.move(src, dst)
@staticmethod
def remove_file(url, data, folder):
file_url = url + data
file_url = file_url.encode('UTF-8')
file_url = urllib.quote(file_url)
src = Downloader.__get_file_path(file_url, folder)
os.remove(src)
@staticmethod
def __download(url, data):
"""下载url内容"""
request = urllib2. | Request(url)
try:
response = urllib2.urlopen(request, data)
return response.read()
except Exception, e:
print str(e)
return None
@staticmethod
def __get_file_content(url, folder):
path = Downloader.__get_file_path(url, folder)
if os.path.isfile(path | ):
file_object = open(path)
try:
return file_object.read()
finally:
file_object.close()
return None
@staticmethod
def __cache_url(url, html, folder):
path = Downloader.__get_file_path(url, folder)
output = open(path, 'w')
output.write(html)
output.close()
@staticmethod
def __get_file_path(url, folder):
"""返回缓存的路径"""
name = Downloader.__hash(url)
html_folder = os.path.join(os.getcwd(), '.html')
if not os.path.isdir(html_folder):
os.mkdir(html_folder)
folder = os.path.join(html_folder, folder)
if not os.path.isdir(folder):
os.mkdir(folder)
return os.path.join(folder, name)
@staticmethod
def __hash(url):
"""对url做hash"""
m2 = hashlib.md5()
m2.update(url)
return m2.hexdigest()
|
mutirri/bokeh | bokeh/models/plots.py | Python | bsd-3-clause | 15,027 | 0.001331 | """ Models for representing top-level plot objects.
"""
from __future__ import absolute_import
from six import string_types
from ..enums import Location
from ..mixins import LineProps, TextProps
from ..plot_object import PlotObject
from ..properties import Bool, Int, String, Color, Enum, Auto, Instance, Either, List, Dict, Include
from ..query import find
from ..util.string import nice_join
from .glyphs import Glyph
from .ranges import Range, Range1d
from .renderers import Renderer, GlyphRenderer
from .sources import DataSource, ColumnDataSource
from .tools import Tool, ToolEvents
from .widget import Widget
class PlotContext(PlotObject):
""" A container for multiple plot objects.
``PlotContext`` objects are a source of confusion. Their purpose
is to collect together different top-level objects (e.g., ``Plot``
or layout widgets). The reason for this is that different plots may
need to share ranges or data sources between them. A ``PlotContext``
is a container in which such sharing can occur between the contained
objects.
"""
children = List(Instance(PlotObject), help="""
A list of top level objects in this ``PlotContext`` container.
""")
# TODO (bev) : is this used anywhere?
class PlotList(PlotContext):
# just like plot context, except plot context has special meaning
# everywhere, so plotlist is the generic one
pass
class Plot(Widget):
""" Model representing a plot, containing glyphs, guides, annotations.
"""
def __init__(self, **kwargs):
if "tool_events" not in kwargs:
kwargs["tool_events"] = ToolEvents()
super(Plot, self).__init__(**kwargs)
def select(self, *args, **kwargs):
''' Query this object and all of its references for objects that
match the given selector.
There are a few different ways to call the ``select`` method.
The most general is to supply a JSON-like query dictionary as the
single argument or as keyword arguments:
Args:
selector (JSON-like) : some sample text
Keyword Arguments:
kwargs : query dict key/values as keyword arguments
For convenience, queries on just names can be made by supplying
the ``name`` string as the single parameter:
Args:
name (str) : the name to query on
Also queries on just type can be made simply by supplying the
``PlotObject`` subclass as the single parameter:
Args:
type (PlotObject) : the type to query on
Returns:
seq[PlotObject]
Examples:
.. code-block:: python
# These two are equivalent
p.select({"type": HoverTool})
p.select(HoverTool)
# These two are also equivalent
p.select({"name": "mycircle"})
p.select("mycircle")
# Keyword arguments can be supplied in place of selector dict
p.select({"name": "foo", "type": HoverTool})
p.select(name="foo", type=HoverTool)
'''
if len(args) > 1:
raise TypeError("select accepts at most ONE positional argument.")
if len(args) > 0 and len(kwargs) > 0:
raise TypeError("select accepts EITHER a positional argument, OR keyword arguments (not both).")
if len(args) == 0 and len(kwargs) == 0:
raise TypeError("select requires EITHER a positional argument, OR keyword arguments.")
if args:
arg = args[0]
if isinstance(arg, dict):
selector = arg
elif isinstance(arg, string_types):
selector = dict(name=arg)
elif issubclass(arg, PlotObject):
selector = {"type" : arg}
else:
raise RuntimeError("Selector must be a dictionary, string or plot object.")
else:
selector = kwargs
# Want to pass selector that is a dictionary
from ..plotting_helpers import _list_attr_splat
return _list_attr_splat(find(self.references(), selector, {'plot': self}))
def row(self, row, gridplot):
''' Return whether this plot is in a given row of a GridPlot.
Args:
row (int) : index of the row to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.row(row)
def column(self, col, gridplot):
''' Return whether this plot is in a given column of a GridPlot.
Args:
col (int) : index of the column to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.column(col)
def add_layout(self, obj, place='center'):
''' Adds an object to the plot in a specified place.
Args:
obj (Renderer) : the object to add to the Plot
place (str, optional) : where to add the object (default: 'center')
Valid places are: 'left', 'right', 'above', 'below', 'center'.
Returns:
None
'''
valid_places = ['left', 'right', 'above', 'below', 'center']
if place not in valid_places:
raise ValueError(
"Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
)
if hasattr(obj, 'plot'):
if obj.plot is not None:
raise ValueError("object to be added already has 'plot' attribute set")
obj.plot = self
self.renderers.append(obj)
if place is not 'center':
getattr(self, place).append(obj)
def add_tools(self, *tools):
''' Adds an tools to the plot.
Args:
*tools (Tool) : the tools to add to the Plot
Returns:
None
'''
if not all(isinstance(tool, Tool) for tool in tools):
raise ValueError("All arguments to add_tool must be Tool subclasses.")
for tool in tools:
if tool.plot is not None:
raise ValueError("tool %s to be added already has 'plot' attribute set" % tool)
tool.plot = self
self.tools.append(tool)
def add_glyph(self, source_or_glyph, glyph=None, **kw):
''' Adds a glyph to the plot with associated data sources and ranges.
This function will take care of creating and configurinf a Glyph object,
and then add it to the plot's list of renderers.
Args:
source (DataSource) : a data source for the glyphs to all use
glyph (Glyph) : the glyph to add to the Plot
Keyword Arguments:
Any additional keyword arguments are passed on as-is to the
Glyph initializer.
Returns:
glyph : Glyph
'''
if glyph is not None:
source = source_or_glyph
else:
source, glyph = ColumnDataSource(), source_or_glyph
if not isinstance(source, DataSource):
raise ValueError("'source' argument to add_glyph() must be DataSource subclass")
if not isinstance(glyph, Glyph):
raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass")
g = GlyphRenderer(data_source=source, glyph=glyph, **kw)
self.renderers.append(g)
return g
x_range = Instance(Range, help="""
The (default) data range of the horizontal dimension of the plot.
""")
y_range = Instance(Range, help="""
The (default) data range of the vertical dimension of the plot.
""")
x_mapper_type = Either(Auto, String, help="""
What kind of mapper to use to convert x-coordinates in data space
into x-coordinates in screen space.
Typically this can be determined automatically, but this property
can be useful to, e.g., show datetime values as floating point
| "seconds since epoch" instead of formatted dates.
""")
y_mapper_ty | pe = Either(Auto, String, help="""
What kind of mapper to use to convert y-coordina |
acq4/acq4 | acq4/devices/NiDAQ/resample_test.py | Python | mit | 4,804 | 0.030183 | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import time
import numpy as np
from pyqtgraph import PlotWindow
from six.moves import zip
sys.path.append(' | ../../util')
sys.path.append('../..')
from .nidaq import NiDAQ
from pyqtgraph.functions import mkPen, mkColor
pw = PlotWindow()
time.clock()
sr = 100000
dur = 2.0
data = np | .zeros(int(sr*dur))
dlen = len(data)
xVals = np.linspace(0, dur, dlen)
data += np.random.normal(size=dlen) + 20.
data[round(dlen*0.102):round(dlen*0.3)] += 20
data[round(dlen*0.3):round(dlen*0.5)] += 30
data[round(dlen*0.4)]+= 1000
data += np.sin(xVals*40677*2.0*np.pi)*4.
#data = sin(linspace(0, dur, sr*dur)* linspace(0, sr*2, sr*dur))
methods = ['subsample', 'mean', 'fourier', 'bessel_mean', 'butterworth_mean']
colors = [mkColor((255,0,0)), mkColor((0,255,0)), mkColor((0,0,255)), mkColor((255,0,255)), mkColor((255,255,0))]
def run(ds):
pw.plot(data, clear=True)
for m, c in zip(methods, colors):
d1 = data.copy()
t = time.clock()
d2 = NiDAQ.downsample(d1, ds, method=m)
print("Method %d: %f" % (m, time.clock()-t))
p = pw.plot(y=d2, x=np.linspace(0, len(d2)*ds, len(d2)), pen=mkPen(c))
p.setZValue(10000)
#pw.plot(d2, pen=mkPen(colors[i-1]))
def showDownsample(**kwargs):
d1 = data.copy()
d2 = NiDAQ.downsample(d1, **kwargs)
xv2 = xVals[::kwargs['ds']][:len(d2)]
pw.plot(y=d1, x=xVals, clear=True)
pw.plot(y=d2[:len(xv2)], x=xv2, pen=mkPen((255, 0, 0)))
def showTransfer(**kwargs):
xVals = np.linspace(0, dur, sr*dur)
#data = sin(xVals* linspace(0, sampr*2, sampr*dur))
data = np.random.normal(size=sr*dur)
data2 = NiDAQ.lowpass(data, **kwargs)
pw.plot(y=data, x=xVals, clear=True)
pw.plot(y=data2, x=xVals, pen=mkPen((255, 0, 0)))
#def downsample(data, ds, method=1):
#if method == 1:
## Method 1:
## decimate by averaging points together (does not remove HF noise, just folds it down.)
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#elif method == 2:
## Method 2:
## Decimate using fourier resampling -- causes ringing artifacts.
#newLen = int(data.shape[0] / ds)
#data = scipy.signal.resample(data, newLen, window=8) # Use a kaiser window with beta=8
#elif method == 3:
## Method 3:
## Decimate by lowpass filtering, then average points together. (slow, artifacts at beginning and end of traces)
## Not as good as signal.resample for removing HF noise, but does not generate artifacts either.
## worst at removing HF noise (??)
#b,a = scipy.signal.bessel(8, 1.0/ds, btype='low')
#base = data.mean()
#data = scipy.signal.lfilter(b, a, data-base) + base
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#elif method == 4:
##Method 4:
### Pad data, forward+reverse bessel filter, then average down
#b,a = scipy.signal.bessel(4, 1.0/ds, btype='low')
#padded = numpy.hstack([data[:100], data, data[-100:]]) ## can we intelligently decide how many samples to pad with?
#data = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, padded)[::-1])[::-1][100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
##data = scipy.signal.lfilter(b, a, padded)[100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#elif method == 5:
##Method 4:
### Pad data, forward+reverse butterworth filter, then average down
#ord, Wn = scipy.signal.buttord(1.0/ds, 1.5/ds, 0.01, 0.99)
#print "butt ord:", ord, Wn
#b,a = scipy.signal.butter(ord, Wn, btype='low')
#padded = numpy.hstack([data[:100], data, data[-100:]]) ## can we intelligently decide how many samples to pad with?
#data = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, padded)[::-1])[::-1][100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
##data = scipy.signal.lfilter(b, a, padded)[100:-100] ## filter twice; once forward, once reversed. (This eliminates phase changes)
#newLen = int(data.shape[0] / ds) * ds
#data = data[:newLen]
#data.shape = (data.shape[0]/ds, ds)
#data = data.mean(axis=1)
#return data
|
mindprince/test-infra | experiment/generate_tests.py | Python | apache-2.0 | 11,228 | 0.000267 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create e2e test definitions.
Usage example:
In $GOPATH/src/k8s.io/test-infra,
$ bazel run //experiment:generate_tests -- \
--yaml-config-path=experiment/test_config.yaml \
"""
import argparse
import hashlib
import os
import ruamel.yaml as yaml
# TODO(yguo0905): Generate Prow and testgrid configurations.
PROW_CONFIG_TEMPLATE = """
tags:
- generated # AUTO-GENERATED by experiment/generate_tests.py - DO NOT EDIT!
interval:
agent: kubernetes
labels:
preset-service-account: "true"
preset-k8s-ssh: "true"
name:
spec:
containers:
- args:
env:
image: gcr.io/k8s-testimages/kubekins-e2e:v20180730-8b7ab3104-master
"""
COMMENT = 'AUTO-GENERATED by experiment/generate_tests.py - DO NOT EDIT.'
def get_sha1_hash(data):
"""Returns the SHA1 hash of the specified data."""
sha1_hash = hashlib.sha1()
sha1_hash.update(data)
return sha1_hash.hexdigest()
def substitute(job_name, lines):
"""Replace '${job_name_hash}' in lines with the SHA1 hash of job_name."""
return [line.replace('${job_name_hash}', get_sha1_hash(job_name)[:10]) \
for line in lines]
def get_args(job_name, field):
"""Returns a list of args for the given field."""
if not field:
return []
return substitute(job_name, field.get('args', []))
def write_prow_configs_file(output_file, job_defs):
"""Writes the Prow configurations into output_file."""
with open(output_file, 'w') as fp:
yaml.dump(
job_defs, fp, Dumper=yaml.RoundTripDumper, width=float("inf"))
fp.write('\n')
def apply_job_overrides(envs_or_args, job_envs_or_args):
'''Applies the envs or args overrides defined in the job level'''
for job_env_or_arg in job_envs_or_args:
name = job_env_or_arg.split('=', 1)[0]
env_or_arg = next(
(x for x in envs_or_args if (x.strip().startswith('%s=' % name) or
x.strip() == name)), None)
if env_or_arg:
envs_or_args.remove(env_or_arg)
envs_or_args.append(job_env_or_arg)
class E2ENodeTest(object):
    """Generates job and Prow configurations for a single node e2e test.

    The configuration pieces (common args, images, k8s versions, test
    suites) are looked up from the ``node*`` sections of ``config`` using
    fields parsed out of the job name (see generate()).
    """

    def __init__(self, job_name, job, config):
        self.job_name = job_name
        self.job = job
        self.common = config['nodeCommon']
        self.images = config['nodeImages']
        self.k8s_versions = config['nodeK8sVersions']
        self.test_suites = config['nodeTestSuites']

    def __get_job_def(self, args):
        """Returns the job definition from the given args."""
        return {
            'scenario': 'kubernetes_e2e',
            'args': args,
            # NOTE(review): 'UNNOWN' reads like a typo for 'UNKNOWN', but it
            # is emitted verbatim into generated configs — confirm no
            # consumer matches this exact sentinel before renaming it.
            'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
            # Indicates that this job definition is auto-generated.
            'tags': ['generated'],
            '_comment': COMMENT,
        }

    def __get_prow_config(self, test_suite, k8s_version):
        """Returns the Prow config for the job from the given fields."""
        prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
        prow_config['name'] = self.job_name
        prow_config['interval'] = self.job['interval']
        # Assumes that the value in --timeout is of minutes.
        # x[10:-1] strips the '--timeout=' prefix (10 chars) plus the
        # trailing unit character (e.g. '--timeout=90m' -> '90').
        timeout = int(next(
            x[10:-1] for x in test_suite['args'] if (
                x.startswith('--timeout='))))
        container = prow_config['spec']['containers'][0]
        # Normalize empty/None 'args' and 'env' entries from the template
        # to real lists so the appends below are safe.
        if not container['args']:
            container['args'] = []
        if not container['env']:
            container['env'] = []
        # Prow timeout = job timeout + 20min
        container['args'].append('--timeout=%d' % (timeout + 20))
        container['args'].extend(k8s_version.get('args', []))
        container['args'].append('--root=/go/src')
        container['env'].extend([{'name':'GOPATH', 'value': '/go'}])
        # Specify the appropriate kubekins-e2e image. This allows us to use a
        # specific image (containing a particular Go version) to build and
        # trigger the node e2e test to avoid issues like
        # https://github.com/kubernetes/kubernetes/issues/43534.
        if k8s_version.get('prowImage', None):
            container['image'] = k8s_version['prowImage']
        return prow_config

    def generate(self):
        '''Returns the job and the Prow configurations for this test.'''
        # Only the last three dash-separated fields of the job name are
        # used for lookups: <image>, <version-field>, <suite>.
        fields = self.job_name.split('-')
        if len(fields) != 6:
            raise ValueError('Expected 6 fields in job name', self.job_name)

        image = self.images[fields[3]]
        # fields[4][3:] drops a 3-character prefix from the version field —
        # presumably a literal tag such as 'ver'; confirm the naming scheme.
        k8s_version = self.k8s_versions[fields[4][3:]]
        test_suite = self.test_suites[fields[5]]

        # envs are disallowed in node e2e tests.
        if 'envs' in self.common or 'envs' in image or 'envs' in test_suite:
            raise ValueError(
                'envs are disallowed in node e2e test', self.job_name)

        # Generates args.
        args = []
        args.extend(get_args(self.job_name, self.common))
        args.extend(get_args(self.job_name, image))
        args.extend(get_args(self.job_name, test_suite))
        # Generates job config.
        job_config = self.__get_job_def(args)
        # Generates prow config.
        prow_config = self.__get_prow_config(test_suite, k8s_version)

        # Combine --node-args: merge every '--node-args=X' occurrence into
        # one space-separated flag so the scenario receives a single value.
        node_args = []
        job_args = []
        for arg in job_config['args']:
            if '--node-args=' in arg:
                node_args.append(arg.split('=', 1)[1])
            else:
                job_args.append(arg)
        if node_args:
            flag = '--node-args='
            for node_arg in node_args:
                flag += '%s ' % node_arg
            job_args.append(flag.strip())
        job_config['args'] = job_args

        return job_config, prow_config
class E2ETest(object):
def __init__(self, output_dir, job_name, job, config):
self.env_filename = os.path.join(output_dir, '%s.env' % job_name),
self.job_name = job_name
self.job = job
self.common = config['common']
self.cloud_providers = config['cloudProviders']
self.images = config['images']
self.k8s_versions = config['k8sVersions']
self.test_suites = config['testSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite):
"""Returns the Prow config for the e2e job from the given fields."""
prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
prow_config['interval'] = self.job['interval']
# Assumes that the value in --timeout is of minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
container['args'].append('--bare')
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
return prow_config
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
fields = self.job_name.split('-')
if len(fields) != 7:
raise Value |
studiocoop/maya-coop | scripts/coopReplaceMayaEnvironment.py | Python | unlicense | 2,170 | 0.006912 | '''
@name: coopReplaceMayaEnvironment.py
@repository: https://github.com/studiocoop/maya
@version: 1.0
@license: UNLICENCE
@author: Santiago Montesdeoca [artineering.io]
@summary: replaces the existing Maya.env with a template which has to
be in the same directory as this file and hides specified shelves
@requires: -
@run: MEL: python("execfile('E:/coopReplaceMayaEnvironment.py')")
PYTHON: execfile('E:/coopReplaceMayaEnvironment.py')
@created: 8 Jul, 2015
@change: 8 Aug, 2016
'''
import os
import maya.mel as mel
impor | t maya.cmds as cmds
import userPrefs as prefs
# find environment directory (parent of the user scripts dir)
scriptsDir = os.path.abspath(cmds.internalVar(usd=True))
envDir = os.path.dirname(scriptsDir)

# hide unnecessary shelves
# shelvesDict maps shelf names to their 'shelf_*.mel' file names
# (see the rename loop below, which prepends 'shelf_').
shelvesDict = prefs.unnecessaryShelvesForAnim

# Maya creates all default shelves in prefs only after each has been opened
# (initialized), so visit every shelf once before saving/deleting them.
for shelf in shelvesDict:
    try:
        mel.eval('jumpToNamedShelf("{0}");'.format(shelf))
    except:
        # NOTE(review): bare except silently skips any shelf that fails to
        # load; consider catching the specific mel.eval error instead.
        continue

# all shelves loaded -> save them
mel.eval('saveAllShelves $gShelfTopLevel;')

# time to delete them (UI paths are '<topLevel>|<shelfLayout>')
shelfTopLevel = mel.eval('$tempMelVar=$gShelfTopLevel') + '|'
for shelf in shelvesDict:
    shelfLayout = shelvesDict[shelf].split('.mel')[0]
    if cmds.shelfLayout(shelfTopLevel + shelfLayout, q=True, ex=True):
        cmds.deleteUI(shelfTopLevel+shelfLayout, layout=True)

# mark them as deleted to avoid startup loading: renaming the shelf .mel
# file to '<name>.deleted' keeps a backup while hiding it from Maya.
shelfDir = os.path.join(envDir,'prefs','shelves')
for shelf in shelvesDict:
    shelfName = os.path.join(shelfDir,'shelf_' + shelvesDict[shelf])
    deletedShelfName = shelfName + '.deleted'
    if os.path.isfile(shelfName):
        # make sure the deleted file doesn't already exist
        if os.path.isfile(deletedShelfName):
            os.remove(shelfName)
            continue
        os.rename(shelfName,deletedShelfName)

# unload unnecessary plugins and stop them from autoloading next startup
plugins = prefs.unnecessaryPluginsForAnim
for plugin in plugins:
    if (cmds.pluginInfo(plugin, loaded=True, q=True)):
        cmds.unloadPlugin(plugin)
        cmds.pluginInfo(plugin, autoload=False, e=True)
|
tsl143/addons-server | src/olympia/files/tests/test_models.py | Python | bsd-3-clause | 56,292 | 0 | # -*- coding: utf-8 -*-
import hashlib
import json
import os
import tempfile
import zipfile
from datetime import datetime
from django import forms
from django.core.files.storage import default_storage as storage
from django.conf import settings
from django.test.utils import override_settings
import mock
import pytest
from mock import patch
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.utils import rm_local_tmp_dir, chunked
from olympia.addons.models import Addon
from olympia.applications.models import AppVersion
from olympia.files.models import (
EXTENSIONS, File, FileUpload, FileValidation, nfd_str, Permission,
track_file_status_change, WebextPermission, WebextPermissionDescription,
)
from olympia.files.templatetags.jinja_helpers import copyfileobj
from olympia.files.utils import (
check_xpi_info, Extractor, parse_addon, parse_xpi)
from olympia.versions.models import Version
pytestmark = pytest.mark.django_db
class UploadTest(TestCase, amo.tests.AMOPaths):
    """
    Base for tests that mess with file uploads, safely using temp directories.
    """

    def file_path(self, *args, **kw):
        """Resolve a fixture file path (delegates to AMOPaths)."""
        return self.file_fixture_path(*args, **kw)

    def get_upload(self, filename=None, abspath=None, validation=None):
        """Create and save a FileUpload built from a fixture path.

        Either ``filename`` (resolved against the fixture dir) or
        ``abspath`` must be given; ``validation`` defaults to a minimal
        passing validation blob.
        """
        # Use a context manager so the xpi file handle is closed promptly;
        # the original leaked the handle returned by open().
        with open(abspath if abspath else self.file_path(filename)) as fobj:
            xpi = fobj.read()
        upload = FileUpload.from_post([xpi], filename=abspath or filename,
                                      size=1234)
        # Simulate what fetch_manifest() does after uploading an app.
        upload.validation = (validation or
                             json.dumps(dict(errors=0, warnings=1, notices=2,
                                             metadata={}, messages=[])))
        upload.save()
        return upload
class TestFile(TestCase, amo.tests.AMOPaths):
"""
Tests the methods of the File model.
"""
fixtures = ['base/addon_3615', 'base/addon_5579']
def test_get_absolute_url(self):
f = File.objects.get(id=67442)
url = f.get_absolute_url(src='src')
expected = ('/firefox/downloads/file/67442/'
'delicious_bookmarks-2.1.072-fx.xpi?src=src')
assert url.endswith(expected), url
def test_get_url_path(self):
file_ = File.objects.get(id=67442)
assert file_.get_url_path('src') == \
file_.get_absolute_url(src='src')
    def test_get_url_path_attachment(self):
        file_ = File.objects.get(id=67442)
        # attachment=True inserts a 'type:attachment' segment in the path.
        expected = ('http://testserver/firefox/downloads/file/67442'
                    '/type:attachment/delicious_bookmarks-2.1.072-fx.xpi'
                    '?src=src')
        assert file_.get_url_path('src', attachment=True) == expected
def test_get_signed_url(self):
file_ = File.objects.get(id=67442)
url = file_.get_signed_url('src')
expected = ('/api/v3/file/67442/'
'delicious_bookmarks-2.1.072-fx.xpi?src=src')
assert url.endswith(expected), url
    def check_delete(self, file_, filename):
        """Test that when the File object is deleted, it is removed from the
        filesystem."""
        try:
            # Plant a file at the expected location first.
            with storage.open(filename, 'w') as f:
                f.write('sample data\n')
            assert storage.exists(filename)
            file_.delete()
            assert not storage.exists(filename)
        finally:
            # Always clean up the planted file if the delete didn't.
            if storage.exists(filename):
                storage.delete(filename)
    def test_delete_by_version(self):
        """Test that version (soft)delete doesn't delete the file."""
        f = File.objects.get(pk=67442)
        try:
            with storage.open(f.file_path, 'w') as fi:
                fi.write('sample data\n')
            assert storage.exists(f.file_path)
            # Soft-deleting the parent version must leave the file on disk.
            f.version.delete()
            assert storage.exists(f.file_path)
        finally:
            if storage.exists(f.file_path):
                storage.delete(f.file_path)
def test_delete_file_path(self):
f = File.objects.get(pk=67442)
self.check_delete(f, f.file_path)
def test_delete_no_file(self):
# test that the file object can be deleted without the file
# being present
file = File.objects.get(pk=74797)
filename = file.file_path
assert not os.path.exists(filename), 'File exists at: %s' % filename
file.delete()
def test_delete_signal(self):
"""Test that if there's no filename, the signal is ok."""
file = File.objects.get(pk=67442)
file.update(filename='')
file.delete()
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_disable_signal(self, hide_mock):
f = File.objects.get(pk=67442)
f.status = amo.STATUS_PUBLIC
f.save()
assert not hide_mock.called
f.status = amo.STATUS_DISABLED
f.save()
assert hide_mock.called
    @mock.patch('olympia.files.models.File.unhide_disabled_file')
    def test_unhide_on_enable(self, unhide_mock):
        # Saving an already-public file does not unhide anything.
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        f.save()
        assert not unhide_mock.called

        # Disabling alone does not unhide either.
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_DISABLED
        f.save()
        assert not unhide_mock.called

        # Only re-enabling a previously disabled file triggers the unhide.
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        f.save()
        assert unhide_mock.called
    def test_unhide_disabled_files(self):
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        # Plant a file at the guarded (hidden) location...
        with storage.open(f.guarded_file_path, 'wb') as fp:
            fp.write('some data\n')
        f.unhide_disabled_file()
        # ...after unhiding, the public path must exist and be non-empty.
        assert storage.exists(f.file_path)
        assert storage.open(f.file_path).size
    def test_latest_url(self):
        # Covers the four URL variants (plain/beta x download/attachment),
        # first for a platform-specific file, then for an all-platform one.
        # With platform.
        file_ = File.objects.get(id=74797)
        actual = file_.latest_xpi_url()
        assert actual == (
            '/firefox/downloads/latest/cooliris/platform:3/'
            'addon-5579-latest.xpi')

        actual = file_.latest_xpi_url(attachment=True)
        assert actual == (
            '/firefox/downloads/latest/cooliris/type:attachment/platform:3/'
            'addon-5579-latest.xpi')

        actual = file_.latest_xpi_url(beta=True)
        assert actual == (
            '/firefox/downloads/latest-beta/cooliris/platform:3/'
            'addon-5579-latest.xpi')

        actual = file_.latest_xpi_url(beta=True, attachment=True)
        assert actual == (
            '/firefox/downloads/latest-beta/cooliris/type:attachment/'
            'platform:3/addon-5579-latest.xpi')

        # Same tests repeated, but now without a platform because that File is
        # available for all platforms and not just a specific one.
        file_ = File.objects.get(id=67442)
        actual = file_.latest_xpi_url()
        assert actual == (
            '/firefox/downloads/latest/a3615/addon-3615-latest.xpi')

        actual = file_.latest_xpi_url(attachment=True)
        assert actual == (
            '/firefox/downloads/latest/a3615/type:attachment/'
            'addon-3615-latest.xpi')

        actual = file_.latest_xpi_url(beta=True)
        assert actual == (
            '/firefox/downloads/latest-beta/a3615/addon-3615-latest.xpi')

        actual = file_.latest_xpi_url(beta=True, attachment=True)
        assert actual == (
            '/firefox/downloads/latest-beta/a3615/type:attachment/'
            'addon-3615-latest.xpi')
def test_eula_url(self):
f = File.objects.get(id=67442)
assert f.eula_url() == '/en-US/firefox/addon/3615/eula/67442'
def test_generate_filename(self):
f = File.objects.get(id=67442)
assert f.generate_filename() == 'delicious_bookmarks-2.1.072-fx.xpi'
def test_pretty_filename(self):
f = File.objects.get(id=67442)
f.generate_filename()
assert f.pretty_filename() == 'delicious_bookmarks-2.1.072-fx.xpi'
def test_pretty_filename_short(self):
f = File.objects.get(id=67442)
f.version.addon.name = 'A Place Where The Sea Remembers Your Name'
f.generate_filename()
assert f.pretty_filename() == 'a_pla |
sergiocorreia/panflute | setup.py | Python | bsd-3-clause | 4,886 | 0.000614 | """A pythonic alternative to pandocfilters
See:
https://github.com/sergiocorreia/panflute
"""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Import version number
# (single-sourced: panflute/version.py is exec'd into a scratch dict so the
# package itself never has to be imported at build time)
version = {}
with open("panflute/version.py") as fp:
    exec(fp.read(), version)
version = version['__version__']

setup(
    name='panflute',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=version,

    description='Pythonic Pandoc filters',
    long_description=long_description,
    long_description_content_type='text/markdown',

    # The project's main homepage.
    url='https://github.com/sergiocorreia/panflute',
    project_urls={
        "Source": "https://github.com/sergiocorreia/panflute",
        "Documentation": "http://scorreia.com/software/panflute/",
        "Tracker": "https://github.com/sergiocorreia/panflute/issues",
    },

    # Author details
    author="Sergio Correia",
    author_email='sergio.correia@gmail.com',

    # Choose your license
    license='BSD3',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',

        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Topic :: Text Processing :: Filters',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        # https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],

    # What does your project relate to?
    keywords='pandoc pandocfilters markdown latex',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
    python_requires='>=3.6',

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        'click >=6,<9',
        'pyyaml >=3,<7',
    ],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,pypi]
    extras_require={
        'dev': [
            'configparser',
            'coverage',
            'flake8',
            'pandocfilters',
            'pytest-cov',
            'pytest',
            'requests',
        ],
        'pypi': [
            'docutils',
            'Pygments',
            'twine',
            'wheel',
        ]
    },

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    #package_data={
    #    'sample': ['package_data.dat'],
    #},

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'panflute=panflute:main',
            'panfl=panflute:panfl',
        ],
    },
)
|
Evlos/rwbypress | app.py | Python | gpl-2.0 | 726 | 0.001377 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# RedisPress.alpha
# ---
# Database: Redis
# Framework: Tornado
import | sys
import raven.contrib.tornado
import tornado.httpserver
import tornado.ioloop
import project.app.application
if __name__ == '__main__':
    # Optional single CLI argument selects the listen port.
    if len(sys.argv) == 2:
        port = sys.argv[1]
    else:
        port = 24400
    # SECURITY(review): the Sentry DSN credentials are hard-coded in source
    # and sent over plain HTTP — move them to configuration/environment.
    sentry_key = '56b95038ef7849d7bfbfc735d2ee50e1:64c853d4c58947b5a8b663ca7ac2c8b1'
    app = project.app.application.Application()
    app.sentry_client = raven.contrib.tornado.AsyncSentryClient(
        'http://' + sentry_key + '@lin.eternalelf.com/sentry/7',
    )
    # Serve the Tornado application and enter the event loop.
    srv = tornado.httpserver.HTTPServer(app)
    srv.listen(int(port))
    tornado.ioloop.IOLoop.instance().start()
|
forslund/mycroft-core | mycroft/tts/espeak_tts.py | Python | apache-2.0 | 2,379 | 0 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
from .tts import TTS, TTSValidator
class ESpeak(TTS):
    """TTS module for generating speech using ESpeak."""

    # (config key, espeak flag prefix) pairs; each optional setting is
    # forwarded to the espeak CLI when present and truthy in the config.
    _OPTION_FLAGS = (
        ('amplitude', '-a '),
        ('gap', '-g '),
        ('capital', '-k '),
        ('pitch', '-p '),
        ('speed', '-s '),
    )

    def __init__(self, lang, config):
        super(ESpeak, self).__init__(lang, config, ESpeakValidator(self))

    def get_tts(self, sentence, wav_file):
        """Generate WAV from sentence, phonemes aren't supported.

        Args:
            sentence (str): sentence to generate audio for
            wav_file (str): output file

        Returns:
            tuple ((str) file location, None)
        """
        # Create argument string for espeak; the voice variant is appended
        # to the language code (e.g. 'en+m1').
        arguments = ['espeak', '-v', self.lang + '+' + self.voice]
        for key, flag in self._OPTION_FLAGS:
            value = self.config.get(key)
            if value:
                # str() tolerates numeric config values; the original code
                # assumed strings and raised TypeError on ints/floats.
                arguments.append(flag + str(value))
        arguments.extend(['-w', wav_file, sentence])
        subprocess.call(arguments)
        return wav_file, None
class ESpeakValidator(TTSValidator):
    """Checks that the espeak binary is installed and usable."""

    def __init__(self, tts):
        super(ESpeakValidator, self).__init__(tts)

    def validate_lang(self):
        # TODO: verify the configured language is one espeak supports.
        pass

    def validate_connection(self):
        # Running 'espeak --version' proves the binary exists and is
        # executable; any failure is reported as a missing installation.
        try:
            subprocess.call(['espeak', '--version'])
        except Exception:
            raise Exception('ESpeak is not installed. Please install it on '
                            'your system and restart Mycroft.')

    def get_tts_class(self):
        return ESpeak
|
nharraud/invenio-jsonschemas | invenio_jsonschemas/errors.py | Python | gpl-2.0 | 2,330 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is di | stributed | in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio-JSONSchemas errors."""
from __future__ import absolute_import, print_function
class JSONSchemaError(Exception):
    """Base class for errors in Invenio-JSONSchemas module.

    Catching this type handles every schema-related failure raised by
    the more specific exceptions defined in this module.
    """
class JSONSchemaNotFound(JSONSchemaError):
    """Exception raised when a requested JSONSchema is not found."""

    def __init__(self, schema, *args, **kwargs):
        """Constructor.

        :param schema: path of the requested schema which was not found.
        """
        # Keep the missing schema path available to callers.
        self.schema = schema
        message = 'Schema "{}" not found'.format(schema)
        super(JSONSchemaNotFound, self).__init__(message, *args, **kwargs)
class JSONSchemaDuplicate(JSONSchemaError):
    """Exception raised when multiple schemas match the same path."""

    def __init__(self, schema, first_dir, second_dir, *args, **kwargs):
        """Constructor.

        :param schema: duplicate schema path.
        :param first_dir: first directory where the schema was found.
        :param second_dir: second directory where the schema was found.
        """
        self.schema = schema
        # BUG FIX: previously ``.format(...)`` was applied only to the second
        # operand of the ``+`` concatenation (method call binds tighter than
        # ``+``), so the ``{schema}`` placeholder was never substituted.
        # Implicit literal concatenation keeps it a single format string.
        super(JSONSchemaDuplicate, self).__init__(
            'Schema "{schema}" defined in multiple '
            'directories: "{first}" and "{second}"'.format(
                schema=schema,
                first=first_dir,
                second=second_dir),
            *args, **kwargs)
|
kbrebanov/atlantic-python | atlantic/atlantic.py | Python | mit | 549 | 0.001821 | from .utils import Atl | anticBase
from .image import AtlanticImage
from .plan import AtlanticPlan
from .server import AtlanticServer
from .sshkey import AtlanticSSHKey
class Atlantic(AtlanticBase):
    # Facade over the Atlantic.Net API: bundles the per-resource clients
    # (images, plans, servers, SSH keys) under one authenticated object.
    def __init__(self, access_key, private_key):
        AtlanticBase.__init__(self, access_key, private_key)
        # Each sub-client is constructed with the same credential pair.
        self.image = AtlanticImage(access_key, private_key)
        self.plan = AtlanticPlan(access_key, private_key)
        self.server = AtlanticServer(access_key, private_key)
        self.sshkey = AtlanticSSHKey(access_key, private_key)
|
TAJaroszewski/lma_contrail_monitoring | deployment_scripts/puppet/modules/lma_contrail_monitoring/files/scripts/check-api-contrail-9081.py | Python | apache-2.0 | 4,630 | 0.000216 | # !/usr/bin/python
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import urllib2
import sys
import simplejson as json
import ConfigParser
import signal
import time
# Credentials / endpoints for the OpenStack API checks.
CONF_FILE = '/etc/check_api.conf'

# collectd metric identity and dispatch settings for this plugin.
plugin_name = "check-api-contrail-9081"
plugin_instance = "lma-contrail-extension"
plugin_interval = 90
plugin_type = 'gauge'
plugin_request = 'active'

# Default endpoint; overridden by argv[1] in standalone mode (see __main__).
url = "http://127.0.0.1:9081"
class OSAPI(object):
    """Thin Keystone-authenticated client used to time Contrail API calls."""

    def __init__(self, config):
        self.config = config
        self.username = self.config.get('api', 'user')
        self.password = self.config.get('api', 'password')
        self.tenant_name = self.config.get('api', 'tenant')
        self.endpoint_keystone = self.config.get('api',
                                                 'keystone_endpoints'
                                                 ).split(',')
        self.token = None
        self.tenant_id = None
        # Authenticate eagerly so a bad config fails at construction time.
        self.get_token()

    def get_timeout(self, service):
        """Return the '<service>_timeout' option in seconds (default: 1)."""
        try:
            return int(self.config.get('api', '%s_timeout' % service))
        except ConfigParser.NoOptionError:
            return 1

    def get_token(self):
        """Fetch a Keystone token and cache the token and tenant id."""
        payload = json.dumps({
            "auth":
                {
                    'tenantName': self.tenant_name,
                    'passwordCredentials':
                        {
                            'username': self.username,
                            'password': self.password
                        }
                }
        })
        for keystone in self.endpoint_keystone:
            try:
                request = urllib2.Request(
                    '%s/tokens' % keystone,
                    data=payload,
                    headers={
                        'Content-type': 'application/json'
                    })
                # The original reused the ``data`` name for the decoded
                # response, which would have posted a dict on a second
                # iteration; ``response`` keeps the request payload intact.
                response = json.loads(
                    urllib2.urlopen(
                        request, timeout=self.get_timeout('keystone')).read())
                self.token = response['access']['token']['id']
                self.tenant_id = response['access']['token']['tenant']['id']
                return
            except Exception as e:
                # 'except X, e' updated to 'except X as e' (valid on
                # Python >= 2.6 and forward-compatible with Python 3).
                # NOTE(review): this exits on the first failing endpoint
                # instead of trying the next one — confirm that is intended.
                print("Got exception '%s'" % e)
                sys.exit(1)

    def check_api(self, url, service):
        """Time a single GET against ``url``.

        Returns the elapsed time in seconds as a string ('%.3f'), or None
        when the server answers with an HTTP error status; exits the
        process on any other failure (connection refused, timeout, ...).
        """
        try:
            request = urllib2.Request(
                url,
                headers={
                    'X-Auth-Token': self.token,
                })
            start_time = time.time()
            # The response body is not needed; only the round-trip matters.
            urllib2.urlopen(request, timeout=self.get_timeout(service))
            end_time = time.time()
        except urllib2.HTTPError:
            # An HTTP error still proves the service answered; report no
            # timing rather than aborting.
            return
        except Exception as e:
            print(e)
            sys.exit(1)
        return "%.3f" % (end_time - start_time)
def configure_callback(conf):
    """collectd configuration callback (currently a no-op).

    The plugin defines no configuration options yet; the callback is
    registered so collectd <Plugin> blocks are accepted without error.
    """
    for node in conf.children:
        # The original stored str(node.values[0]) into an unused local;
        # no option handling is implemented yet.
        pass
def restore_sigchld():
    """Reset SIGCHLD to its default disposition.

    Registered as the collectd init callback (see the module bottom) so
    child-process handling behaves normally inside the daemon.
    """
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def log_verbose(msg):
    """Log through collectd's info channel, tagged with the plugin name."""
    collectd.info('%s plugin [verbose]: %s' % (plugin_name, msg))
def payload():
    """Read check_api.conf, probe the Contrail API once and return the
    measured response time (seconds as a string, or None on HTTP error)."""
    config = ConfigParser.RawConfigParser()
    config.read(CONF_FILE)
    api = OSAPI(config)
    return api.check_api(url, "contrail")
def payload_callback():
    """collectd read callback: measure once and dispatch a gauge value."""
    log_verbose('Read callback called')
    value = payload()
    # NOTE(review): payload() can return None (HTTP error path in
    # OSAPI.check_api) — confirm collectd accepts a None gauge value.
    # log_verbose(
    #     'Sending value: %s.%s=%s' % (plugin_name, '-'.join([val.plugin, val.type]), value))
    val = collectd.Values(
        plugin=plugin_name,  # metric source
        plugin_instance=plugin_instance,
        type=plugin_type,
        type_instance=plugin_name,
        interval=plugin_interval,
        meta={'0': True},
        values=[value]
    )
    val.dispatch()
if __name__ == '__main__':
    # Standalone mode: probe the URL given on the command line once and
    # print the measured response time.
    if sys.argv[1]:
        # NOTE(review): raises IndexError when called without an argument;
        # the else branch below is only reached for an empty argv[1].
        url = sys.argv[1]
    else:
        print "Please provide URL"
        sys.exit(1)
    print "Plugin: " + plugin_name
    payload = payload()
    print("%s" % (payload))
    sys.exit(0)
else:
    # Imported by collectd: register the plugin callbacks.
    import collectd
    collectd.register_init(restore_sigchld)
    collectd.register_config(configure_callback)
    collectd.register_read(payload_callback, plugin_interval)
|
CINPLA/exana | exana/tests/_test_baysian_latency.py | Python | gpl-3.0 | 3,442 | 0.003196 | import pytest
import elephant
import neo
import quantities as pq
import numpy as np
def test_baysian():
    """Build a synthetic stimulus-locked spike train and run the Bayesian
    latency estimation on its trial time histogram.

    Returns ``(count_data, trace)``: the per-bin spike counts and the
    trace produced by ``baysian_latency``.

    Cleanup: removed the unused ``generate_salt_trials`` import and the
    dead ``bins = np.arange(...)`` computation from the original.
    """
    from exana.stimulus import baysian_latency
    from exana.misc import concatenate_spiketrains
    from elephant.spike_train_generation import homogeneous_poisson_process as hpp
    # Fixed seed keeps the generated Poisson trains reproducible.
    np.random.seed(12345)
    N_trials = 100
    stim_duration = 100 * pq.ms
    stim_start = 1000 * pq.ms
    stim_latency = 50 * pq.ms
    trial_duration = 1150 * pq.ms
    trains = []
    stim_onsets = []
    for n in range(N_trials):
        offset = trial_duration * n
        stim_onsets.append(offset)
        # Baseline rate (2 Hz) until stimulus onset + latency, then an
        # elevated rate (8 Hz) for the rest of the stimulus window.
        trains.extend([hpp(rate=2 * pq.Hz,
                           t_start=offset,
                           t_stop=stim_start + stim_latency + offset),
                       hpp(rate=8 * pq.Hz,
                           t_start=stim_start + stim_latency + offset,
                           t_stop=stim_start + stim_duration + offset)])
    spike_train = concatenate_spiketrains(trains)
    epoch = neo.Epoch(
        times=np.array(stim_onsets) * pq.ms,
        durations=np.array([trial_duration] * len(stim_onsets)) * pq.ms)
    from exana.stimulus import make_spiketrain_trials
    trials = make_spiketrain_trials(spike_train=spike_train, epoch=epoch)
    from elephant.statistics import time_histogram
    t_start = trials[0].t_start.rescale('s')
    t_stop = trials[0].t_stop.rescale('s')
    # 100 bins across the whole trial window.
    binsize = (abs(t_start)+abs(t_stop))/float(100)
    time_hist = time_histogram(trials, binsize, t_start=t_start,
                               t_stop=t_stop, output='counts', binary=False)
    count_data = time_hist.magnitude
    trace = baysian_latency(count_data)
    return count_data, trace
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Qt5Agg')
    import matplotlib.pyplot as plt
    count_data, trace = test_baysian()

    # Raw per-bin spike counts.
    plt.figure()
    n_count_data = len(count_data)
    plt.bar(np.arange(n_count_data), count_data)
    plt.xlim(0, n_count_data);
    plt.show()

    lambda_1_samples = trace['lambda_1']
    lambda_2_samples = trace['lambda_2']
    tau_samples = trace['tau']

    plt.figure(figsize=(12.5, 10))
    # histogram of the samples:
    # NOTE(review): 'normed' was removed in matplotlib 3.x — this code
    # presumably targets matplotlib 2.x; use density=True on newer versions.
    ax = plt.subplot(311)
    ax.set_autoscaley_on(False)
    plt.hist(lambda_1_samples, histtype='stepfilled', bins=30, alpha=0.85,
             label="posterior of $\lambda_1$", color="#A60628", normed=True)
    plt.legend(loc="upper left")
    plt.title(r"""Posterior distributions of the variables
$\lambda_1,\;\lambda_2,\;\tau$""")
    plt.xlim([15, 30])
    plt.xlabel("$\lambda_1$ value")

    ax = plt.subplot(312)
    ax.set_autoscaley_on(False)
    plt.hist(lambda_2_samples, histtype='stepfilled', bins=30, alpha=0.85,
             label="posterior of $\lambda_2$", color="#7A68A6", normed=True)
    plt.legend(loc="upper left")
    plt.xlim([15, 30])
    plt.xlabel("$\lambda_2$ value")

    plt.subplot(313)
    # Normalize the tau histogram so the bars sum to 1 (a probability).
    w = 1.0 / tau_samples.shape[0] * np.ones_like(tau_samples)
    plt.hist(tau_samples, bins=n_count_data, alpha=1,
             label=r"posterior of $\tau$",
             color="#467821", weights=w, rwidth=2.)
    plt.xticks(np.arange(n_count_data))
    plt.legend(loc="upper left")
    plt.ylim([0, .75])
    plt.xlim([35, len(count_data)-20])
    plt.xlabel(r"$\tau$ (in days)")
    plt.ylabel("probability")
    plt.show()
|
legordian/ROOTPWA | pyInterface/scripts/convertTreeToEvt.py | Python | gpl-3.0 | 3,620 | 0.024862 | #!/usr/bin/env python
import argparse
import os
import sys
import math
import pyRootPwa
import pyRootPwa.core
def writeParticleToFile (outFile, particleName, particleMomentum):
    """Append one particle line (geant id, charge, px, py, pz, E) to the
    .evt file; returns False when the particle is not in the data table."""
    table = pyRootPwa.core.particleDataTable
    if not table.isInTable(particleName):
        pyRootPwa.utils.printErr("particle '" + particleName + "' not found in particleDataTable.")
        return False
    properties = table.entry(particleName)
    px = particleMomentum.Px()
    py = particleMomentum.Py()
    pz = particleMomentum.Pz()
    # Energy from the on-shell relation E^2 = p^2 + m^2.
    energy = math.sqrt(px**2 + py**2 + pz**2 + properties.mass2)
    fields = [
        str(table.geantIdFromParticleName(particleName)),
        str(properties.charge),
        '%.16e' % px,
        '%.16e' % py,
        '%.16e' % pz,
        '%.16e' % energy,
    ]
    outFile.write(" ".join(fields) + "\n")
    return True
if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        description="Converts ROOTPWA .root file to .evt file."
    )
    parser.add_argument("inputFileName", help="The path to the RootPwa input file")
    parser.add_argument("outputFileName", help="The path to the ASCII evt output file")
    parser.add_argument("-p", "--particleDataTable", help="The path of particleDataTable file (default: '$ROOTPWA/particleData/particleDataTable.txt')",
                        default='$ROOTPWA/particleData/particleDataTable.txt')
    args = parser.parse_args()

    printWarn = pyRootPwa.utils.printWarn
    printErr = pyRootPwa.utils.printErr
    printSucc = pyRootPwa.utils.printSucc
    ROOT = pyRootPwa.ROOT

    # The particle data table supplies geant ids, charges and masses.
    pdtPath = os.path.expandvars(args.particleDataTable)
    if not pyRootPwa.core.particleDataTable.instance.readFile(pdtPath):
        printErr("error loading particleDataTable from '" + pdtPath + "'. Aborting...")
        sys.exit(1)

    inputFile = ROOT.TFile(args.inputFileName, "READ")
    if not inputFile:
        printErr("error opening input file. Aborting...")
        sys.exit(1)
    metaData = pyRootPwa.core.eventMetadata.readEventFile(inputFile)
    if metaData == 0:
        # NOTE(review): this only prints; execution continues with a null
        # metaData and will fail on the next line — confirm an exit is
        # intended here.
        printErr("error reading metaData. Input file is not a RootPWA root file.")
    prodKinPartNames = metaData.productionKinematicsParticleNames()
    decayKinPartNames = metaData.decayKinematicsParticleNames()
    tree = metaData.eventTree()

    with open(args.outputFileName, 'w') as outputEvtFile:
        particleCount = len(prodKinPartNames) + len(decayKinPartNames)
        for event in tree:
            prodKinMomenta = event.__getattr__(metaData.productionKinematicsMomentaBranchName)
            decayKinMomenta = event.__getattr__(metaData.decayKinematicsMomentaBranchName)
            if particleCount != (prodKinMomenta.GetEntries() + decayKinMomenta.GetEntries()):
                printErr("particle count in metaData does not match particle count in event data.")
                sys.exit(1)
            # Each event record starts with the particle count line.
            outputEvtFile.write(str(particleCount) + '\n')
            for particle in range(prodKinMomenta.GetEntries()):
                if not writeParticleToFile(outputEvtFile, prodKinPartNames[particle], prodKinMomenta[particle]):
                    # BUG FIX: 'particle' is an int; concatenating it to a
                    # str raised TypeError and masked the real error.
                    printErr("failed writing particle '" + str(particle) + "' to output file.")
                    sys.exit(1)
            for particle in range(decayKinMomenta.GetEntries()):
                if not writeParticleToFile(outputEvtFile, decayKinPartNames[particle], decayKinMomenta[particle]):
                    # BUG FIX: same int-concatenation TypeError as above.
                    printErr("failed writing particle '" + str(particle) + "' to output file.")
                    sys.exit(1)
    inputFile.Close()
    printSucc("successfully converted '" + args.inputFileName + "' to '" + args.outputFileName + "'.")
|
arthurdejong/python-stdnum | stdnum/nl/postcode.py | Python | lgpl-2.1 | 2,481 | 0 | # postcode.py - functions for handling Dutch postal codes
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Postcode (the Dutch postal code).
The Dutch postal code consists of four numbers followed by two letters,
separated by a single space.
More information:
* https://en.wikipedia.org/wiki/Postal_codes_in_the_Netherlands
* https://nl.wikipedia.org/wiki/Postcodes_in_Nederland
>>> validate('2601 DC')
'2601 DC'
>>> validate('NL-2611ET')
'2611 ET'
>>> validate('26112 ET')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('2611 SS') # a few letter combinations are banned
Traceback (most recent call last):
...
InvalidComponent: ...
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
_postcode_re = re.compile(r'^(?P<pt1>[1-9][0-9]{3})(?P<pt2>[A-Z]{2})$')
_postcode_blacklist = ('SA', 'SD', 'SS')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' -').upper().strip()
if number.startswith('NL'):
number = number[2:]
return number
def validate(number):
"""Check if the number is in | the correct format. This currently does not
check whether the code corr | esponds to a real address."""
number = compact(number)
match = _postcode_re.search(number)
if not match:
raise InvalidFormat()
if match.group('pt2') in _postcode_blacklist:
raise InvalidComponent()
return '%s %s' % (match.group('pt1'), match.group('pt2'))
def is_valid(number):
"""Check if the number is a valid postal code."""
try:
return bool(validate(number))
except ValidationError:
return False
|
psnj/petl | petl/test/io/test_sources.py | Python | mit | 3,696 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import gzip
import bz2
import zipfile
from tempfile import NamedTemporaryFile
from petl.compat import PY2
from petl.test.helpers import ieq, eq_
import petl as etl
from petl.io.sources import MemorySource, PopenSource, ZipSource, \
StdoutSource, GzipSource, BZ2Source
def test_memorysource():
tbl1 = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
# test writing to a string buffer
ss = MemorySource()
etl.tocsv(tbl1, ss)
expect = "foo,bar\r\na,1\r\nb,2\r\nc,2\r\n"
if not PY2:
expect = expect.encode('ascii')
actual = ss.getvalue()
eq_(expect, actual)
# test reading from a string buffer
tbl2 = etl.fromcsv(MemorySource(actual))
ieq(tbl1, tbl2)
ieq(tbl1, tbl2)
# test appending
etl.appendcsv(tbl1, ss)
actual = ss.getvalue()
expect = "foo,bar\r\na,1\r\nb,2\r\nc,2\r\na,1\r\nb,2\r\nc,2\r\n" |
if not PY2:
expect = expect.encode('ascii')
eq_ | (expect, actual)
def test_memorysource_2():
data = 'foo,bar\r\na,1\r\nb,2\r\nc,2\r\n'
if not PY2:
data = data.encode('ascii')
actual = etl.fromcsv(MemorySource(data))
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
ieq(expect, actual)
ieq(expect, actual)
def test_popensource():
expect = (('foo', 'bar'),)
delimiter = ' '
actual = etl.fromcsv(PopenSource(r'echo foo bar',
shell=True),
delimiter=delimiter)
ieq(expect, actual)
def test_zipsource():
# setup
tbl = [('foo', 'bar'), ('a', '1'), ('b', '2')]
fn_tsv = NamedTemporaryFile().name
etl.totsv(tbl, fn_tsv)
fn_zip = NamedTemporaryFile().name
z = zipfile.ZipFile(fn_zip, mode='w')
z.write(fn_tsv, 'data.tsv')
z.close()
# test
actual = etl.fromtsv(ZipSource(fn_zip, 'data.tsv'))
ieq(tbl, actual)
def test_stdoutsource():
tbl = [('foo', 'bar'), ('a', 1), ('b', 2)]
etl.tocsv(tbl, StdoutSource(), encoding='ascii')
etl.tohtml(tbl, StdoutSource(), encoding='ascii')
etl.topickle(tbl, StdoutSource())
def test_stdoutsource_unicode():
tbl = [('foo', 'bar'),
(u'Արամ Խաչատրյան', 1),
(u'Johann Strauß', 2)]
etl.tocsv(tbl, StdoutSource(), encoding='utf-8')
etl.tohtml(tbl, StdoutSource(), encoding='utf-8')
etl.topickle(tbl, StdoutSource())
def test_gzipsource():
# setup
tbl = [('foo', 'bar'), ('a', '1'), ('b', '2')]
fn = NamedTemporaryFile().name + '.gz'
expect = b"foo,bar\na,1\nb,2\n"
# write explicit
etl.tocsv(tbl, GzipSource(fn), lineterminator='\n')
actual = gzip.open(fn).read()
eq_(expect, actual)
# write implicit
etl.tocsv(tbl, fn, lineterminator='\n')
actual = gzip.open(fn).read()
eq_(expect, actual)
# read explicit
tbl2 = etl.fromcsv(GzipSource(fn))
ieq(tbl, tbl2)
# read implicit
tbl2 = etl.fromcsv(fn)
ieq(tbl, tbl2)
def test_bzip2source():
# setup
tbl = [('foo', 'bar'), ('a', '1'), ('b', '2')]
fn = NamedTemporaryFile().name + '.bz2'
expect = b"foo,bar\na,1\nb,2\n"
# write explicit
etl.tocsv(tbl, BZ2Source(fn), lineterminator='\n')
actual = bz2.BZ2File(fn).read()
eq_(expect, actual)
# write implicit
etl.tocsv(tbl, fn, lineterminator='\n')
actual = bz2.BZ2File(fn).read()
eq_(expect, actual)
# read explicit
tbl2 = etl.fromcsv(BZ2Source(fn))
ieq(tbl, tbl2)
# read implicit
tbl2 = etl.fromcsv(fn)
ieq(tbl, tbl2)
|
Nano1994/freenos | site_scons/iso.py | Python | gpl-3.0 | 2,428 | 0.01771 | #
# Copyright (C) 2009 Niek Linnenbank
#
# This program is free softw | are: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free S | oftware Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import shutil
import tempfile
import version
import checksum
import linn
from build import *
from SCons.Script import *
from SCons.Action import *
#
# Generate an ISO 9660 image.
#
def generateISO(target, source, env):
# Open the list.
list = open(str(source[0]))
# Create a temporary directory.
temp = tempfile.mkdtemp()
# Read out which file to add.
for line in list.readlines():
# Copy them to the temporary directory.
copyWithParents(line.strip(), temp)
# Create an bootable ISO image.
os.system("mkisofs -R -b boot/grub/stage2_eltorito -no-emul-boot " +
" -boot-load-size 4 -boot-info-table -V 'FreeNOS " + version.currentRev + "'" +
" -o " + str(target[0]) + " " + temp)
# Done.
os.system("rm -rf " + temp)
list.close()
#
# Output a user friendly command.
#
def generateISOstr(target, source, env):
return " ISO " + str(target[0])
#
# Create the ISO builder.
#
isoBuilder = Builder(action = Action(generateISO, generateISOstr),
suffix = '.iso',
src_suffix = '.isodesc')
target.Append(BUILDERS = { 'ISO' : isoBuilder })
#
# Instructs to build an ISO and MD5+SHA1 checksums.
#
isoImage = target.ISO('#boot/boot.iso', ['#boot/boot.isodesc'])
isoImageMd5 = target.Checksum('#boot/boot.iso.md5', '#boot/boot.iso')
isoImageSha1 = target.Checksum('#boot/boot.iso.sha1', '#boot/boot.iso')
#
# Dependencies and target aliases.
#
target.AddPreAction(isoImage, linn.action)
target.Clean(isoImage, 'boot/boot.linn.gz')
Depends(isoImage, ['bin', 'lib', 'kernel', 'sbin', 'srv', 'etc', '#boot/boot.img'])
Alias('iso', [ isoImage, isoImageMd5, isoImageSha1 ])
AlwaysBuild(isoImage, isoImageMd5, isoImageSha1)
|
lesina/Hack70 | env/bin/player.py | Python | gpl-3.0 | 2,194 | 0 | #!/home/oleg/Web/Hack70/env/bin/python3
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0 | ]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
| self.image.paste(im)
except EOFError:
return # end of file
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
KT26/PythonCourse | 3. Working with lists/17.py | Python | mit | 397 | 0.012594 | # Created by PyCharm Pro Edition
# User: Kaushik Talukdar |
# Date: 30-03-17
# Time: 11:35 PM
# tuple can't be modified but the variable holding a tuple can be assigned new values
# basically changing the tuple
cars = ["bmw", "rollsroyce", "audi", "ferrari"]
print(cars)
cars = ["bmw", "koenigsegg", "audi", "ferrari"]
print(ca | rs) |
oVirt/ovirt-node-dbus-backend | src/log.py | Python | gpl-3.0 | 2,690 | 0.000372 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# log.py - Copyright (C) 2015 Red Hat, Inc.
# Written by Ryan Barry <rbarry@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
import logging
import logging.config
"""
Logging for the oVirt Node Dbus Backend. Since we're running from
systemd, send default messages there and let journald handle it. Debug
goes in /tmp if we're running in debug mode.
"""
DEBUG_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s %(message)s'
}
},
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler'
},
'debug': {
'level': 'DEBUG',
'formatter': 'standard',
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/tmp/ovirt-node-dbus.debug.log'
}
},
'loggers': {
'': {
'handlers': ['console', 'debug'],
'level': 'DEBUG',
'propagate': False
}
}
}
LOGGING = DEBUG_LOGGING.copy()
LOGGING.update({
'handlers': {
'console': {
'level | ': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler'
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False
}
}
})
def configure_lo | gging(debug=False):
log_config = DEBUG_LOGGING if debug else LOGGING
logging.config.dictConfig(log_config)
def getLogger(name=None):
if not getLogger._logger:
if not logging.getLogger().handlers:
configure_logging()
getLogger._logger = logging.getLogger()
fullname = ".".join([getLogger._logger.name, name]) if name else name
return logging.getLogger(fullname)
getLogger._logger = None
|
dkruchinin/spdemo | walkers/bfs.py | Python | bsd-3-clause | 1,065 | 0 | from core.cell import CellStatus
from walkers.basic import BasicWalker
class BFSWalker(BasicWalker):
"" | "
Breadth first search algorithm.
"""
def __init__(self, graph, src_cell, dst_cell, use_diags):
super(BFSWalker, self).__init__(graph, src_cell, dst_cell, use_diags)
self._queue = [self._src_cell]
self._finished = False
def finished(self):
return self._finished
def step(self):
if len(self | ._queue) == 0:
self._finished = True
if self._finished:
return
while len(self._queue) > 0:
cur_cell = self._queue.pop(0)
cur_cell.status = CellStatus.Visited
if cur_cell == self._dst_cell:
self._finished = True
return
for c in cur_cell.neighbours(diagonals=self._use_diags):
if c.status == CellStatus.NotVisited:
c.status = CellStatus.Discovered
c.parent = cur_cell
self._queue.append(c)
break
|
inspyration/odoo | addons/crm/report/crm_phonecall_report.py | Python | agpl-3.0 | 3,856 | 0.007002 | # -*- coding: utf-8 -*-
################# | ############################ | #################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.addons.crm import crm
from openerp.osv import fields, osv
AVAILABLE_STATES = [
('draft', 'Draft'),
('open', 'Todo'),
('cancel', 'Cancelled'),
('done', 'Held'),
('pending', 'Pending')
]
class crm_phonecall_report(osv.osv):
""" Phone calls by user and team """
_name = "crm.phonecall.report"
_description = "Phone calls by user and team"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'team_id':fields.many2one('crm.team', 'Sales Team', oldname='section_id', readonly=True),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'nbr': fields.integer('# of Cases', readonly=True), # TDE FIXME master: rename into nbr_cases
'state': fields.selection(AVAILABLE_STATES, 'Status', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'duration': fields.float('Duration', digits=(16,2),readonly=True, group_operator="avg"),
'delay_open': fields.float('Delay to open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'categ_id': fields.many2one('crm.phonecall.category', 'Category'),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.datetime('Opening Date', readonly=True, select=True),
'date_closed': fields.datetime('Close Date', readonly=True, select=True),
}
def init(self, cr):
""" Phone Calls By User And Team
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_phonecall_report')
cr.execute("""
create or replace view crm_phonecall_report as (
select
id,
c.date_open as opening_date,
c.date_closed as date_closed,
c.state,
c.user_id,
c.team_id,
c.categ_id,
c.partner_id,
c.duration,
c.company_id,
c.priority,
1 as nbr,
c.create_date as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
from
crm_phonecall c
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
coolhacks/python-hacks | examples/pyHacks/TwistedChat.py | Python | mit | 1,269 | 0.034673 | from twisted.internet.protocol import Factory
from twisted.protocols.ba | sic import LineReceiver
from twisted.internet import reactor
class NonAnonChat(LineReceiver):
def __ | init__(self, protocols):
self.users = {'john':'john', 'adastra':'adastra'}
self.userName = None
self.userLogged = False
self.protocols = protocols
def connectionMade(self):
self.sendLine('Your Username: ')
def connectionLost(self, reason):
for protocol in self.protocols:
if protocol != self:
protocol.sendLine('Connection Lost: %s '%(reason))
def lineReceived(self, line):
if self.userName == None:
if self.users.has_key(line):
self.userName = line
self.sendLine('Password: ')
else:
self.sendLine('Wrong Username')
elif self.userLogged == False:
if self.users[self.userName] == line:
self.userLogged = True
self.protocols.append(self)
else:
self.sendLine('Wrong Password')
elif self.userLogged == True:
for protocol in self.protocols:
if protocol != self:
protocol.sendLine('%s Said: %s ' %(self.userName, line))
class NonAnonFactory(Factory):
def __init__(self):
self.protocols = []
def buildProtocol(self, addr):
return NonAnonChat(self.protocols)
reactor.listenTCP(8000, NonAnonFactory())
reactor.run()
|
andmos/ansible | lib/ansible/module_utils/docker/common.py | Python | gpl-3.0 | 37,096 | 0.002237 | #
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
from datetime import timedelta
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = st | r(exc)
| HAS_DOCKER_PY = False
# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace are are incompatible
try:
# docker
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
cacert_path=dict(type='path', aliases=['tls_ca_cert']),
cert_path=dict(type='path', aliases=['tls_client_cert']),
key_path=dict(type='path', aliases=['tls_client_key']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
tls_verify=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY'])),
debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = [
['tls', 'tls_verify']
]
DOCKER_REQUIRED_TOGETHER = [
['cert_path', 'key_path']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
docker_version = None
# No docker-py. Create a place holder client to allow
# instantiation of AnsibleModule and proper error handing
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def is_image_name_id(name):
"""Checks whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules installed together as they use the same namespace and "
"cause a corrupt installation. Please uninstall both packages, and re-install only the docker-py or docker python "
"module. It is recommended to install the docker module if no support for Python 2.6 is required. "
"Please note that simply uninstalling one of the modules can leave the other module in a broken state.")
if not HAS_DOCKER_PY:
if NEEDS_DOCKER_PY2:
msg = "Failed to import docker - %s. Try `pip install docker`"
else:
msg = "Failed to import docker or docker-py - %s. Try `pip install docker` or `pip install docker-py` (Python 2.6)"
self.fail(msg % HAS_DOCKER_ERROR)
if self.docker_py_version < LooseVersion(min_docker_version):
if NEEDS_DOCKER_PY2:
if docker_version < LooseVersion('2.0'):
ms |
Mariaanisimova/pythonintask | INBa/2015/Ionova_A_K/task_2_8.py | Python | apache-2.0 | 579 | 0.047887 | # Задача 2 | . Вариант 8.
#Напишите программу, которая будет выводить на экран наиболее понравившееся вам высказывание, автором которого является Лао-Цзы. Не забудьте о том, что автор должен быть упомянут на отдельной строке.
# Ionova A. K.
#30.04.2016
print("Нельзя обожествлять бесов.\n\t\t\t\t\t\t\t\tЛао-цзы")
input("Нажмите EN | TER для выхода.") |
ewiseblatt/spinnaker | testing/citest/tests/bake_and_deploy_test.py | Python | apache-2.0 | 30,181 | 0.004374 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# See testable_service/integration_test.py and spinnaker_testing/spinnaker.py
# for more details.
#
# This test will use ssh to peek at the spinnaker configuration
# to determine the managed project it should verify, and to determine
# the spinnaker account name to use when sending it commands.
# Sample Usage:
# Assuming you have created $PASSPHRASE_FILE (which you should chmod 400)
# and $CITEST_ROOT points to the root directory of this repository
# (which is . if you execute this from the root). The passphrase file
# can be ommited if you run ssh-agent and add .ssh/compute_google_engine.
#
# Since this test runs a pipeline from a Jenkins trigger, you need to
# configure Jenkins in the following way.
# 1. Take note of your Jenkins server baseUrl,
# i.e <protocol>://<host>[:port]/[basePath]
# and store it as $JENKINS_URL.
#
# 2. Create a file, fill it with
# <username> <password>
# corresponding to valid Jenkins credentials, and store its path
# as $JENKINS_AUTH_PATH (also chmod 400).
# Or, set JENKINS_USER and JENKINS_PASSWORD environment variables.
#
# 3. Take note of the Jenkins master you have configured in Igor,
# and store its name as $JENKINS_MASTER.
#
# 4. Choose a name for your jenkins job and store it in $JENKINS_JOB.
#
# 5. On your Jenkins server, navigate to /job/$JENKINS_JOB/configure
# a) Under "Build Triggers", check "Trigger builds remotely".
# b) In the "Authentication Token" field, write some token
# and store it as $JENKINS_TOKEN.
# c) Add a build step that produces a file.
# mkdir -p somedir
# touch somedir/vim_2:7.4.052-1ubuntu3_amd64.deb
# Note that this might need to be consistent with the
# platform the bakery is on. The above works on Ubuntu 14.04
# d) Add post build action to archive the artifacts
# fi | les to archive: somedir/vim_2:7.4.052-1ubuntu3_amd64.deb
#
#
# PYTHONPATH=$CITEST_ROOT/testing/cites | t \
# python $CITEST_ROOT/testing/citest/tests/bake_and_deploy_test.py \
# --gce_ssh_passphrase_file=$PASSPHRASE_FILE \
# --gce_project=$PROJECT \
# --gce_zone=$ZONE \
# --gce_instance=$INSTANCE \
# --jenkins_master=$JENKINS_MASTER \
# --jenkins_url=$JENKINS_URL \
# --jenkins_auth_path=$JENKINS_AUTH_PATH \
# --jenkins_job=$JENKINS_JOB \
# --jenkins_token=$JENKINS_TOKEN \
# --test_google \
# --test_aws
# or
# PYTHONPATH=$CITEST_ROOT/testing/citest \
# python $CITEST_ROOT/testing/citest/tests/bake_and_deploy_test.py \
# --native_hostname=host-running-smoke-test
# --managed_gce_project=$PROJECT \
# --test_gce_zone=$ZONE
# --jenkins_url=$JENKINS_URL \
# --jenkins_auth_path=$JENKINS_AUTH_PATH \
# --jenkins_job=$JENKINS_JOB \
# --jenkins_token=$JENKINS_TOKEN
# --test_google \
# --test_aws
# pylint: disable=bad-continuation
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# Standard python modules.
import logging
import os
import sys
import time
# citest modules.
import citest.base
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
ov_factory = jc.ObservationPredicateFactory()
class BakeAndDeployTestScenario(sk.SpinnakerTestScenario):
MINIMUM_PROJECT_QUOTA = {
'INSTANCE_TEMPLATES': 1,
'HEALTH_CHECKS': 1,
'FORWARDING_RULES': 1,
'IN_USE_ADDRESSES': 1,
'TARGET_POOLS': 1,
'IMAGES': 1,
}
MINIMUM_REGION_QUOTA = {
'CPUS': 1,
'IN_USE_ADDRESSES': 1,
'INSTANCE_GROUP_MANAGERS': 1,
'INSTANCES': 1,
}
@classmethod
def new_agent(cls, bindings):
return gate.new_agent(bindings)
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser.
Args:
parser: argparse.ArgumentParser
"""
super(BakeAndDeployTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
defaults = defaults or {}
parser.add_argument(
'--jenkins_master', default='',
help='The name of the jenkins master as configured in igor.'
' You may need to override this to an alias depending on firewalls.'
' The Spinnaker server may have permissions, but the citest machine'
' may not. Otherwise, this defaults to Spinnaker\'s binding.')
parser.add_argument(
'--jenkins_job', default='NoOpTrigger',
help='The name of the jenkins job to trigger off.'
' You will need to add this to your --jenkins_master.')
parser.add_argument(
'--jenkins_auth_path', default=None,
help='The path to a file containing the jenkins username password pair.'
'The contents should look like: <username> <password>.')
parser.add_argument(
'--jenkins_token', default='TRIGGER_TOKEN',
help='The authentication token for the jenkins build trigger.'
' This corresponds to the --jenkins_job on the --jenkins_url server')
parser.add_argument(
'--jenkins_url', default='',
help='The baseUrl of the jenkins service,'
' i.e. <protocol>://<host>[:port]/[basePath].'
' You may need to override this to an alias depending on firewalls.'
' The Spinnaker server may have permissions, but the citest machine'
' may not. Otherwise, this can be empty for Spinnaker\'s current'
' binding.')
parser.add_argument(
'--test_google', action='store_true',
help='Test Google pipelines.')
parser.add_argument(
'--test_aws', action='store_true',
help='Test AWS pipelines.')
def _do_init_bindings(self):
logger = logging.getLogger(__name__)
bindings = self.bindings
deployed = self.agent.deployed_config
yaml_node_path = 'services.jenkins.defaultMaster'
if not bindings.get('JENKINS_MASTER'):
bindings['JENKINS_MASTER'] = deployed[yaml_node_path + '.name']
logger.info('Infering JENKINS_MASTER %s', bindings['JENKINS_MASTER'])
if not bindings.get('JENKINS_URL'):
bindings['JENKINS_URL'] = deployed[yaml_node_path + '.baseUrl']
logger.info('Infering JENKINS_URL %s', bindings['JENKINS_URL'])
def __init__(self, bindings, agent=None):
super(BakeAndDeployTestScenario, self).__init__(bindings, agent)
self.logger = logging.getLogger(__name__)
bindings = self.bindings
# We'll call out the app name because it is widely used
# because it scopes the context of our activities.
self.TEST_APP = bindings['TEST_APP']
self.__short_lb_name = 'lb'
self.__full_lb_name = '{app}-{stack}-{detail}'.format(
app=self.TEST_APP, stack=bindings['TEST_STACK'],
detail=self.__short_lb_name)
self.aws_bake_pipeline_id = None
self.aws_destroy_pipeline_id = None
self.google_bake_pipeline_id = None
self.google_destroy_pipeline_id = None
self.__image_id_to_delete = None # Id of the baked image we need to clean up after the B & D pipelines run.
self.docker_pipeline_id = None
self.test_google = bindings['TEST_GOOGLE']
self.test_aws = bindings['TEST_AWS']
# This test has been exceeding the default timeout of 13 minutes for the Jenkins agent,
# so increase the timeout to 20 minutes.
|
longyangking/AlphaGomoku | alphagomoku/Config.py | Python | lgpl-2.1 | 401 | 0.082294 | from enum import Enum
# ChessBoard Info
ChessBoardHeight = 10
ChessBoardWidth = 10
# Role Info
Roles = {'Human': | 0,'Computer':1,'Sel | f_A':2,'Self_B':3, 'Eva_base':4, 'Eva_new':5}
# Chess Info
ChessInfo = {'Human':-1,'Computer':1,'Self_A':1,'Self_B':-1, 'Eva_base':1, 'Eva_new':-1}
# Victory Info
Victories = {'Human':-1,'Computer':1,"NoOneWin":0,'Self_A':1, 'Self_B':-1, 'Eva_base':1, 'Eva_new':-1} |
if1live/wireless-latency-benchmark | benchmark/udp_server.py | Python | mit | 877 | 0.009122 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import common
import SocketServer
import sys
class UDPEchoHandler(SocketServer.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
socket.sendto(data, self.clien | t_address)
print "{} wrote:".format(self.client_address[0]), data
def main():
    """Validate CLI arguments, then run a blocking UDP echo server.

    Returns -1 when the arguments are invalid; otherwise serves forever.
    """
    if not common.validate_server_argv():
        return -1
    HOST = "0.0.0.0"  # listen on all interfaces
    server = SocketServer.UDPServer((HOST, common.SOCKET_PORT), UDPEchoHandler)
    server.serve_forever()


if __name__ == '__main__':
    main()
jmread/cerebro | cerebro/DE.py | Python | gpl-3.0 | 5,119 | 0.017191 | from numpy import *
from Mapper import Mapper
from kNN import kNN
from MLP import MLPbp
from functions import sigmoid, linear
from RLS import RLS
def jump_size(e,grad_e,t):
    """Scale the error ``e`` by ``grad_e + 1`` clamped to [0, 1].

    ``t`` (temperature) is accepted but currently unused.
    """
    scale = clip(grad_e + 1., 0., 1.)
    return e * scale
def project(h,w_origin,jump=0.2,N=100):
'''
USE THIS ONE FOR MAD_0.py
'''
N_i = h.N_i
W = random.randn(N,N_i) * jump + w_origin
# *NEW* weight penalty
y = h.predict(W) # + 0.01*sum(abs(W),axis=1)
i = argmin(y)
return W[i,:]
def rwd(e, _e):
    '''
    2nd-order Reward Function
    ----------------------
    Second-order reward: any improvement over the previous error ``_e``
    is rewarded with zero error; otherwise the raw error ``e`` passes
    through unchanged.
    '''
    if e >= _e:
        return e
    return 0.
def rwd_basic(e, _e):
    """Identity reward: return the raw error ``e``; ``_e`` (previous error) is ignored."""
    return e
class DE(Mapper):
'''
Density Estimator (Mapper)
--------------------------
'''
# A classifier to estimate the density
h = None
def __init__(self,N_d,N_o,threshold=5,f=None,f_rwd=rwd_basic):
#self.h = MLPpf(N_i=len(self.g.get_weight_vector()),N_h=20,N_o=1)
#self.h = RLS(N_i=len(self.g.get_weight_vector()),N_o=1)
self.h = kNN(N_i=N_d,N_o=N_o,k=5,n=100)
#self.h = MLPbp(N_i=N_d,N_h=5,N_o=N_o,f=sigmoid,fo=linear)
self.f = f # NON-LINEARITY (OPTIONAL)
#self.h = RLS(N_d,N_o)
self.learn_rate = 0.1
self.w = zeros(N_d) # random.randn(N_d) * 0.1 # the state (actually a vector of weights)
self.t = 0 # temperature
self.threshold = threshold # deperation threshold
self._y = -100. # old reward/error/target
self.f_rwd = f_rwd
self.burn_in = 5
def map(self, w):
# likelihood function! what observations do we expect given state w
# this map(self,w) function is used in particular by mad0.py
yp = self.h.predict(w.reshape(1,-1))
return yp
def step(self, (o,y)):
'''
The actor carried out y[t] = f(x[t];w[t]) which resulted in error e[t+1]
1. update critic x -> e
2. update actor x;w;e -> y
We get observation y,
and since at the moment the state isn't saved internally here, we can use it here as x
(x is not the same as the original inputs)
'''
#y = y + 0.01 * sqrt(dot(self.x.T,self.x))
################################################################################################
# 1. Update the critic's knowledge map (kNN)
| # (the current self.x (set of weights) produced the result 'y' (e.g., error) in the last round)
################################################################################################
if self.burn_in > 0 | :
# not enough points on map yet
self.h.update(self.w,y)
self.burn_in = self.burn_in - 1
return project(self.h,zeros(self.w.shape),1.,N=1)
yp = self.h.predict(self.w.reshape(1,-1))
delta = (y - yp)
y_ = yp + self.learn_rate * delta
#print "D", y, delta, y_
self.h.update(self.w,y_)
################################################################################################
# 2. Update the actor's knowledge, with queries to the critic
################################################################################################
e_jump = self.f_rwd(y,self._y)
#print "E", self._y, y, e_jump
self.t = self.t + 1
self.w = project(self.h,self.w,jump=e_jump,N=10)
self._y = y
return self.w
def summarize(X):
    '''
    Summarize
    -----------
    Collapse the activity matrix ``X`` (rows = time steps) into a single
    summary vector: the per-column mean.
    '''
    return mean(X, axis=0)
class DEr(DE):
'''
Reinforcement version of DE
'''
h0 = None
def __init__(self,N_d,N_o,N_x,threshold=5):
DE.__init__(self,N_d,N_o,threshold)
#self.h0 = kNN(N_i=N_x,N_o=N_o,k=5,n=100)
self.h0 = RLS(N_x,N_o)
self.emo = -2
def step(self, (X,e)):
################################################################################################
# 2. Update the OTHER map
# (if we just tested the null hypothesis)
################################################################################################
z = summarize(X)
if self.emo == -2:
print "SUMMARIZE", z, e
self.h0.update(z,e)
self.t = self.t + 1
if self.t > 50:
self.t = 0
self.emo = 0
return self.x
else:
# Adjust the error
e_adj = clip(e - self.h0.predict(z),0.,1.)
print "ERROR ADJ", e_adj ,"=", e , "-", self.h0.predict(z)
return DE.step(self,(None,e))
|
kyhau/reko | reko/rekognition.py | Python | mit | 4,130 | 0.001211 | import boto3
DEFAULTS = {
"region_name": "ap-southeast-2" # Sydney
}
def status_code(ret):
    """Extract the HTTP status code from a boto3 response dict."""
    metadata = ret["ResponseMetadata"]
    return metadata["HTTPStatusCode"]
class Rekognition():
    """Thin wrapper around the AWS Rekognition face-collection API.

    Wraps collection management (create/delete/list), face indexing and
    face search for one boto3 session (AWS CLI profile) and region.
    """
    def __init__(self, profile, region=DEFAULTS["region_name"]):
        self.profile = profile
        self.region_name = region
        self.client = self.get_client()

    def get_client(self):
        """Build a Rekognition client from the named AWS CLI profile."""
        session = boto3.Session(profile_name=self.profile)
        client = session.client("rekognition", region_name=self.region_name)
        return client

    def list_collections(self):
        """Print and return all collection ids; [] on a non-200 response."""
        ret = self.client.list_collections()
        if status_code(ret) == 200:
            print("list_collections: {}".format(ret["CollectionIds"]))
            return ret["CollectionIds"]
        print(status_code(ret))
        return []

    def collection_exist(self, collection_id):
        """Return True if `collection_id` already exists in this region."""
        return collection_id in self.list_collections()

    def list_faces(self, collection_id):
        """Print and return the faces indexed in `collection_id`; [] on failure."""
        ret = self.client.list_faces(CollectionId=collection_id)
        if status_code(ret) == 200:
            for face in ret["Faces"]:
                print("FaceId: {}".format(face["FaceId"]))
                print("ImageId: {}".format(face["ImageId"]))
                print("ExternalImageId: {}".format(face["ExternalImageId"]))
                # fixed: source had a garbled "p | rint" token on this line
                print("Confidence: {}".format(face["Confidence"]))
            return ret["Faces"]
        print(status_code(ret))
        return []

    def create_collection(self, collection_id):
        """Create a collection; True on success, False on any error."""
        try:
            ret = self.client.create_collection(CollectionId=collection_id)
            print(ret)
            return True
        except Exception as e:
            print(e)
            return False

    def delete_collection(self, collection_id):
        """Delete a collection; True on success, False on any error."""
        try:
            ret = self.client.delete_collection(CollectionId=collection_id)
            print(ret)
            return True
        except Exception as e:
            print(e)
            return False

    def index_faces(self, image_file, external_image_id, collection_id):
        """Index the faces found in `image_file` into `collection_id`.

        `external_image_id` is attached to every indexed face so later
        matches can be attributed to this image. Returns True on HTTP 200.
        """
        with open(image_file, "rb") as image:
            ret = self.client.index_faces(
                CollectionId=collection_id,
                Image={"Bytes": image.read()},
                ExternalImageId=external_image_id
            )
        if status_code(ret) == 200:
            for rec in ret["FaceRecords"]:
                face = rec["Face"]
                print("FaceId: {}".format(face["FaceId"]))
                print("ImageId: {}".format(face["ImageId"]))
                print("ExternalImageId: {}".format(face["ExternalImageId"]))
                print("Confidence: {}".format(face["Confidence"]))
            return True
        print("Unexpected status code: {}".format(status_code(ret)))
        return False

    def search_faces_by_image(self, image_file, external_image_id, collection_id):
        """
        :param image_file: path to the probe image
        :param external_image_id: if not None, only matches with this id count
        :param collection_id: collection to search
        :return: ExternalImageId of the best match; None if unable to find a match
        """
        id = None
        similarity = 0.0
        with open(image_file, "rb") as image:
            print("Searching faces ...")
            ret = self.client.search_faces_by_image(
                CollectionId=collection_id,
                Image={"Bytes": image.read()},
            )
        print(ret)
        if status_code(ret) == 200:
            for rec in ret["FaceMatches"]:
                if external_image_id is not None and rec["Face"]["ExternalImageId"] != external_image_id:
                    continue
                if rec["Similarity"] > similarity:
                    id = rec["Face"]["ExternalImageId"]
                    # bug fix: `similarity` was never updated, so the LAST
                    # match won instead of the most similar one.
                    similarity = rec["Similarity"]
                print("Similarity: {}".format(rec["Similarity"]))
                print("FaceId: {}".format(rec["Face"]["FaceId"]))
                print("ImageId: {}".format(rec["Face"]["ImageId"]))
                print("ExternalImageId: {}".format(rec["Face"]["ExternalImageId"]))
                print("Confidence: {}".format(rec["Face"]["Confidence"]))
        return id
tokibito/django-edamame | example/note/models.py | Python | mit | 186 | 0 | from django.db import models
class Note(models.Model):
    """A simple note with title, body text and a creation timestamp."""
    # fixed: source had garbled "title | =" / "text | =" assignments
    title = models.CharField(max_length=100)
    text = models.TextField()
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
|
DarthMaulware/EquationGroupLeaks | Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/SCRIPTS/fw_wrapper/tunnel.py | Python | unlicense | 32,118 | 0.026807 |
import bananaglee
import tools
import shelve
import fw_logging
import os
import sys
import cmd
import subprocess
import shlex
import ConfigParser
import logging
class Tunnel(cmd.Cmd):
def __init__(self,sfile,logger):
cmd.Cmd.__init__(self)
self.prompt = 'Tunnel>>'
#shelve file
self.sfile = sfile
#set up logging
name = 'tunnel'
self.logger = logger
#Parse config written by finder
self.config = ConfigParser.ConfigParser()
self.config.read(os.path.join(self.sfile['script_dir'],'tools.cfg'))
self.sfile['miniprog'] = self.config.get('bananaglee'+self.sfile['version'],'miniprog')
self.sfile['miniprog_dir'], tmp = os.path.split(self.sfile['miniprog'])
if self.sfile.has_key('tunnel') == True:
if self.sfile['tunnel'] == True:
pass
else:
self.resetRule()
else:
self.resetRule()
if self.sfile.has_key('current_rule') == False:
self.sfile['current_rule'] = {}
self.logger.debug('did not find a rule that the user had previous')
else:
self.logger.info('Found a rule with the current settings')
for i in self.sfile['current_rule']:
self.logger.info(str(i) + ' : ' + str(self.sfile['current_rule'][i]))
if self.sfile.has_key('persistent_rules') == False:
self.sfile['persistent_rules'] = {}
else:
self.logger.debug('current persistent rules are: ' + str(self.sfile['persistent_rules']))
try:
os.chdir(self.sfile['miniprog_dir'])
except:
self.logger.exception('could to change dirs to the miniprog directory')
#Pull out dict for tunnel
self.logger.info('Starting tunnel module')
self.logger.debug('LP IP: ' + str(self.sfile['lp']))
self.logger.debug('Implant IP: ' + str(self.sfile['implant']))
self.logger.debug('IDKey : ' + str(self.sfile['idkey']))
self.logger.debug('Source Port: ' + str(self.sfile['sport']))
self.logger.debug('Destination Port: ' + str(self.sfile['dport']))
self.logger.debug('Log directory: ' + str(self.sfile['logs_to_process']))
self.logRule()
self.do_show_rule(' ')
if self.sfile.has_key('rules') == False:
self.sfile['rules'] = []
if self.sfile['auto'] == True:
if self.sfile['auto_start'] == True:
if self.sfile['tunnels_dict'] == {}:
self.logger.error('you specified --auto but I could not find your rules')
else:
tunnels = self.sfile['tunnels_dict']
self.logger.debug(tunnels)
keys = tunnels.keys()
for i in keys:
self.sfile['current_rule'] = tunnels[i]
self.logger.debug(tunnels[i])
self.do_upload_rules(' ')
tools.resetAuto(self.sfile,self.logger)
sys.exit()
if self.sfile['auto_end'] == True:
self.do_get_rules(' ')
self.logger.info(self.sfile['rules'])
for i in self.sfile['rules']:
self.do_remove_rule(i)
self.do_get_rules(' ')
bananaglee_mod = bananaglee.BananaGlee(self.sfile, self.logger)
bananaglee_mod.cmdloop()
#used to not run last command when nothing is given
def emptyline(self):
pass
def printStart(self):
if self.sfile['mode'] == 'simple':
self.logger.info(' ------------------Attacker------------------')
self.logger.info(' | ^')
self.logger.info(' v |')
self.logger.info(' Attacker to Firewall Packet Firewall to Attacker Packet')
self.logger.info(' Source IP : attk_source Source IP : attk_dest')
self.logger.info(' Dest IP : attk_dest Dest IP : attk_source')
self.logger.info(' Source Port: attk_sport Source Port: attk_dport')
self.logger.info(' Dest Port: attk_dport Dest Port: attk_sport')
self.logger.info(' | ^')
self.logger.info(' v Iface Num: attk_int |')
self.logger.info(' -------------------------Firewall-------------------------')
self.logger.info(' | Iface Num: tgt_int ^')
self.logger.info(' v |')
self.logger.info(' Firewall to Target Packet Target to Firewall Packet')
self.logger.info(' Source IP : tgt_source Source IP : tgt_dest')
self.logger.info(' Dest IP : tgt_dest Dest IP : tgt_source')
self.logger.info(' Source Port: tgt_sport Source Port: tgt_dport')
self.logger.info(' Dest Port: tgt_dport Dest Port: tgt_sport')
self.logger.info(' | ^')
self.logger.info(' v |')
self.logger.info(' -------------------Target-------------------')
elif self.sfile['mode'] == 'advanced': #have not implemented advance yet
self.logger.info(' ------------------Attacker------------------')
self.logger.info(' | ^')
self.logger.info(' v |')
self.logger.info(' Attacker to Firewall Packet Firewall to Attacker Packet')
self.logger.info(' Source IP : attk_source Source IP : rtn_attk_src')
self.logger.info(' Dest IP : attk_dest Dest IP : rtn_attk_dest')
self.logger.info(' Source Port: attk_sport Source Port: rtn_attk_sport')
self.logger.info(' Dest Port: attk_dport Dest Port: rtn_attk_dport')
self.logger.info(' | ^')
self.logger.info(' v Iface | Num: attk_int |')
self.logger.info(' -------------------------Firewall-------------------------')
self.logger.info(' | Iface Num: tgt_int ^')
| self.logger.info(' v |')
self.logger.info(' Firewall to Target Packet Target to Firewall Packet')
self.logger.info(' Source IP : tgt_source Source IP : rtn_tgt_source')
self.logger.info(' Dest IP : tgt_dest Dest IP : rtn_tgt_dest')
self.logger.info(' Source Port: tgt_sport Source Port: rtn_tgt_sport')
self.logger.info(' Dest Port: tgt_dport Dest Port: rtn_tgt_dport')
self.logger.info(' | ^')
self.logger.info(' v |')
self.logger.info(' -------------------Target-------------------')
def resetRule(self):
self.logger.debug('resetig the dict for current rule')
rule = {'attk_source' : '',
'attk_dest' : '',
'attk_sport' : '0',
'attk_dport' : '0',
'tgt_source' : '',
'tgt_dest' : '',
'tgt_sport' : '0',
'tgt_dport' : '0',
'attk_int' : '',
'tgt_int' : '' }
self.sfile['current_rule'] = rule
self.logger.debug(self.sfile['current_rule'])
#move to tools
def opsec_check(self):
pass
def logRule(self):
self.logger.debug('current rule information')
self.logger.debug('attackers source: ' + |
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/trial/itrial.py | Python | gpl-3.0 | 6,853 | 0.003502 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interfaces for Trial.
Maintainer: Jonathan Lange
"""
from __future__ import division, absolute_import
import zope.interface as zi
from zope.interface import Attribute
class ITestCase(zi.Interface):
"""
The interface that a test case must implement in order to be used in Trial.
"""
failureException = zi.Attribute(
"The exception class that is raised by failed assertions")
def __call__(result):
"""
Run the test. Should always do exactly the same thing as run().
"""
def countTestCases():
"""
Return the number of tests in this test case. Usually 1.
"""
def id():
"""
Return a unique identifier for the test, usually the fully-qualified
Python name.
"""
def run(result):
"""
Run the test, storing the results in C{result}.
@param result: A L{TestResult}.
"""
def shortDescription():
"""
Return a short description of the test.
"""
class IReporter(zi.Interface):
"""
I report results from a run of a test suite.
"""
stream = zi.Attribute(
"Deprecated in Twisted 8.0. "
"The io-stream that this reporter will write to")
tbformat = zi.Attribute("Either 'default', 'brief', or 'verbose'")
args = zi.Attribute(
"Additional string argument passed from the command line")
shouldStop = zi.Attribute(
"""
A boolean indicating that this reporter would like the test run to stop.
""")
separator = Attribute(
"Deprecated in Twisted 8.0. "
"A value which will occasionally be passed to the L{write} method.")
testsRun = Attribute(
"""
The number of tests that seem to have been run according to this
reporter.
""")
def startTest(method):
"""
Report the beginning of a run of a single test method.
@param method: an object that is adaptable to ITestMethod
"""
def stopTest(method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
def startSuite(name):
"""
Deprecated in Twisted 8.0.
Suites which wish to appear in reporter output should call this
before running their tests.
"""
def endSuite(name):
"""
Deprecated in Twisted 8.0.
Called at the end of a suite, if and only if that suite has called
C{startSuite}.
"""
def cleanupErrors(errs):
"""
Deprecated in Twisted 8.0.
Called when the reactor has been left in a 'dirty' state
@param errs: a list of L{twisted.python.failure.Failure}s
"""
def upDownError(userMeth, warn=True, printStatus=True):
"""
Deprecated in Twisted 8.0.
Called when an error occurs in a setUp* or tearDown* method
@param warn: indicates whether or not the reporter should emit a
warning about the error
@type warn: Boolean
@param printStatus: indicates whether or not the reporter should
print the name of the method and the status
message appropriate for the type of error
@type printStatus: Boolean
"""
def addSuccess(test):
"""
Record that test passed.
"""
def addError(test, error):
"""
Record that a test has raised an unexpected exception.
@param test: The test that has raised an error.
@param error: The error that the test raised. It will either be a
three-tuple in the style of C{sys.exc_info()} or a
L{Failure<twisted.python.failure.Failure>} object.
"""
def addFailure(test, failure):
"""
Record that a test has failed with the given failure.
@param test: The test that has failed.
@param failure: The failure that the test failed with. It will
either be a three-tuple in the style of C{sys.exc_info()}
or a L{Failure<twisted.python.failure.Failure>} object.
"""
def addExpectedFailure(test, failure, todo=None):
"""
Record that the given test failed, and was expected to do so.
In Twisted 15.5 and prior, C{todo} was a mandatory parameter.
@type test: L{pyunit.TestCase}
@param test: The test which this is about.
@type error: L{failure.Failure}
@param error: The error which this test failed with.
@type todo: L{unittest.Todo}
@param todo: The reason for the test's TODO status. If C{None}, a
generic reason is used.
"""
def addUnexpectedSuccess(test, todo=None):
"""
Record that the given test failed, and was expected to do so.
In Twisted 15.5 and prior, C{todo} was a mandatory parameter.
@type test: L{pyunit.TestCase}
@param test: The test which this is about.
@type todo: L{unittest.Todo}
@param todo: The reason for the test's TODO status. If C{None}, a
generic reason is used.
"""
def addSkip(test, reason):
"""
Record that a test has been skipped for the given reason.
@param test: The test that has been skipped.
@param reason: An object that the test case has specified as the reason
for skipping the test.
"""
def printSummary():
"""
Deprecated in Twisted 8.0, use L{done} instead.
Present a summary of the test results.
" | ""
def printErrors():
"""
Deprecated in Twisted 8.0, use L{done} instead.
Present the errors that have occ | urred during the test run. This method
will be called after all tests have been run.
"""
def write(string):
"""
Deprecated in Twisted 8.0, use L{done} instead.
Display a string to the user, without appending a new line.
"""
def writeln(string):
"""
Deprecated in Twisted 8.0, use L{done} instead.
Display a string to the user, appending a new line.
"""
def wasSuccessful():
"""
Return a boolean indicating whether all test results that were reported
to this reporter were successful or not.
"""
def done():
"""
Called when the test run is complete.
This gives the result object an opportunity to display a summary of
information to the user. Once you have called C{done} on an
L{IReporter} object, you should assume that the L{IReporter} object is
no longer usable.
"""
|
googleapis/python-dlp | samples/generated_samples/dlp_v2_generated_dlp_service_list_stored_info_types_sync.py | Python | apache-2.0 | 1,491 | 0.000671 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License | for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListStoredInfoTypes
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modificati | ons to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dlp
# [START dlp_v2_generated_DlpService_ListStoredInfoTypes_sync]
from google.cloud import dlp_v2
def sample_list_stored_info_types():
    """List stored infoTypes under a parent resource and print each one."""
    # Create a client
    dlp_client = dlp_v2.DlpServiceClient()
    # Initialize request argument(s)
    list_request = dlp_v2.ListStoredInfoTypesRequest(
        parent="parent_value",
    )
    # Iterate the pager, which transparently fetches successive pages.
    for stored_info_type in dlp_client.list_stored_info_types(request=list_request):
        print(stored_info_type)
# [END dlp_v2_generated_DlpService_ListStoredInfoTypes_sync]
|
takara9/watson_chatbot | weather_report/load3_db_weather.py | Python | epl-1.0 | 1,717 | 0.00975 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Cloudant のデータベース作成
#
# Copyright (C) 2016 International Business Machines Corporation
# and others. All Rights Reserved.
#
#
# https://github.com/cloudant/python-cloudant
# pip install cloudant
#
# API Document
# http://python-cloudant.readthedocs.io/en/latest/cloudant.html
#
import json
import csv
import codecs
import uuid
from cloudant.client import Cloudant
from | cloudant.result import Result, ResultByKey, QueryResult
from cloudant.query import Query
#import cloudant
# Cloudant認証情報の取得
f = open('cloudant_credentia | ls_id.json', 'r')
cred = json.load(f)
f.close()
print cred
# データベース名の取得
f = open('database_name.json', 'r')
dbn = json.load(f)
f.close()
print dbn['name']
client = Cloudant(cred['credentials']['username'],
cred['credentials']['password'],
url=cred['credentials']['url'])
# Connect to the server
client.connect()
# DB選択
db = client[dbn['name']]
# CSVを読んでループを回す
fn = 'weather_code.csv';
reader = csv.reader(codecs.open(fn),delimiter=',',quoting=csv.QUOTE_NONE)
for id, e_description,j_description in reader:
print id, e_description,j_description
data = {
"_id": id,
"e_description": e_description,
"j_description": j_description
}
print data
rx = db.create_document(data)
if rx.exists():
print "SUCCESS!!"
# Disconnect from the server
client.disconnect()
|
Placeware/ThisPlace | thisplace.py | Python | mit | 8,967 | 0.003903 | """
thisplace: Human-readable addresses for every 3x3m square on the earth's surface.
The simplest way to use this module are the `four_words` and `decode`
functions. For more see `WordHasher`.
"""
import random
import geohash
def get_words(fname):
    """Load the wordlist at ``fname``: one word per line, stripped.

    The list is shuffled with a fixed seed (so the word hashes stay stable
    across runs) and truncated to at most 2**15 entries. Raises
    AssertionError if the truncated list contains duplicates.

    NOTE(review): reseeds the module-global ``random`` state as a side
    effect, as the original did.
    """
    # context manager: the file is closed even if an exception occurs
    with open(fname) as lines:
        words = [word.strip() for word in lines]
    random.seed(634634)
    random.shuffle(words)
    words = words[:2**15]
    assert len(words) == len(set(words))
    return words
# These read like alien races from a sci-fi book
GOOGLE_WORDLIST = get_words("words/google-ngram-list")
# | shorter list with only 4096 words
GOOGLE_4096WORDS = get_words("words/google-ngram-list-4096")
# current best list for the three word hash
WORDNET_LEMMAS = get_words("words/wordnet-list")
# Human friendly word list, taken directly from humanhash project
# these are the best words but there are not enough of
# them so we only use them for the six word hash
HUMAN_WORDLIST = (
'ack', 'alabama', 'alanine', 'alaska', 'alpha', 'angel', 'apart', 'april',
'arizona', 'arkansas', 'a | rtist', 'asparagus', 'aspen', 'august', 'autumn',
'avocado', 'bacon', 'bakerloo', 'batman', 'beer', 'berlin', 'beryllium',
'black', 'blossom', 'blue', 'bluebird', 'bravo', 'bulldog', 'burger',
'butter', 'california', 'carbon', 'cardinal', 'carolina', 'carpet', 'cat',
'ceiling', 'charlie', 'chicken', 'coffee', 'cola', 'cold', 'colorado',
'comet', 'connecticut', 'crazy', 'cup', 'dakota', 'december', 'delaware',
'delta', 'diet', 'don', 'double', 'early', 'earth', 'east', 'echo',
'edward', 'eight', 'eighteen', 'eleven', 'emma', 'enemy', 'equal',
'failed', 'fanta', 'fifteen', 'fillet', 'finch', 'fish', 'five', 'fix',
'floor', 'florida', 'football', 'four', 'fourteen', 'foxtrot', 'freddie',
'friend', 'fruit', 'gee', 'georgia', 'glucose', 'golf', 'green', 'grey',
'hamper', 'happy', 'harry', 'hawaii', 'helium', 'high', 'hot', 'hotel',
'hydrogen', 'idaho', 'illinois', 'india', 'indigo', 'ink', 'iowa',
'island', 'item', 'jersey', 'jig', 'johnny', 'juliet', 'july', 'jupiter',
'kansas', 'kentucky', 'kilo', 'king', 'kitten', 'lactose', 'lake', 'lamp',
'lemon', 'leopard', 'lima', 'lion', 'lithium', 'london', 'louisiana',
'low', 'magazine', 'magnesium', 'maine', 'mango', 'march', 'mars',
'maryland', 'massachusetts', 'may', 'mexico', 'michigan', 'mike',
'minnesota', 'mirror', 'mississippi', 'missouri', 'mobile', 'mockingbird',
'monkey', 'montana', 'moon', 'mountain', 'muppet', 'music', 'nebraska',
'neptune', 'network', 'nevada', 'nine', 'nineteen', 'nitrogen', 'north',
'november', 'nuts', 'october', 'ohio', 'oklahoma', 'one', 'orange',
'oranges', 'oregon', 'oscar', 'oven', 'oxygen', 'papa', 'paris', 'pasta',
'pennsylvania', 'pip', 'pizza', 'pluto', 'potato', 'princess', 'purple',
'quebec', 'queen', 'quiet', 'red', 'river', 'robert', 'robin', 'romeo',
'rugby', 'sad', 'salami', 'saturn', 'september', 'seven', 'seventeen',
'shade', 'sierra', 'single', 'sink', 'six', 'sixteen', 'skylark', 'snake',
'social', 'sodium', 'solar', 'south', 'spaghetti', 'speaker', 'spring',
'stairway', 'steak', 'stream', 'summer', 'sweet', 'table', 'tango', 'ten',
'tennessee', 'tennis', 'texas', 'thirteen', 'three', 'timing', 'triple',
'twelve', 'twenty', 'two', 'uncle', 'undress', 'uniform', 'uranus', 'utah',
'vegan', 'venus', 'vermont', 'victor', 'video', 'violet', 'virginia',
'washington', 'west', 'whiskey', 'white', 'william', 'winner', 'winter',
'wisconsin', 'wolfram', 'wyoming', 'xray', 'yankee', 'yellow', 'zebra',
'zulu')
class WordHasher(object):
def __init__(self):
"""Convert latitude and longitudes into human readable strings."""
self._symbols = "0123456789bcdefghjkmnpqrstuvwxyz"
self._decode_symbols = dict((ch, i) for (i, ch) in enumerate(self._symbols))
self._encode_symbols = dict((i, ch) for (i, ch) in enumerate(self._symbols))
self.six_wordlist = HUMAN_WORDLIST
self.four_wordlist = GOOGLE_4096WORDS
self.three_wordlist = GOOGLE_WORDLIST
def three_words(self, lat_long):
"""Convert coordinate to a combination of three words
The coordinate is defined by latitude and longitude
in degrees.
"""
lat, lon = lat_long
gh = geohash.encode(lat, lon, 9)
words = "-".join(self.three_wordlist[p] for p in self.to_rugbits(self.geo_to_int(gh)))
return words
def four_words(self, lat_long):
"""Convert coordinate to a combination of four words
The coordinate is defined by latitude and longitude
in degrees.
"""
lat, lon = lat_long
gh = geohash.encode(lat, lon, 9)
words = "-".join(self.four_wordlist[p] for p in self.to_quads(self.pad(gh)))
return words
def six_words(self, lat_long):
"""Convert coordinate to a combination of six words
The coordinate is defined by latitude and longitude
in degrees.
With six words the word list contains only words
which are short, easy to pronounce and easy distinguish.
"""
lat, lon = lat_long
gh = geohash.encode(lat, lon, 9)
words = "-".join(self.six_wordlist[p] for p in self.to_bytes(self.pad(gh)))
return words
def decode(self, words):
"""Decode `words` to latitude and longitude"""
words = words.split("-")
if len(words) == 3:
i = self.rugbits_to_int([self.three_wordlist.index(w) for w in words])
elif len(words) == 4:
i = self.quads_to_int([self.four_wordlist.index(w) for w in words])
i = self.unpad(i)
elif len(words) == 6:
i = self.bytes_to_int([self.six_wordlist.index(w) for w in words])
i = self.unpad(i)
else:
raise RuntimeError("Do not know how to decode a set of %i words."%(len(words)))
geo_hash = self.int_to_geo(i)
return geohash.decode(geo_hash)
def geo_to_int(self, geo_hash):
"""Decode `geo_hash` to an integer"""
base = len(self._symbols)
number = 0
for symbol in geo_hash:
number = number*base + self._decode_symbols[symbol]
return number
def int_to_geo(self, integer):
"""Encode `integer` to a geo hash"""
base = len(self._symbols)
symbols = []
while integer > 0:
remainder = integer % base
integer //= base
symbols.append(self._encode_symbols[remainder])
return ''.join(reversed(symbols))
def pad(self, geo_hash):
"""Pad nine character `geo_hash` to 48bit integer"""
assert len(geo_hash) == 9
return self.geo_to_int(geo_hash) * 8
def unpad(self, integer):
"""Remove 3bit of padding to get 45bit geo hash"""
return integer>>3
def to_bytes(self, integer):
"""Convert a 48bit `integer` to a list of 6bytes"""
bytes = [integer & 0b11111111]
for n in range(1,6):
div = 2**(n*8)
bytes.append((integer//div) & 0b11111111)
bytes.reverse()
return bytes
def bytes_to_int(self, bytes):
"""Convert a list of 6`bytes` to an integer"""
assert len(bytes) == 6
N = 0
bytes.reverse()
for n,b in enumerate(bytes):
N += b * (2**(8*(n)))
return N
def to_quads(self, integer):
"""Convert a 48bit `integer` to a list of 4 quads"""
quads = [integer & 0b111111111111]
for n in range(1,4):
div = 2**(n*12)
quads.append((integer//div) & 0b111111111111)
quads.reverse()
return quads
def quads_to_int(self, quads):
"""Convert a list of four 12bit values to an integer"""
assert len(quads) == 4
N = 0
quads.reverse()
for n,b in enumerate(quads):
N += b * (2**(12*(n)))
|
HoangNguyenHuy/SocialNetwork | src/SocialNetwork_API/models/timestamped.py | Python | mit | 262 | 0.007634 | from django.db import models
from django.util | s import timezone
class TimeStampedModel(models.Model):
    """Abstract base model adding created_at / updated_at timestamps.

    NOTE(review): updated_at uses default=timezone.now, so it is set on
    creation but NOT refreshed on save (auto_now=True would do that) —
    behavior left unchanged; only the garbled "updated_at | =" assignment
    from the source was repaired.
    """
    created_at = models.DateTimeField(default=timezone.now)
    updated_at = models.DateTimeField(default=timezone.now)

    class Meta:
        abstract = True
bacher09/xrcon | xrcon/client.py | Python | lgpl-3.0 | 6,417 | 0 | import socket
import time
from functools import wraps
import six
from .utils import (
rcon_nosecure_packet,
rcon_secure_time_packet,
rcon_secure_challenge_packet,
parse_challenge_response,
parse_rcon_response,
parse_server_addr,
parse_status_packet,
Player,
CHALLENGE_PACKET,
CHALLENGE_RESPONSE_HEADER,
RCON_RESPONSE_HEADER,
PING_Q2_PACKET,
PONG_Q2_PACKET,
PING_Q3_PACKET,
PONG_Q3_PACKET,
QUAKE_STATUS_PACKET,
STATUS_RESPONSE_HEADER,
MAX_PACKET_SIZE
)
class NotConnected(Exception):
    """Raised when a socket operation is attempted before connect()."""
    pass
def connection_required(fun):
    """Method decorator: raise NotConnected unless ``self.sock`` is set."""
    @wraps(fun)
    def guarded(self, *args, **kwargs):
        if self.sock is not None:
            return fun(self, *args, **kwargs)
        raise NotConnected("You should call connect first")
    return guarded
class QuakeProtocol(object):
    """Client for the Quake-style UDP query protocol.

    Supports fetching a challenge, querying server status and measuring
    ping round-trip times over a connected UDP socket.
    """

    # Seconds to wait for a challenge/status reply before read_iterator
    # raises socket.timeout.
    CHALLENGE_TIMEOUT = 3
    # Callable used by getstatus() to build player objects from status lines.
    player_factory = Player.parse_player

    def __init__(self, host, port, timeout=0.7):
        """Remember the server address; no socket is opened yet.

        host --- server hostname or IP address
        port --- server UDP port
        timeout --- per-recv socket timeout in seconds
        """
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = None  # created by connect()

    def connect(self):
        "Create connection to server"
        # Prefer an IPv4 result when the host resolves to several addresses.
        family, stype, proto, cname, sockaddr = self.best_connection_params(
            self.host, self.port)
        self.sock = socket.socket(family, stype)
        self.sock.settimeout(self.timeout)
        self.sock.connect(sockaddr)

    @connection_required
    def close(self):
        "Close connection"
        self.sock.close()
        self.sock = None

    @connection_required
    def read_iterator(self, timeout=3):
        """Yield received packets until *timeout* seconds have elapsed,
        then raise socket.timeout."""
        timeout_time = time.time() + timeout
        while time.time() < timeout_time:
            yield self.sock.recv(MAX_PACKET_SIZE)
        raise socket.timeout("Read timeout")

    @staticmethod
    def best_connection_params(host, port):
        """Resolve (host, port) for UDP and return one getaddrinfo tuple,
        preferring an IPv4 entry; falls back to the first result."""
        params = socket.getaddrinfo(host, port, 0, socket.SOCK_DGRAM)
        for data in params:
            if data[0] == socket.AF_INET:
                return data
        if len(params) > 0:
            return params[0]

    @connection_required
    def getchallenge(self):
        "Return server challenge"
        self.sock.send(CHALLENGE_PACKET)
        # Wait for the challenge response; a non-matching stream eventually
        # raises socket.timeout from read_iterator.
        for packet in self.read_iterator(self.CHALLENGE_TIMEOUT):
            if packet.startswith(CHALLENGE_RESPONSE_HEADER):
                return parse_challenge_response(packet)

    @connection_required
    def getstatus_packet(self):
        """Return the raw status response packet.

        Raises socket.timeout (via read_iterator) if no status reply
        arrives within CHALLENGE_TIMEOUT seconds.
        """
        self.sock.send(QUAKE_STATUS_PACKET)
        # wait for the status response
        for packet in self.read_iterator(self.CHALLENGE_TIMEOUT):
            if packet.startswith(STATUS_RESPONSE_HEADER):
                return packet

    def getstatus(self):
        """Return the parsed server status (players built via
        player_factory)."""
        packet = self.getstatus_packet()
        if packet is None:
            return None
        return parse_status_packet(packet, self.player_factory)

    def _ping(self, ping_packet, pong_packet, timeout=1):
        """Send *ping_packet*; return round-trip seconds, or None on
        timeout."""
        self.sock.send(ping_packet)
        # wait for the matching pong packet
        start = time.time()
        try:
            for packet in self.read_iterator(timeout):
                if packet == pong_packet:
                    return time.time() - start
        except socket.timeout:
            return None

    @connection_required
    def ping2(self, timeout=1):
        """Ping using the Q2-style packets; see _ping()."""
        return self._ping(PING_Q2_PACKET, PONG_Q2_PACKET, timeout)

    @connection_required
    def ping3(self, timeout=1):
        """Ping using the Q3-style packets; see _ping()."""
        return self._ping(PING_Q3_PACKET, PONG_Q3_PACKET, timeout)

    @classmethod
    def create_by_server_str(cls, server_str, *args, **kwargs):
        """Build an instance from a combined address string (parsed by
        parse_server_addr)."""
        host, port = parse_server_addr(server_str)
        return cls(host, port, *args, **kwargs)
class XRcon(QuakeProtocol):
    """Rcon client on top of QuakeProtocol.

    Supports three signing modes: nosecure (plain), secure-time and
    secure-challenge.
    """

    RCON_NOSECURE = 0
    "Old quake rcon connection"
    RCON_SECURE_TIME = 1
    "secure rcon with time based sign"
    RCON_SECURE_CHALLENGE = 2
    "secure rcon with challenge based sign"

    # Every value accepted by the secure_rcon property.
    RCON_TYPES = frozenset([
        RCON_NOSECURE, RCON_SECURE_TIME, RCON_SECURE_CHALLENGE
    ])

    # Backing field for the secure_rcon property; class-level default.
    _secure_rcon = RCON_SECURE_TIME

    def __init__(self, host, port, password, secure_rcon=RCON_SECURE_TIME,
                 timeout=0.7):
        """ host --- ip address or domain of server
            port --- udp port of server
            password --- rcon password
            secure_rcon --- type of rcon connection, default secure rcon, use 0
                for old quake servers
            timeout --- socket timeout
        """
        super(XRcon, self).__init__(host, port, timeout)
        self.password = password
        self.secure_rcon = secure_rcon  # validated by the property setter

    @property
    def secure_rcon(self):
        "Type of rcon connection"
        return self._secure_rcon

    @secure_rcon.setter
    def secure_rcon(self, value):
        # Reject anything outside the three known rcon modes.
        if value not in self.RCON_TYPES:
            raise ValueError("Bad value of secure_rcon")
        self._secure_rcon = value

    @connection_required
    def send(self, command):
        "Send rcon command to server"
        if self.secure_rcon == self.RCON_NOSECURE:
            self.sock.send(rcon_nosecure_packet(self.password, command))
        elif self.secure_rcon == self.RCON_SECURE_TIME:
            self.sock.send(rcon_secure_time_packet(self.password, command))
        elif self.secure_rcon == self.RCON_SECURE_CHALLENGE:
            # Challenge mode needs a fresh challenge from the server first.
            challenge = self.getchallenge()
            self.sock.send(rcon_secure_challenge_packet(self.password,
                                                        challenge, command))
        else:
            raise ValueError("Bad value of secure_rcon")

    @connection_required
    def read_once(self, timeout=2):
        """Return the payload of the first rcon response packet received."""
        for packet in self.read_iterator(timeout):
            if packet.startswith(RCON_RESPONSE_HEADER):
                return parse_rcon_response(packet)

    @connection_required
    def read_untill(self, timeout=1):
        """Collect rcon response packets until *timeout* elapses.

        Returns the joined payload bytes, or None if nothing was received.
        """
        data = []
        try:
            for packet in self.read_iterator(timeout):
                if packet.startswith(RCON_RESPONSE_HEADER):
                    data.append(parse_rcon_response(packet))
        except socket.timeout:
            pass
        if data:
            return six.b('').join(data)

    @connection_required
    def execute(self, command, timeout=1):
        """Execute rcon command on server and fetch result

        Args:
            command --- executed command
            timeout --- read timeout

        Returns: bytes response
        """
        self.send(command)
        return self.read_untill(timeout)
|
astrofrog/ginga | ginga/gtkw/plugins/Catalogs.py | Python | bsd-3-clause | 31,943 | 0.003131 | #
# Catalogs.py -- Catalogs plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Bunch, Future
from ginga.gtkw import FitsImageCanvasTypesGtk as CanvasTypes
from ginga.gtkw import ColorBar
from ginga import GingaPlugin
from ginga import cmap, imap
from ginga import wcs
import gobject
import gtk
import pango
from ginga.gtkw import GtkHelp
class Catalogs(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Set up plugin state and the drawing canvas for catalog overlays."""
        super(Catalogs, self).__init__(fv, fitsimage)

        self.mycolor = 'skyblue'
        self.color_cursor = 'red'

        # Search/plot options; toggled later from the GUI callbacks.
        self.limit_stars_to_area = False
        self.use_dss_channel = False
        self.plot_max = 500      # upper bound for the plotted subset size
        self.plot_limit = 100    # current size of the plotted subset
        self.plot_start = 0      # index of the first plotted star

        # star list
        self.starlist = []
        # catalog listing
        self.table = None

        # Canvas used to rubber-band the search area on the image.
        canvas = CanvasTypes.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
                            drawdims=True)
        canvas.set_callback('button-release', self.btnup)
        canvas.set_callback('draw-event', self.getarea)
        canvas.setSurface(self.fitsimage)
        self.canvas = canvas
        self.layertag = 'catalog-canvas'
        self.areatag = None
        self.curstar = None

        # Server option lists/params; populated by build_gui().
        self.image_server_options = []
        self.image_server_params = None
        self.catalog_server_options = []
        self.catalog_server_params = None

        self.tooltips = self.fv.w.tooltips
    def build_gui(self, container, future=None):
        """Construct the plugin GUI inside *container*.

        If *future* is given, Ok/Cancel buttons are added in addition to
        the Close button.
        """
        vbox1 = gtk.VBox()

        # Instructions text area at the top.
        self.msgFont = pango.FontDescription("Sans 12")
        tw = gtk.TextView()
        tw.set_wrap_mode(gtk.WRAP_WORD)
        tw.set_left_margin(4)
        tw.set_right_margin(4)
        tw.set_editable(False)
        tw.set_left_margin(4)
        tw.set_right_margin(4)
        tw.modify_font(self.msgFont)
        self.tw = tw

        fr = gtk.Frame(" Instructions ")
        fr.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
        fr.set_label_align(0.1, 0.5)
        fr.add(tw)
        vbox1.pack_start(fr, padding=4, fill=True, expand=False)

        # Notebook holding the "Params" and "Listing" pages, tabs at bottom.
        nb = gtk.Notebook()
        #nb.set_group_id(group)
        #nb.connect("create-window", self.detach_page, group)
        nb.set_tab_pos(gtk.POS_BOTTOM)
        nb.set_scrollable(True)
        nb.set_show_tabs(True)
        nb.set_show_border(False)
        vbox1.pack_start(nb, padding=4, fill=True, expand=True)

        vbox0 = gtk.VBox()
        hbox = gtk.HBox(spacing=4)

        # --- Image server selection frame ---
        vbox = gtk.VBox()
        fr = gtk.Frame(" Image Server ")
        fr.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        fr.set_label_align(0.5, 0.5)
        fr.add(vbox)

        captions = (('Server', 'xlabel'),
                    ('@Server', 'combobox'),
                    ('Use DSS channel', 'checkbutton'),
                    ('Get Image', 'button'))
        w, self.w = GtkHelp.build_info(captions)
        self.w.nb = nb
        self.w.get_image.connect('clicked', lambda w: self.getimage_cb())
        self.w.use_dss_channel.set_active(self.use_dss_channel)
        self.w.use_dss_channel.connect('toggled', self.use_dss_channel_cb)

        vbox.pack_start(w, padding=4, fill=True, expand=False)
        self.w.img_params = gtk.VBox()
        vbox.pack_start(self.w.img_params, padding=4, fill=True, expand=False)

        # Populate the image-server combobox and select the first entry.
        combobox = self.w.server
        index = 0
        self.image_server_options = self.fv.imgsrv.getServerNames(kind='image')
        for name in self.image_server_options:
            combobox.insert_text(index, name)
            index += 1
        index = 0
        combobox.set_active(index)
        combobox.sconnect('changed', self.setup_params_image)
        if len(self.image_server_options) > 0:
            self.setup_params_image(combobox, redo=False)

        hbox.pack_start(fr, fill=True, expand=True)

        # --- Catalog server selection frame ---
        vbox = gtk.VBox()
        fr = gtk.Frame(" Catalog Server ")
        fr.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        fr.set_label_align(0.5, 0.5)
        fr.add(vbox)

        captions = (('Server', 'xlabel'),
                    ('@Server', 'combobox'),
                    ('Limit stars to area', 'checkbutton'),
                    ('Search', 'button'))
        w, self.w2 = GtkHelp.build_info(captions)
        self.w2.search.connect('clicked', lambda w: self.getcatalog_cb())
        self.w2.limit_stars_to_area.set_active(self.limit_stars_to_area)
        self.w2.limit_stars_to_area.connect('toggled', self.limit_area_cb)

        vbox.pack_start(w, padding=4, fill=True, expand=False)
        self.w2.cat_params = gtk.VBox()
        vbox.pack_start(self.w2.cat_params, padding=4, fill=True, expand=False)

        # Populate the catalog-server combobox and select the first entry.
        combobox = self.w2.server
        index = 0
        self.catalog_server_options = self.fv.imgsrv.getServerNames(kind='catalog')
        for name in self.catalog_server_options:
            combobox.insert_text(index, name)
            index += 1
        index = 0
        combobox.set_active(index)
        combobox.sconnect('changed', self.setup_params_catalog)
        if len(self.catalog_server_options) > 0:
            self.setup_params_catalog(combobox, redo=False)

        hbox.pack_start(fr, fill=True, expand=True)
        vbox0.pack_start(hbox, fill=True, expand=True)

        # Button to seed the search parameters from the whole image.
        btns = gtk.HButtonBox()
        btns.set_layout(gtk.BUTTONBOX_CENTER)
        btns.set_spacing(5)
        btn = gtk.Button("Set parameters from entire image")
        btn.connect('clicked', lambda w: self.setfromimage())
        btns.add(btn)
        vbox0.pack_start(btns, padding=4, fill=True, expand=False)

        lbl = gtk.Label("Params")
        self.w.params = vbox0
        nb.append_page(vbox0, lbl)

        # --- Listing page: catalog table plus pagination controls ---
        vbox = gtk.VBox()
        self.table = CatalogListing(self.logger, vbox)

        hbox = gtk.HBox()
        # Scrollbar chooses which subset of the stars is plotted.
        scale = gtk.HScrollbar()
        adj = scale.get_adjustment()
        adj.configure(0, 0, 0, 1, 10, self.plot_limit)
        #scale.set_size_request(200, -1)
        self.tooltips.set_tip(scale, "Choose subset of stars plotted")
        #scale.set_update_policy(gtk.UPDATE_DELAYED)
        scale.set_update_policy(gtk.UPDATE_CONTINUOUS)
        self.w.plotgrp = scale
        scale.connect('value-changed', self.plot_pct_cb)
        hbox.pack_start(scale, padding=2, fill=True, expand=True)

        # Spin button adjusts the size of the plotted subset.
        sb = GtkHelp.SpinButton()
        adj = sb.get_adjustment()
        adj.configure(self.plot_limit, 10, self.plot_max, 10, 100, 100)
        self.w.plotnum = sb
        self.tooltips.set_tip(sb, "Adjust size of subset of stars plotted")
        sb.connect('value-changed', self.plot_limit_cb)
        hbox.pack_start(sb, padding=2, fill=False, expand=False)
        vbox.pack_start(hbox, padding=0, fill=False, expand=False)
        #vbox1.pack_start(vbox, padding=4, fill=True, expand=True)

        lbl = gtk.Label("Listing")
        self.w.listing = vbox
        nb.append_page(vbox, lbl)

        # --- Bottom button row ---
        btns = gtk.HButtonBox()
        btns.set_layout(gtk.BUTTONBOX_START)
        btns.set_spacing(3)
        btns.set_child_size(15, -1)
        self.w.buttons = btns

        btn = gtk.Button("Close")
        btn.connect('clicked', lambda w: self.close())
        btns.add(btn)

        if future:
            btn = gtk.Button('Ok')
            btn.connect('clicked', lambda w: self.ok())
            btns.add(btn)
            btn = gtk.Button('Cancel')
            btn.connect('clicked', lambda w: self.cancel())
            btns.add(btn)
        vbox1.pack_start(btns, padding=4, fill=True, expand=False)
        vbox1.show_all()

        container.pack_start(vbox1, padding=0, fill=True, expand=True)
def limit_area_cb(self, w):
self.limit_stars_to_area = w.get_active()
return True
def use_dss_channel_cb(self, w):
self.use_dss_channel = w.get_active()
return True
def plot_pct_cb(self, rng):
val = rng.get_value()
self.plot_start = int(val)
self.replot_stars()
|
KrisCheng/HackerPractice | Python/module/urllibDemo.py | Python | mit | 326 | 0 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request
# URL fetch: request a Douban book resource and print status, headers, body.
with request.urlopen('https://api.douban.com/v2/book/2129650') as resp:
    body = resp.read()
    print('Status:', resp.status, resp.reason)
    for name, value in resp.getheaders():
        print('%s : %s' % (name, value))
    print('Data:', body.decode('utf-8'))
|
BRiDGEIris/cgs-apps | code/apps/variants/src/variants/serializers.py | Python | apache-2.0 | 32,788 | 0.00549 | from rest_framework import serializers
from variants.models import *
from beeswax.design import hql_query
from beeswax.server import dbms
from beeswax.server.dbms import get_query_server_config
from hbase.api import HbaseApi
from converters import *
from models import *
import time
import os
import json
import threading
# The fields of the following serializers directly come from https://cloud.google.com/genomics/v1beta2/reference/
class VCFSerializer(serializers.Serializer):
    """REST-framework serializer describing one imported VCF file."""
    pk = serializers.IntegerField(read_only=True)
    filename = serializers.CharField(max_length=100)
    # Ids of the different patients inside the vcf, separated by a comma
    patients = serializers.CharField(max_length=1000)
    # Whether the file's variants have already been analyzed/imported.
    analyzed = serializers.BooleanField(default=False)
def post(self, request, filename, current_analysis, current_organization):
"""
Insert a new vcf file inside the database
"""
result = {'status': -1,'data': {}}
# We take the files in the current user directory
init_path = directory_current_user(request)
files = list_directory_content(request, init_path, ".vcf", True)
length = 0
for f in files:
new_name = f['path'].replace(init_path+"/","", 1)
if new_name == filename:
length = f['stats']['size']
break
if length == 0:
# File not found
result['status'] = 0
result['error'] = 'The vcf file given was not found in the cgs file system.'
return result
# We take the number of samples (and their name) in the vcf file
samples = sample_insert_vcfinfo(request, filename, length)
samples_quantity = len(samples)
if samples_quantity == 0:
error_sample = True
result['status'] = 0
result['error'] = 'No sample found in the given file'
return result
# Some checks first about the sample data
if request.method != 'POST':
result['status'] = 0
result['error'] = 'You have to send a POST request.'
return result
if not 'vcf_data' in request.POST:
result['status'] = 0
result['error'] = 'The vcf data were not given. You have to send a POST field called "vcf_data" with the information about the related file given in parameter.'
return result
raw_lines = request.POST['vcf_data'].split(";")
samples_quantity_received = len(raw_lines)
if samples_quantity_received == samples_quantity + 1 and not raw_lines[len(raw_lines)-1]:# We allow the final ';'
raw_lines.pop()
samples_quantity_received = samples_quantity
if samples_quantity != samples_quantity_received and False:# TODO: remove this part of code later
fprint(request.POST['vcf_data'])
result['status'] = 0
result['error'] = 'The number of samples sent do not correspond to the number of samples found in the vcf file ('+str(samples_quantity_received)+' vs '+str(samples_quantity)+').'
return result
questions, q, files = sample_insert_questions(request)
questions_quantity = len(q)
for raw_line in raw_lines:
if len(raw_line.split(",")) != questions_quantity and False:# TODO: remove this part of code later
result['status'] = 0
result['error'] = 'The number of information sent do not correspond to the number of questions asked for each sample ('+str(len(raw_line.split(",")))+' vs '+str(questions_quantity)+').'
return result
# Connexion to the db
try:
query_server = get_query_server_config(name='impala')
db = dbms.get(request.user, query_server=query_server)
dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
except Exception:
result['status'] = 0
result['error'] = 'Sorry, an error occured: Impossible to connect to the db.'
return result
# Now we analyze each sample information
try:
tsv_content = ''
for raw_line in raw_lines:
answers = raw_line.split(",")
# We check each answer for each question
current_sample = {}
for key, answer in enumerate(answers):
# We take the related field
field = q[key]
info = questions['sample_registration'][field]
# We check if the information is correct
if not type(info) is dict:
pass # Nothing to do here, it's normal. We could compare the sample id received from the ones found in the file maybe.
elif info['field'] == 'select':
if not answer in info['fields']:
result['status'] = 0
result['error'] = 'The value "'+str(answer)+'" given for the field "'+field+'" is invalid (Valid values: '+str(info['fields'])+').'
return result
else:
# TODO: make the different verification of the 'text' and 'date' format
pass
current_sample[field] = answer
fprint(current_sample)
if not 'sample_id' in current_sample:
current_sample['sample_id'] = ''
sample_id = str(current_sample['sample_id'])
if not 'patient_id' in current_sample:
current_sample['patient_id'] = ''
patient_id = str(current_sample['patient_id'])
if not 'sample_collection_date' in current_sample:
current_sample['sample_collection_date'] = ''
date_of_collection = str(current_sample['sample_collection_date'])
if not 'original_sample_id' in current_sample:
current_sample['original_sample_id'] = ''
original_sample_id = str(current_sample['original_sa | mple_id'])
if not 'collection_status' in current_sample:
current_samp | le['collection_status'] = ''
status = str(current_sample['collection_status'])
if not 'sample_type' in current_sample:
current_sample['sample_type'] = ''
sample_type = str(current_sample['sample_type'])
if not 'biological_contamination' in current_sample:
current_sample['biological_contamination'] = '0'
biological_contamination = str(current_sample['biological_contamination'])
if not 'sample_storage_condition' in current_sample:
current_sample['sample_storage_condition'] = ''
storage_condition = str(current_sample['sample_storage_condition'])
if not 'biobank_id' in current_sample:
current_sample['biobank_id'] = ''
biobank_id = str(current_sample['biobank_id'])
if not 'pn_id' in current_sample:
current_sample['pn_id'] = ''
pn_id = str(current_sample['pn_id'])
# We create the tsv content
tsv_content += sample_id + ','+ patient_id + ',' +date_of_collection+','+original_sample_id+','+status+','+sample_type+','+biological_contamination+','+storage_condition+','+biobank_id+','+pn_id+'\r\n'
tsv_path = '/user/cgs/cgs_'+request.user.username+'_vcf_import.tsv'
request.fs.create(tsv_path, overwrite=True, data=tsv_content)
# We insert the data
query = hql_query("load data inpath '/user/cgs/cgs_"+request.user.username+"_vcf_import.tsv' into table clinical_sample;")
handle = db.execute_and_wait(query, timeout_sec=30.0)
except:
pass
# We analyze the vcf, then insert the data inside hbase & impala. We don't wait for the import to finish to return the page
result['text'] = 'The import started |
ryfeus/lambda-packs | pytorch/source/numpy/core/memmap.py | Python | mit | 11,612 | 0.000775 | from __future__ import division, absolute_import, print_function
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import (
long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
)
from numpy.core.overrides import set_module
__all__ = ['memmap']

# Alias for ``dtype``; referenced later in this module (memmap.__new__).
dtypedescr = dtype
# File modes accepted by memmap(); see the class docstring for meanings.
valid_filemodes = ["r", "c", "r+", "w+"]
# Subset of modes that allow writing through to the underlying file.
writeable_filemodes = ["r+", "w+"]

# Long-form spellings accepted for the ``mode`` argument.
mode_equivalents = {
    "readonly":"r",
    "copyonwrite":"c",
    "readwrite":"r+",
    "write":"w+"
    }
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmap's are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Delete the memmap instance to close the memmap file.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
file are valid; The file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'. |
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on | disk.
When you delete a memmap object, flush is called first to write
changes to disk before removing the object.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Deletion flushes memory changes to disk before removing the object:
>>> del fp
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError:
if mode not in valid_filemodes:
raise ValueError("mode must be one of %s" %
(valid_filemodes + list(mode_equivalents.keys())))
if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, 'read'):
f_ctx = contextlib_nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if byt |
anjianshi/flask-restful-extend | flask_restful_extend/reqparse_fixed_type.py | Python | mit | 425 | 0.011765 | # -*- coding: utf-8 -*-
fr | om datetime import datetime, date
import six
def fix_number(target_type):
    """Return a converter mapping empty strings to None, else target_type(value).

    Used below to build ``fixed_int`` and ``fixed_float``.
    """
    def convert(value):
        # Treat an empty (byte or unicode) string as "no value supplied".
        if isinstance(value, (str, six.text_type)) and len(value) == 0:
            return None
        return target_type(value)
    return convert


def fixed_datetime(time_str):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime."""
    return datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')


def fixed_date(time_str):
    """Convert a POSIX timestamp into a date.

    NOTE(review): despite the parameter name, ``date.fromtimestamp``
    expects a numeric timestamp, not a string -- confirm what callers pass.
    """
    return date.fromtimestamp(time_str)


fixed_int = fix_number(int)
fixed_float = fix_number(float)
|
erja-gp/openthread | tools/harness-automation/cases/reed_5_7_2.py | Python | bsd-3-clause | 1,872 | 0.001603 | #!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, P | ROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case im | port HarnessCase
class REED_5_7_2(HarnessCase):
    """Thread test-harness case 5.7.2, executed in the REED role."""
    role = HarnessCase.ROLE_REED
    case = '5 7 2'
    # Number of golden (reference) devices the harness must provide.
    golden_devices_required = 16

    def on_dialog(self, dialog, title):
        # No dialog interaction is needed for this case.
        pass


if __name__ == '__main__':
    unittest.main()
|
gnuvet/gnuvet | gtable.py | Python | gpl-3.0 | 37,590 | 0.004363 | # -*- coding: utf-8 -*-
"""Strict unflexible table constructor: The hand-knitted table for GnuVet.
Provides html formatting in cells.
First set_headers if required, then append_row and set_alignment, finally
adjust rows2contents if appropriate.
Setting headers after filling table deletes all table entries.
Optionally set_colwidth before or after filling table.
Horizontal header optional, vertical header none.
HScrollbar default AlwaysOff (1), VScrollbar default AsNeeded (0).
"""
# Copyright (c) 2015 Dipl.Tzt. Enno Deimel <ennodotvetatgmxdotnet>
#
# This file is part of gnuvet, published under the GNU General Public License
# version 3 or later (GPLv3+ in short). See the file LICENSE for information.
# todo:
# add check for table.width() > frame.width() on column-resize -> add HScrollBar
# height check doesn't work yet
# add doubleclick on headsep for set_colwidth()? c.toolTip().hideText()?
# add moving mostright->mostleftnext for calendar and such?
# add deleting entry_cell? Works with appoint.py without that...
# test all possible problems
from PyQt4.QtCore import pyqtSignal, Qt, QEvent
from PyQt4.QtGui import QFrame, QLabel, QPixmap, QSc | rollArea
class Gcell(QLabel):
    """Table cell."""
    clicked = pyqtSignal(object)
    rightclicked = pyqtSignal(object) # is QPoint
    selectable = True   # whether clicks/double-clicks are honored
    entry = False       # True while the cell holds text or a pixmap
    data = None         # optional payload attached via setdata()
    children = 0

    def __init__(self, parent=None, txt='', col=None, data=None):
        """Create a cell in column *col* (defaults to the next free column
        of the table's last row) with optional text/pixmap and payload."""
        super(Gcell, self).__init__(parent)
        self.setAttribute(55) # Qt.WA_DeleteOnClose
        # parent normally gtable.table, of that gtable, of that gtable.parent
        # in case of additional cells (create_cell): parent of parent
        try:
            self.mother = parent.parent().parent()
        except TypeError:
            self.mother = parent.parent.parent().parent()
        self.setMargin(2)
        self.parent = parent
        if col is None:
            col = len(self.mother.lrows[-1])
        # Apply the table's per-column alignment, defaulting to left.
        if col in self.mother.alignments:
            self.setAlignment(
                self.mother.ck_alignment(self.mother.alignments[col]))
        else:
            self.setAlignment(Qt.AlignLeft)
        self.setWordWrap(True)
        self.col = col
        if txt:
            if type(txt) == QPixmap:
                self.setPixmap(txt)
            else:
                self.setText(txt)
            self.entry = True
        if data:
            self.data = data
        self.tabw = 58  # width in pixels allotted per tab stop

    def elidetext(self, txt):
        """Set the label text to *txt* elided to fit the cell width.

        Tab-separated segments are laid out on multiples of ``tabw``;
        the full text is kept in ``self.fulltext``.
        """
        self.fulltext = txt.strip()
        fm = self.fontMetrics()
        text = txt.split('\t')
        lbwidth = self.width()
        if len(text) > 1:
            # Compute the pixel width consumed by each tabbed segment:
            # the last segment is measured exactly, the others are rounded
            # up to the next tab stop.
            widths = []
            for i, e in enumerate(text):
                if i == len(text)-1:
                    widths.append(fm.boundingRect(e.strip()).width())
                    break
                if fm.boundingRect(e).width() < self.tabw:
                    widths.append(self.tabw)
                else:
                    widths.append(
                        (fm.boundingRect(e.strip())
                         .width()//self.tabw+1)*self.tabw)
            # Accumulate segments until the cell width is exceeded, then
            # elide the overflowing segment and stop.
            summa = 0
            etxt = ''
            for i, e in enumerate(text):
                summa += widths[i]
                if summa > lbwidth:
                    etxt += fm.elidedText(text[i], 1, lbwidth-sum(widths[:i]))
                    # Clean up a trailing tab left before the elision mark.
                    if etxt[-2] == '\t':
                        etxt = etxt[:-2] + etxt[-1]
                    elif etxt[-1] == '\t':
                        etxt = etxt[:-1] + u'\u2026'
                    break
                else:
                    etxt += text[i] + '\t'
        else:
            etxt = fm.elidedText(txt, 1, lbwidth)
        self.setText(etxt)

    def mouseDoubleClickEvent(self, ev): # GCELL, ev is QMouseEvent
        # Ignore double clicks on non-selectable cells.
        if not self.selectable: return
        self.mother.doubleclicked.emit(self)

    def mousePressEvent(self, ev): # ev is QMouseEvent
        # Honor clicks only on selectable cells (unless the table is in
        # "select next" mode).
        if not self.selectable and not self.mother.selnext: return
        self.clicked.emit(self)
        if ev.button() == Qt.RightButton:
            self.rightclicked.emit(ev.globalPos()) # for contextmenu

    def row(self):
        """Return this cell's row index within its column; fall back to the
        parent cell's row when the cell is not in the column list."""
        col = self.mother.column(self.col)
        if self in col:
            return col.index(self)
        if hasattr(self.parent, 'row'):
            return self.parent.row()

    def setdata(self, data=None):
        """Attach (or clear) the cell's payload."""
        self.data = data

    def setText(self, txt=''):
        """Set the label text and track whether the cell is in use."""
        super(Gcell, self).setText(txt)
        if txt: # entry still in use?
            self.entry = True
        else:
            self.entry = False
class Headsep(QFrame):
    """Head Separator for resizing columns via mouse."""
    # Emitted while dragging: (column index, horizontal delta in pixels).
    mousemov = pyqtSignal(int, int)

    def __init__(self, parent, cell, idx):
        """Create a drag grip for column *idx*, positioned on the right
        edge of header cell *cell*."""
        super(Headsep, self).__init__(parent)
        self.setAttribute(55) # Qt.WA_DeleteOnClose
        self.setFrameShape(0)
        self.setFrameShadow(0)
        self.resize(4, cell.height())
        # Center the 4px-wide grip on the cell's right edge.
        self.move(cell.x()+cell.width()-2, 0)
        self.setCursor(Qt.SplitHCursor)
        self.idx = idx

    def mouseMoveEvent(self, ev):
        # While the left button is held, report the horizontal movement
        # since the previous event and remember the new position.
        if ev.buttons() == Qt.LeftButton:
            w = ev.globalX()-self.startx
            self.mousemov.emit(self.idx, w)
            self.startx = ev.globalX()

    def mousePressEvent(self, ev):
        # Remember where the drag started.
        if ev.buttons() == Qt.LeftButton:
            self.startx = ev.globalX()
class Gtable(QScrollArea):
"""Main widget."""
doubleclicked = pyqtSignal(object)
rightclicked = pyqtSignal(object) # is QPoint
rowchanged = pyqtSignal(int)
selected = pyqtSignal(object)
debug = False
alter = False
jump = False # on select unselectable jump to next cell in keyed direction
selnext = False # on select unselectable select next lower cell
prevpos = 0
rowmode = True # True select row False select cell
selcell = None
selcol = 0
selrow = None
gcellss = """Gcell {{
background: {};
color: {};
border: 1px solid {};
border-radius: 3px;
}}
"""
alternate = ('lightyellow', 'black', 'lightgray') # what about text colour?
empty = ('white', 'white', 'white')
normal = ('white', 'black', 'lightgray')
## nofocus = ('lightgray', 'black', 'lightslategray') # nice look but slow
selection = ('darkblue', 'white', 'lightgray')
prevss = ''
    def __init__(self, parent=None, scrollh=1, scrollv=0, resizecols=False):
        """Create the table scroll area.

        scroll policies: 0 as needed, 1 never, 2 always
        resizecols: whether columns may be resized via Headsep grips
        """
        super(Gtable, self).__init__(parent)
        self.setHorizontalScrollBarPolicy(scrollh)
        self.scrollh = scrollh
        self.setVerticalScrollBarPolicy(scrollv)
        self.scrollv = scrollv
        self.colresize = resizecols # GTABLE
        # Per-column alignment codes, keyed by column index.
        self.alignments = {}
        self.headers = []
        self.headseps= []
        self.hheight = 0 # header height
        self.hiddencols = {}
        self.lrows = [] # list of rows holds lists of cells w/o header row
        self.colwids = []
        self.maxy = 0 # bottom pos of last inserted cell (row)
        self.setFrameShape(1)
        self.setLineWidth(1)
        # Inner frame that actually hosts the Gcell widgets.
        self.table = QFrame(self)
        self.table.setStyleSheet(
            """QFrame {
            background: lightgray; }""")
        self.setWidget(self.table)
        ## self.vw = self.style().pixelMetric(self.style().PM_ScrollBarExtent)?
def adjust_scroll(self):
self.setHorizontalScrollBarPolicy(self.scrollh)
self.setVerticalScrollBarPolicy(self.scrollv)
def adjust_width(self, cell, col): # Gtable, currently unused
"""Adjust cell width (if not set by set_colwidth)."""
omaxheight = cell.maximumHeight()
cell.setMaximumHeight(cell.height())
cell.adjustSize()
cell.setMaximumHeight(omaxheight)
if col < len(self.colwids) - 1:
if self.colwids[col] < cell.width():
self.set_colwidth(col, cell.width())
elif col == len(self.colwids):
self.colwids.append(cell.width())
def align_data(self, col, align='l'):
self.alignments[col] = align
if not self.lrows or col >= len(self.lrows[0]):
return
start = 0 |
ecino/compassion-switzerland | mobile_app_switzerland/controllers/__init__.py | Python | agpl-3.0 | 389 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH | (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import re | gistration_controller
|
licongyu95/learning_python | core_python_programming/cap10/cap10.py | Python | unlicense | 487 | 0.01848 | #!/usr/bin/env python
#encoding=utf-8
import os
try:
f = file('blah','r')
except IOError,e:
print 'could not open file:',e
def safe_float(obj):
try:
return float(obj)
exce | pt ValueError:
pass
ccfile = N | one
log = file('log.txt','w+')
try:
ccfile = file('card.txt','r')
txns = ccfile.readlines()
ccfile.close()
except IOError:
log.write('no txns this month%s' % os.linesep)
finally:
log.close()
if ccfile:
ccfile.close()
|
MobileCloudNetworking/epcaas | bundle/wsgi/DNSaaSClient.py | Python | apache-2.0 | 13,958 | 0.002866 | # Copyright 2014 Copyright (c) 2013-2015, OneSource, Portugal.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = 'Claudio Marques / Bruno Sousa - OneSource'
__copyright__ = "Copyright (c) 2013-2015, Mobile Cloud Networking (MCN) project"
__credits__ = ["Claudio Marques - Bruno Sousa"]
__license__ = "Apache"
__version__ = "1.0"
__maintainer__ = "Claudio Marques - Bruno Sousa"
__email__ = "claudio@onesource.pt, bmsousa@onesource.pt"
__status__ = "Production"
import traceback
import requests
import json
import re
import time
class DNSaaSClientCore:
"""
Works as a client to the API DNSaaS. This class can be employed by other MCN services, or applications that require
services from DNSaaS.
"""
idDomain = None
idRecord = None
def __init__(self, ip_api, token):
self.version = 1
self.token = token
self.apiurl_dnsaas = "http://" + ip_api + ":8080"
def do_request(self, method, path, body, token):
"""
Method to perform requests to the DNSaaS API. Requests can include creation, delete and other operations.
This method needs to handle requests through a REST API.
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json; charset=UTF-8',
'X-auth-token': token
}
try:
"""
TODO: check error output on delete operations.
"""
r = requests.request(method, self.apiurl_dnsaas + path, data=body, headers=headers)
except:
traceback.print_exc()
return -1, "Problem with the request. Error:"
return r.status_code, json.loads(r.text)
class DNSaaSClientAction:
"""
Class representing the object to interact with DNSaaS
"""
__dnsaasclient = None
__location = None
__urldnsaas = None
__fwdaddresses = None
__token = None
__endpoint = None
__tenant = None
__maas_endpoint_address = None
__maas_endpoint = None
__dispose_maas = None
@classmethod
def set_location(cls, arg):
if arg != cls.__location:
cls.__location = arg
@classmethod
def get_location(cls):
return cls.__location
@classmethod
def set_tenant(cls, arg):
if arg != cls.__tenant:
cls.__tenant = arg
@classmethod
def get_tenant(cls):
return cls.__tenant
@classmethod
def get_maas_endpoint_address(cls):
return cls.__maas_endpoint_address
@classmethod
def set_maas_endpoint_address(cls, arg):
if arg != cls.__maas_endpoint_address:
cls.__maas_endpoint_address = arg
@classmethod
def get_maas_endpoint(cls):
return cls.__maas_endpoint
@classmethod
def set_maas_endpoint(cls, arg):
if arg != cls.__maas_endpoint:
cls.__maas_endpoint = arg
@classmethod
def get_dispose_maas(cls):
return cls.__dispose_maas
@classmethod
def set_dispose_maas(cls, arg):
if arg != cls.__dispose_maas:
cls.__dispose_maas = arg
def __init__(self, endpoint, tenant, token, maas_endpoint_address, maas_endpoint, dispose_maas):
self.__endpoint = endpoint
# self.__tenant = tenant
if DNSaaSClientAction.get_tenant() is None:
DNSaaSClientAction.set_tenant(tenant)
self.__token = token
if DNSaaSClientAction.get_maas_endpoint_address() is None:
DNSaaSClientAction.set_maas_endpoint_address(maas_endpoint_address)
if DNSaaSClientAction.get_dispose_maas() is None:
DNSaaSClientAction.set_dispose_maas(dispose_maas)
if DNSaaSClientAction.get_maas_endpoint() is None:
DNSaaSClientAction.set_maas_endpoint(maas_endpoint)
"""
Domain Methods
"""
def create_domain(self, domain_name, email, ttl, token):
"""
Method used to create a domain
:param domain_name:Domain name
:param email: Domain administrator e-mail address
:param ttl: Time to live
:param token: Token
:return: Status 1 for success, or a description of error
"""
msg_json = {'name': domain_name, 'ttl': ttl, 'email': email}
status, content = self.__dnsaasclient.do_request('POST', '/domains', json.dumps(msg_json), token)
return content
def get_domain(self, domain_name, token):
"""
Method used to get the information regarding a domain
:param domain_name: Domain
:param token: token
:return: The information of the domain
"""
msg_json = {'domain_name': domain_name}
status, content = self.__dnsaasclient.do_request('GET', '/domains', json.dumps(msg_json), token)
return content
def update_domain(self, domain_name, parameter_to_update, data, token):
"""
Method used to update a domain information
:param domain_name: Domain name
:param parameter_to_update: Parameter to update, ttl, email, description
:param data: The actual information to update
:param token: Token
:return: Status 1 for success, or a description of error
"""
msg_json = {'domain_name': domain_name, 'parameter_to_update': parameter_to_update, 'data': data}
status, content = self.__dnsaasclient.do_request('PUT', '/domains', json.dumps(msg_json), token)
return content
def delete_domain(self, domain_name, token):
"""
Method used to delete a Domain
:param domain_name: Domain name
:param token: Token
:return: Status 1 for success, or a description of error
"""
msg_json = {'domain_name': domain_name}
status, content = self.__dnsaasclient.do_request('DELETE', '/domains', json.dumps(msg_json), token)
return content
"""
Record Methods
"""
def create_record(self, domain_name, record_name, record_type, record_data, token, **kwargs):
"""
Method used to create a record
:param domain_name: Domain name
:param record_name: Record name
:param record_type: Record type
:param record | _data: record data
:param token: Token
:param kwargs: Priority for the record
:return: Status 1 for success, or a description of error
"""
if record_type in ['A', 'AAAA', 'TXT', 'MX', 'PTR', 'SRV', 'NS', 'CNAME', 'SPF', 'SSHFP', 'NAPTR']:
json_record = ''
if record_type == 'MX':
if 'priority' in kwargs:
priority = kwargs['priority']
else:
priority = 10
|
record_name = ''
json_record = {'domain_name': domain_name, 'record_name': record_name, 'record_type': record_type,
'data': record_data, 'priority': int(priority)}
elif record_type in ['SRV', 'NAPTR']:
if 'priority' in kwargs:
priority = kwargs['priority']
else:
priority = 10
json_record = {'domain_name': domain_name, 'record_name': record_name, 'record_type': record_type,
'data': record_data, 'priority': int(priority)}
elif record_type == 'NS':
j |
ktsaou/netdata | collectors/python.d.plugin/python_modules/pyyaml3/composer.py | Python | gpl-3.0 | 4,912 | 0.003664 | # SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
from .error import MarkedYAMLError
from .events import *
from .nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer:
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor, event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor, self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == '!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
| while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == | '!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
|
eleme/polaris | polaris/server.py | Python | mit | 3,719 | 0 | """
polaris.server
~~~~~~~~~~~~~~
:copyright: (c) 2013 Eleme, http://polaris.eleme.io
:license: MIT
Polaris app server.
"""
import hashlib
import os.path
from flask import Flask as _Flask
from werkzeug.datastructures import ImmutableDict
from werkzeug.utils import import_string
__all__ = ['create_app']
class Flask(_Flask):
jinja_options = ImmutableDict(
trim_blocks=True,
lstrip_blocks=True,
extensions=[
'jinja2.ext.autoescape',
'jinja2.ext.with_',
]
)
def create_app(config=None):
app = Flask(__name__)
app.config.from_object("polaris.defaults")
# try to find config in $PWD, and "/etc/".
if not config:
for path in ("%s/" % os.getcwd(), "/etc/", ):
fname = os.path.join(path, "polaris_config.py")
if os.path.isfile(fname):
config = fname
break
if config:
if isinstance(config, str) and config.endswith(".py"):
app.config.from_pyfile(config)
else:
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
##########
# Dogpile Cache
from dogpile.cache import make_region
from polaris.utils import kw_generator
region = make_region(function_key_generator=kw_generator).configure(
**app.config["CACHE"])
app.config["cache_region"] = region
##########
# Register sources
app.config["ext"] = {}
for name, path in app.config["EXTENSIONS"].items():
app.config["ext"][na | me] = import_string(path)
app.config["source"] = {}
app.config["category"] = {}
for name, cfg in app.config["SOURCES"].items():
source = app.config["ext"][cfg["ext"]](**cfg["params"])
app.config["source"][name] = source
app.config["category"][name] = source.c | ategory
register_jinja(app)
return app
def register_jinja(app):
app._assets = app.config.get('ASSETS', {})
def static_url(filename):
if app.testing:
return filename
if filename in app._assets:
return app._assets[filename]
filepath = os.path.join(app.static_folder, filename)
if not os.path.exists(filepath):
app.logger.warn('%s not exists', filepath)
return filename
with open(os.path.join(app.static_folder, filename), 'r') as f:
content = f.read()
hsh = hashlib.md5(content.encode('utf-8')).hexdigest()
app.logger.info('Generate %s md5sum: %s' % (filename, hsh))
value = '/static/%s?v=%s' % (filename, hsh)
app._assets[filename] = value
return value
app.jinja_env.globals['static_url'] = static_url
def register_extensions(app):
from polaris.models import db
db.init_app(app)
db.app = app
from polaris.auth.oauth import oauth
oauth.init_app(app)
from polaris.auth.login import login
login.init_app(app)
from polaris.auth.form import csrf
csrf.init_app(app)
# Babel for internationalization
from flask import current_app, request
from flask_babel import Babel
babel = Babel(app)
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(
current_app.config["BABEL_SUPPORTED_LOCALE"])
def register_blueprints(app):
from polaris.views import (
api,
chart,
dashboard,
errors,
index,
profile,
)
app.register_blueprint(api)
app.register_blueprint(index)
app.register_blueprint(chart)
app.register_blueprint(dashboard)
app.register_blueprint(errors)
app.register_blueprint(profile)
|
otfried/cs101 | code/files/planets4.py | Python | gpl-3.0 | 213 | 0.018779 |
f = open("data/planetsc.txt", "r")
earth = 0
for line in f:
planet = line.strip().low | er()
if planet[0] == "#":
continue
earth += 1
if planet == "earth":
break
print "Earth is planet #%d" % eart | h
|
memorycoin/asm2mmc | asm2mmc/__init__.py | Python | mit | 49 | 0.020408 | # -*- coding: utf8 -*-
from de | code import decod | e |
kyubifire/softlayer-python | tests/CLI/modules/file_tests.py | Python | mit | 28,087 | 0.000392 | """
SoftLayer.tests.CLI.modules.file_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import exceptions
from SoftLayer import testing
import json
import mock
class FileTests(testing.TestCase):
def test_access_list(self):
result = self.run_command(['file', 'access-list', '1234'])
self.assert_no_fail(result)
def test_authorize_host_to_volume(self):
result = self.run_command(['file', 'access-authorize', '12345678',
'--hardware-id=100', '--virtual-id=10',
'--ip-address-id=192',
'--ip-address=192.3.2.1',
'--subnet-id=200'])
self.assert_no_fail(result)
def test_deauthorize_host_to_volume(self):
result = self.run_command(['file', 'access-revoke', '12345678',
'--hardware-id=100', '--virtual-id=10',
| '--ip-address-id=192',
'--ip-address=192.3.2.1',
'--subnet-id=200'])
self.assert_no_fail(result)
def test_volume_list(self):
result = self.run_command(['file', 'volume-list'])
self.assert_no_fail(result)
self.assertEqual([
{
'bytes_used': None,
'capacity_gb': 10,
'datacenter': 'Dallas',
'id': 1,
| 'ip_addr': '127.0.0.1',
'storage_type': 'ENDURANCE',
'username': 'user',
'active_transactions': None,
'mount_addr': '127.0.0.1:/TEST',
'rep_partner_count': None
}],
json.loads(result.output))
@mock.patch('SoftLayer.FileStorageManager.list_file_volumes')
def test_volume_count(self, list_mock):
list_mock.return_value = [
{'serviceResource': {'datacenter': {'name': 'dal09'}}},
{'serviceResource': {'datacenter': {'name': 'ams01'}}},
{'serviceResource': {'datacenter': {'name': 'ams01'}}}
]
result = self.run_command(['file', 'volume-count'])
self.assert_no_fail(result)
self.assertEqual(
{
'ams01': 2,
'dal09': 1
},
json.loads(result.output))
def test_snapshot_list(self):
result = self.run_command(['file', 'snapshot-list', '1234'])
self.assert_no_fail(result)
self.assertEqual([
{
'id': 470,
'name': 'unit_testing_note',
'created': '2016-07-06T07:41:19-05:00',
'size_bytes': '42',
}],
json.loads(result.output))
def test_volume_cancel(self):
result = self.run_command([
'--really', 'file', 'volume-cancel', '1234'])
self.assert_no_fail(result)
self.assertEqual('File volume with id 1234 has been marked'
' for cancellation\n', result.output)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelItem',
args=(False, True, None))
def test_volume_cancel_with_billing_item(self):
result = self.run_command([
'--really', 'file', 'volume-cancel', '1234'])
self.assert_no_fail(result)
self.assertEqual('File volume with id 1234 has been marked'
' for cancellation\n', result.output)
self.assert_called_with('SoftLayer_Network_Storage', 'getObject')
def test_volume_cancel_without_billing_item(self):
p_mock = self.set_mock('SoftLayer_Network_Storage', 'getObject')
p_mock.return_value = {
"accountId": 1234,
"capacityGb": 20,
"createDate": "2015-04-29T06:55:55-07:00",
"id": 11111,
"nasType": "NAS",
"username": "SL01SEV307608_1"
}
result = self.run_command([
'--really', 'file', 'volume-cancel', '1234'])
self.assertIsInstance(result.exception, exceptions.SoftLayerError)
def test_volume_detail(self):
result = self.run_command(['file', 'volume-detail', '1234'])
self.assert_no_fail(result)
self.assertEqual({
'Username': 'username',
'Used Space': '0B',
'Endurance Tier': 'READHEAVY_TIER',
'IOPs': 1000,
'Mount Address': '127.0.0.1:/TEST',
'Snapshot Capacity (GB)': '10',
'Snapshot Used (Bytes)': 1024,
'Capacity (GB)': '20GB',
'Target IP': '10.1.2.3',
'Data Center': 'dal05',
'Type': 'ENDURANCE',
'ID': 100,
'# of Active Transactions': '1',
'Ongoing Transaction': 'This is a buffer time in which the customer may cancel the server',
'Replicant Count': '1',
'Replication Status': 'Replicant Volume Provisioning '
'has completed.',
'Replicant Volumes': [[
{'Replicant ID': 'Volume Name', '1784': 'TEST_REP_1'},
{'Replicant ID': 'Target IP', '1784': '10.3.174.79'},
{'Replicant ID': 'Data Center', '1784': 'wdc01'},
{'Replicant ID': 'Schedule', '1784': 'REPLICATION_HOURLY'},
], [
{'Replicant ID': 'Volume Name', '1785': 'TEST_REP_2'},
{'Replicant ID': 'Target IP', '1785': '10.3.177.84'},
{'Replicant ID': 'Data Center', '1785': 'dal01'},
{'Replicant ID': 'Schedule', '1785': 'REPLICATION_DAILY'},
]],
'Original Volume Properties': [
{'Property': 'Original Volume Size',
'Value': '20'},
{'Property': 'Original Volume Name',
'Value': 'test-original-volume-name'},
{'Property': 'Original Snapshot Name',
'Value': 'test-original-snapshot-name'}
]
}, json.loads(result.output))
def test_volume_order_performance_iops_not_given(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--location=dal05'])
self.assertEqual(2, result.exit_code)
def test_volume_order_performance_snapshot_error(self):
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--iops=100', '--location=dal05',
'--snapshot-size=10',
'--service-offering=performance'])
self.assertEqual(2, result.exit_code)
@mock.patch('SoftLayer.FileStorageManager.order_file_volume')
def test_volume_order_performance(self, order_mock):
order_mock.return_value = {
'placedOrder': {
'id': 478,
'items': [
{'description': 'Performance Storage'},
{'description': 'File Storage'},
{'description': '0.25 IOPS per GB'},
{'description': '20 GB Storage Space'},
{'description': '10 GB Storage Space (Snapshot Space)'}]
}
}
result = self.run_command(['file', 'volume-order',
'--storage-type=performance', '--size=20',
'--iops=100', '--location=dal05',
'--snapshot-size=10'])
self.assert_no_fail(result)
self.assertEqual(result.output,
'Order #478 placed successfully!\n'
' > Performance Storage\n > File Storage\n'
' > 0.25 IOPS per GB\n > 20 GB Storage Space\n'
' > 10 GB Storage Space (Snapshot Space)\n')
def test_volume_order_endurance_tier_not_given(self):
result = self.r |
qilicun/python | python2/diveintopythonzh-cn-5.4b/zh_cn/makerealworddoc.py | Python | gpl-3.0 | 2,374 | 0.029486 | #coding=gbk
"""Convert HTML page to Word 97 document
This script is used during the build process of "Dive Into Python"
(http://diveintopython.org/) to create the downloadable Word 97 version
of the book (http://diveintopython.org/diveintopython.doc)
Looks for 2 arguments on the command line. The first argument is the input (HTML)
file; the second argument is the output (.doc) file.
Only runs on Windows. Requires Microsoft Word 2000.
Safe to run on the same file(s) more than once. The output file will be
silently overwritten if it already exists.
The script has been modified by xiaq (xiaqqaix@gmail.com) to fit Simplified Chinese version of Microsoft Word.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import sys, os
from win32com.client import gencache, constants
def makeRealWordDoc(infile, outfile):
word = gencache.EnsureDisp | atch("Word.Application | ")
try:
worddoc = word.Documents.Open(FileName=infile)
try:
worddoc.TablesOfContents.Add(Range=word.ActiveWindow.Selection.Range, \
RightAlignPageNumbers=1, \
UseHeadingStyles=1, \
UpperHeadingLevel=1, \
LowerHeadingLevel=2, \
IncludePageNumbers=1, \
AddedStyles='', \
UseHyperlinks=1, \
HidePageNumbersInWeb=1)
worddoc.TablesOfContents(1).TabLeader = constants.wdTabLeaderDots
worddoc.TablesOfContents.Format = constants.wdIndexIndent
word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageHeader
word.Selection.TypeText(Text="Dive Into Python\t\thttp://diveintopython.org/")
word.ActiveWindow.ActivePane.View.SeekView = constants.wdSeekCurrentPageFooter
word.NormalTemplate.AutoTextEntries("- Ò³Âë -").Insert(Where=word.ActiveWindow.Selection.Range)
word.ActiveWindow.View.Type = constants.wdPrintView
worddoc.TablesOfContents(1).Update()
worddoc.SaveAs(FileName=outfile, \
FileFormat=constants.wdFormatDocument)
finally:
worddoc.Close(0)
del worddoc
finally:
word.Quit()
del word
if __name__ == "__main__":
infile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1]))
outfile = os.path.normpath(os.path.join(os.getcwd(), sys.argv[2]))
makeRealWordDoc(infile, outfile)
|
raultron/ivs_sim | python/5points_gradientdescent.py | Python | mit | 5,075 | 0.040197 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 11:52:11 2017
@author: lracuna
"""
from vision.camera import *
from vision.plane import Plane
import vision.error_functions as ef
import gdescent.hpoints_gradient5 as gd5
from ippe import homo2d
## CREATE A SIMULATED CAMERA
cam = Camera()
cam.set_K(fx = 800,fy = 800,cx = 640,cy = 480)
cam.set_width_heigth(1280,960)
## DEFINE CAMERA POSE LOOKING STRAIGTH DOWN INTO THE PLANE MODEL
#cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(180.0))
#cam.set_t(0.0,-0.0,0.5, frame='world')
cam.set_R_axisAngle(1.0, 1.0, 0.0, np.deg2rad(130.0))
cam.set_t(0.0,-0.4,2.0, frame='world')
## Define a Display plane
pl = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.3,0.3), n = (2,2))
pl.random(n =5, r = 0.001, min_sep = 0.001)
## CREATE A SET OF IMAGE POINTS FOR VALIDATION OF THE HOMOGRAPHY ESTIMATION
validation_plane = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.5,0.5), n = (4,4))
validation_plane.uniform()
## we create the gradient for the point distribution
normalize= False
n = 0.0 | 00001 #condition number norm
gradient = gd5.create_gradient(metric='condition_number')
#gradient = gd5.create_gradient(metric='volker_metric')
#gradient = gd5.create_gradient(metric='pnorm_condition_number')
objectPoin | ts_des = pl.get_points()
alpha=0.2
imagePoints_des = np.array(cam.project(objectPoints_des, False))
objectPoints_list = list()
imagePoints_list = list()
transfer_error_list = list()
condition_number_list = list()
normalized_condition_number_list = list()
new_objectPoints = objectPoints_des
for i in range(1000):
objectPoints = np.copy(new_objectPoints)
gradient = gd5.evaluate_gradient(gradient,objectPoints, np.array(cam.P))
#gradient = gd5.normalize_gradient(gradient)
new_objectPoints = gd5.update_points(gradient, objectPoints)#, limit = 3)
new_imagePoints = np.array(cam.project(new_objectPoints, False))
objectPoints_list.append(new_objectPoints)
imagePoints_list.append(new_imagePoints)
#plt.cla()
plt.figure('Image Points')
plt.ion()
if i==0:
plt.cla()
cam.plot_plane(pl)
plt.plot(imagePoints_des[0],imagePoints_des[1],'x',color = 'black',)
plt.xlim(0,1280)
plt.ylim(0,960)
plt.gca().invert_yaxis()
plt.axes().set_aspect('equal', 'datalim')
plt.cla()
cam.plot_plane(pl)
plt.plot(new_imagePoints[0],new_imagePoints[1],'.',color = 'blue',)
plt.pause(0.01)
plt.figure('Object Points')
plt.ion()
if i==0:
plt.cla()
plt.plot(objectPoints_des[0],objectPoints_des[1],'x',color = 'black',)
plt.axes().set_aspect('equal', 'datalim')
plt.plot(new_objectPoints[0],new_objectPoints[1],'.',color = 'blue',)
plt.pause(0.01)
Xo = np.copy(new_objectPoints[[0,1,3],:]) #without the z coordinate (plane)
Xi = np.copy(new_imagePoints)
Hnoisy,A_t_ref,H_t = homo2d.homography2d(Xo,Xi)
Aideal_norm = ef.calculate_A_matrix(Xo,Xi)
x1,y1,x2,y2,x3,y3,x4,y4,x5,y5 = gd5.extract_objectpoints_vars(new_objectPoints)
mat_cond_autrograd = gd5.matrix_condition_number_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,np.array(cam.P))
#volkerMetric = ef.volker_metric(Aideal)
#mat_cond = ef.get_matrix_pnorm_condition_number(Aideal)
#mat_cond = get_matrix_conditioning_number(Aideal)
#condition_number_list.append()
##HOMOGRAPHY ERRORS
## TRUE VALUE OF HOMOGRAPHY OBTAINED FROM CAMERA PARAMETERS
Hcam = cam.homography_from_Rt()
homography_iters = 100
##We add noise to the image points and calculate the noisy homography
transfer_error_sum = 0
for j in range(homography_iters):
new_imagePoints_noisy = cam.addnoise_imagePoints(new_imagePoints, mean = 0, sd = 2)
#Noisy homography calculation
Xo = new_objectPoints[[0,1,3],:]
Xi = new_imagePoints_noisy
Hnoisy,A_t_ref,H_t = homo2d.homography2d(Xo,Xi)
Hnoisy = Hnoisy/Hnoisy[2,2]
## ERRORS FOR THE NOISY HOMOGRAPHY
## VALIDATION OBJECT POINTS
validation_objectPoints =validation_plane.get_points()
validation_imagePoints = np.array(cam.project(validation_objectPoints, False))
Xo = np.copy(validation_objectPoints)
Xo = np.delete(Xo, 2, axis=0)
Xi = np.copy(validation_imagePoints)
transfer_error_sum += ef.validation_points_error(Xi, Xo, Hnoisy)
transfer_error_list.append(transfer_error_sum/homography_iters)
plt.figure("Average Transfer error")
plt.cla()
plt.ion()
plt.plot(transfer_error_list)
plt.pause(0.01)
print "Iteration: ", i
print "Mat cond Autograd: ", mat_cond_autrograd
#print "Mat cond:", mat_cond
#print "Volker Metric:", volkerMetric
print "dx1,dy1 :", gradient.dx1_eval,gradient.dy1_eval
print "dx2,dy2 :", gradient.dx2_eval,gradient.dy2_eval
print "dx3,dy3 :", gradient.dx3_eval,gradient.dy3_eval
print "dx4,dy4 :", gradient.dx4_eval,gradient.dy4_eval
print "dx5,dy5 :", gradient.dx5_eval,gradient.dy5_eval
print "------------------------------------------------------"
plt.figure('Image Points')
plt.plot(new_imagePoints[0],new_imagePoints[1],'.',color = 'red',)
|
AsgerPetersen/QGIS | python/plugins/processing/algs/qgis/SumLines.py | Python | gpl-2.0 | 5,242 | 0.001908 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SumLines.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsFeature, QgsGeometry, QgsFeatureRequest, QgsDistanceArea
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class SumLines(GeoAlgorithm):
    """Sum line lengths per polygon.

    For each polygon in POLYGONS, computes the total length of the parts of
    LINES features that intersect it, plus the number of intersecting lines,
    and writes both values into the LEN_FIELD / COUNT_FIELD attributes of
    the OUTPUT layer.
    """

    LINES = 'LINES'
    POLYGONS = 'POLYGONS'
    LEN_FIELD = 'LEN_FIELD'
    COUNT_FIELD = 'COUNT_FIELD'
    OUTPUT = 'OUTPUT'

    def getIcon(self):
        """Return the toolbox icon for this algorithm."""
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'sum_lines.png'))

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Sum line lengths')
        self.group, self.i18n_group = self.trAlgorithm('Vector analysis tools')
        self.addParameter(ParameterVector(self.LINES,
                                          self.tr('Lines'), [ParameterVector.VECTOR_TYPE_LINE]))
        self.addParameter(ParameterVector(self.POLYGONS,
                                          self.tr('Polygons'), [ParameterVector.VECTOR_TYPE_POLYGON]))
        self.addParameter(ParameterString(self.LEN_FIELD,
                                          self.tr('Lines length field name', 'LENGTH')))
        self.addParameter(ParameterString(self.COUNT_FIELD,
                                          self.tr('Lines count field name', 'COUNT')))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Line length')))

    def processAlgorithm(self, progress):
        """Intersect every polygon with the indexed lines and write results."""
        lineLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.LINES))
        polyLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.POLYGONS))
        lengthFieldName = self.getParameterValue(self.LEN_FIELD)
        countFieldName = self.getParameterValue(self.COUNT_FIELD)
        polyProvider = polyLayer.dataProvider()
        # Reuse existing attribute fields when present; otherwise new fields
        # are appended and the returned index points past the current list.
        (idxLength, fieldList) = vector.findOrCreateField(polyLayer,
                                                          polyLayer.pendingFields(), lengthFieldName)
        (idxCount, fieldList) = vector.findOrCreateField(polyLayer, fieldList,
                                                         countFieldName)
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fieldList.toList(), polyProvider.geometryType(), polyProvider.crs())
        # Spatial index over the line layer keeps the per-polygon candidate
        # lookup to a bounding-box query instead of a full layer scan.
        spatialIndex = vector.spatialindex(lineLayer)
        outFeat = QgsFeature()
        distArea = QgsDistanceArea()
        features = vector.features(polyLayer)
        # BUG FIX: the original computed 100.0 / len(features) unconditionally
        # and raised ZeroDivisionError for an empty polygon layer.
        featureCount = len(features)
        total = 100.0 / featureCount if featureCount else 100.0
        for current, ftPoly in enumerate(features):
            inGeom = QgsGeometry(ftPoly.geometry())
            attrs = ftPoly.attributes()
            count = 0
            length = 0
            lines = spatialIndex.intersects(inGeom.boundingBox())
            # An empty candidate list simply skips the loop; the previous
            # hasIntersections flag was redundant.
            for i in lines:
                request = QgsFeatureRequest().setFilterFid(i)
                # Builtin next() works on both Python 2.6+ and Python 3,
                # unlike the iterator .next() method used before.
                ftLine = next(lineLayer.getFeatures(request))
                tmpGeom = QgsGeometry(ftLine.geometry())
                if inGeom.intersects(tmpGeom):
                    outGeom = inGeom.intersection(tmpGeom)
                    length += distArea.measure(outGeom)
                    count += 1
            outFeat.setGeometry(inGeom)
            # Append when the field is new (index == current attribute count),
            # otherwise overwrite in place.
            if idxLength == len(attrs):
                attrs.append(length)
            else:
                attrs[idxLength] = length
            if idxCount == len(attrs):
                attrs.append(count)
            else:
                attrs[idxCount] = count
            outFeat.setAttributes(attrs)
            writer.addFeature(outFeat)
            progress.setPercentage(int(current * total))
        # Deleting the writer flushes/closes the output.
        del writer
|
wangybgit/Chameleon | hostapd-OpenWrt/tests/hwsim/test_rfkill.py | Python | apache-2.0 | 6,236 | 0.003688 | # rfkill tests
# Copyright (c) 2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import hostapd
from hostapd import HostapdGlobal
import hwsim_utils
from wpasupplicant import WpaSupplicant
from rfkill import RFKill
from utils import HwsimSkip
def get_rfkill(dev):
    """Return the RFKill object matching *dev*'s PHY name.

    Raises HwsimSkip (so the test case is skipped rather than failed) when
    rfkill is unavailable on the system or no rfkill entry matches the
    interface's PHY.
    """
    phy = dev.get_driver_status_field("phyname")
    try:
        for r, s, h in RFKill.list():
            if r.name == phy:
                return r
    except Exception as e:
        # "except Exception, e" was Python-2-only syntax; "as" is valid on
        # both Python 2.6+ and Python 3.
        raise HwsimSkip("No rfkill available: " + str(e))
    raise HwsimSkip("No rfkill match found for the interface")
def test_rfkill_open(dev, apdev):
    """rfkill block/unblock during open mode connection"""
    rfk = get_rfkill(dev[0])
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
    try:
        logger.info("rfkill block")
        rfk.block()
        dev[0].wait_disconnected(timeout=10,
                                 error="Missing disconnection event on rfkill block")
        # While the radio is blocked, every command that would start radio
        # activity must be rejected by wpa_supplicant.
        if "FAIL" not in dev[0].request("REASSOCIATE"):
            raise Exception("REASSOCIATE accepted while disabled")
        if "FAIL" not in dev[0].request("REATTACH"):
            raise Exception("REATTACH accepted while disabled")
        if "FAIL" not in dev[0].request("RECONNECT"):
            raise Exception("RECONNECT accepted while disabled")
        if "FAIL" not in dev[0].request("FETCH_OSU"):
            raise Exception("FETCH_OSU accepted while disabled")
        logger.info("rfkill unblock")
        rfk.unblock()
        # The station is expected to reconnect on its own after unblocking.
        dev[0].wait_connected(timeout=10,
                              error="Missing connection event on rfkill unblock")
        hwsim_utils.test_connectivity(dev[0], hapd)
    finally:
        # Always leave rfkill unblocked so later test cases start clean.
        rfk.unblock()
def test_rfkill_wpa2_psk(dev, apdev):
    """rfkill block/unblock during WPA2-PSK connection"""
    rfk = get_rfkill(dev[0])
    ssid = "test-wpa2-psk"
    passphrase = 'qwertyuiop'
    params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
    try:
        logger.info("rfkill block")
        rfk.block()
        dev[0].wait_disconnected(timeout=10,
                                 error="Missing disconnection event on rfkill block")
        logger.info("rfkill unblock")
        rfk.unblock()
        # Reconnection (including the WPA2 key handshake) must complete
        # automatically once the radio is available again.
        dev[0].wait_connected(timeout=10,
                              error="Missing connection event on rfkill unblock")
        hwsim_utils.test_connectivity(dev[0], hapd)
    finally:
        # Always leave rfkill unblocked for subsequent test cases.
        rfk.unblock()
def test_rfkill_autogo(dev, apdev):
    """rfkill block/unblock for autonomous P2P GO"""
    rfk0 = get_rfkill(dev[0])
    rfk1 = get_rfkill(dev[1])
    dev[0].p2p_start_go()
    # Second device runs its GO without a dedicated P2P group interface.
    dev[1].request("SET p2p_no_group_iface 0")
    dev[1].p2p_start_go()
    try:
        logger.info("rfkill block 0")
        rfk0.block()
        # Blocking the radio must tear down the P2P group and report the
        # UNAVAILABLE reason code.
        ev = dev[0].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
        if ev is None:
            raise Exception("Group removal not reported")
        if "reason=UNAVAILABLE" not in ev:
            raise Exception("Unexpected group removal reason: " + ev)
        # P2P_LISTEN (with and without a timeout argument) must be rejected
        # while the radio is blocked.
        if "FAIL" not in dev[0].request("P2P_LISTEN 1"):
            raise Exception("P2P_LISTEN accepted unexpectedly")
        if "FAIL" not in dev[0].request("P2P_LISTEN"):
            raise Exception("P2P_LISTEN accepted unexpectedly")
        logger.info("rfkill block 1")
        rfk1.block()
        ev = dev[1].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
        if ev is None:
            raise Exception("Group removal not reported")
        if "reason=UNAVAILABLE" not in ev:
            raise Exception("Unexpected group removal reason: " + ev)
        logger.info("rfkill unblock 0")
        rfk0.unblock()
        logger.info("rfkill unblock 1")
        rfk1.unblock()
        # Give the interfaces a moment to come back up after unblocking.
        time.sleep(1)
    finally:
        rfk0.unblock()
        rfk1.unblock()
def test_rfkill_hostapd(dev, apdev):
    """rfkill block/unblock during and prior to hostapd operations"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    rfk = get_rfkill(hapd)
    try:
        rfk.block()
        ev = hapd.wait_event(["INTERFACE-DISABLED"], timeout=5)
        if ev is None:
            raise Exception("INTERFACE-DISABLED event not seen")
        rfk.unblock()
        ev = hapd.wait_event(["INTERFACE-ENABLED"], timeout=5)
        if ev is None:
            raise Exception("INTERFACE-ENABLED event not seen")
        # hostapd does not currently re-enable beaconing automatically,
        # so cycle the AP manually before connecting a station.
        hapd.disable()
        hapd.enable()
        dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
        rfk.block()
        ev = hapd.wait_event(["INTERFACE-DISABLED"], timeout=5)
        if ev is None:
            raise Exception("INTERFACE-DISABLED event not seen")
        dev[0].wait_disconnected(timeout=10)
        dev[0].request("DISCONNECT")
        # Remove the AP entirely and verify that a freshly added AP cannot
        # be ENABLEd while rfkill is still blocking the radio.
        hapd.disable()
        hglobal = HostapdGlobal()
        hglobal.flush()
        hglobal.remove(apdev[0]['ifname'])
        hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open2" },
                              no_enable=True)
        if "FAIL" not in hapd.request("ENABLE"):
            raise Exception("ENABLE succeeded unexpectedly (rfkill)")
    finally:
        rfk.unblock()
def test_rfkill_wpas(dev, apdev):
    """rfkill block prior to wpa_supplicant start"""
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    # Add the interface once just to discover its rfkill entry, then remove
    # it so the real scenario starts with the interface absent.
    wpas.interface_add("wlan5")
    rfk = get_rfkill(wpas)
    wpas.interface_remove("wlan5")
    try:
        rfk.block()
        wpas.interface_add("wlan5")
        time.sleep(0.5)
        # An interface added while rfkill-blocked must come up disabled.
        state = wpas.get_status_field("wpa_state")
        if state != "INTERFACE_DISABLED":
            raise Exception("Unexpected state with rfkill blocked: " + state)
        rfk.unblock()
        time.sleep(0.5)
        # After unblocking, the interface must leave the disabled state.
        state = wpas.get_status_field("wpa_state")
        if state == "INTERFACE_DISABLED":
            raise Exception("Unexpected state with rfkill unblocked: " + state)
    finally:
        rfk.unblock()
|
tic-ull/defensatfc-proto | tfc_webapps/packages/suds-timestamp/suds/xsd/sxbasic.py | Python | agpl-3.0 | 31,569 | 0.00529 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbasic} module provides classes that represent
I{basic} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.xsd.sxbase import *
from suds.xsd.query import *
from suds.sax import splitPrefix, Namespace
from suds.sax.parser import Parser
from suds.transport import TransportError
from urlparse import urljoin
log = getLogger(__name__)
class Factory:
    """
    Factory creating schema objects from XSD tag names.
    @cvar tags: A factory to create object objects based on tag.
    @type tags: {tag:fn,}
    """
    # NOTE: the values are lambdas rather than direct class references so
    # that the class names are resolved lazily at call time -- most of the
    # referenced classes are defined later in this module.
    tags =\
    {
        'import' : lambda x,y: Import(x,y),
        'include' : lambda x,y: Include(x,y),
        'complexType' : lambda x,y: Complex(x,y),
        'group' : lambda x,y: Group(x,y),
        'attributeGroup' : lambda x,y: AttributeGroup(x,y),
        'simpleType' : lambda x,y: Simple(x,y),
        'element' : lambda x,y: Element(x,y),
        'attribute' : lambda x,y: Attribute(x,y),
        'sequence' : lambda x,y: Sequence(x,y),
        'all' : lambda x,y: All(x,y),
        'choice' : lambda x,y: Choice(x,y),
        'complexContent' : lambda x,y: ComplexContent(x,y),
        'simpleContent' : lambda x,y: SimpleContent(x,y),
        'restriction' : lambda x,y: Restriction(x,y),
        'enumeration' : lambda x,y: Enumeration(x,y),
        'extension' : lambda x,y: Extension(x,y),
        'any' : lambda x,y: Any(x,y),
    }
    @classmethod
    def create(cls, root, schema):
        """
        Create an object based on the root tag name.
        @param root: An XML root element.
        @type root: L{Element}
        @param schema: A schema object.
        @type schema: L{schema.Schema}
        @return: The created object.
        @rtype: L{SchemaObject}
        """
        fn = cls.tags.get(root.name)
        if fn is not None:
            return fn(schema, root)
        else:
            # Unknown/unsupported tags yield None; build() skips them.
            return None
    @classmethod
    def build(cls, root, schema, filter=('*',)):
        """
        Build an xsobject representation.
        @param root: An schema XML root.
        @type root: L{sax.element.Element}
        @param filter: A tag filter.
        @type filter: [str,...]
        @return: A schema object graph.
        @rtype: L{sxbase.SchemaObject}
        """
        children = []
        for node in root.getChildren(ns=Namespace.xsdns):
            # Only descend into tags accepted by the caller's filter;
            # '*' accepts every tag.
            if '*' in filter or node.name in filter:
                child = cls.create(node, schema)
                if child is None:
                    continue
                children.append(child)
                # Recurse using the child's own notion of valid sub-tags.
                c = cls.build(node, schema, child.childtags())
                child.rawchildren = c
        return children
    @classmethod
    def collate(cls, children):
        """Partition *children* into imports plus per-kind qname indexes."""
        imports = []
        elements = {}
        attributes = {}
        types = {}
        groups = {}
        agrps = {}
        for c in children:
            if isinstance(c, (Import, Include)):
                imports.append(c)
                continue
            if isinstance(c, Attribute):
                attributes[c.qname] = c
                continue
            if isinstance(c, Element):
                elements[c.qname] = c
                continue
            if isinstance(c, Group):
                groups[c.qname] = c
                continue
            if isinstance(c, AttributeGroup):
                agrps[c.qname] = c
                continue
            # Everything else (simple/complex types, ...) is indexed as a type.
            types[c.qname] = c
        # Imports are returned separately and removed from the child list.
        for i in imports:
            children.remove(i)
        return (children, imports, attributes, elements, types, groups, agrps)
class TypedContent(Content):
    """Schema content node that carries a ``type`` reference to resolve."""
    def resolve(self, nobuiltin=False):
        """
        Resolve and return the nodes true self.
        @param nobuiltin: Flag indicates that resolution must
        not continue to include xsd builtins.
        @return: The resolved (true) type.
        @rtype: L{SchemaObject}
        """
        # No declared type: this node is already its own resolution.
        if self.type is None:
            return self
        # NOTE(review): the cache is read here but never written in this
        # method; presumably it is populated by the base class or a caller
        # -- verify before relying on it.
        cached = self.cache.get(nobuiltin)
        if cached is not None:
            return cached
        result = self
        # Qualify the type reference against the default namespace, then
        # look it up in the schema.
        defns = self.root.defaultNamespace()
        qref = qualify(self.type, self.root, defns)
        query = TypeQuery(qref)
        # History is seeded with self, apparently so the query does not
        # resolve back into this node -- confirm against query semantics.
        query.history = [self]
        log.debug('%s, resolving: %s\n using:%s', self.id, qref, query)
        resolved = query.execute(self.schema)
        if resolved is None:
            log.debug(self.schema)
            raise TypeNotFound(qref)
        if resolved.builtin():
            # Builtins are terminal: honor nobuiltin by returning self.
            if nobuiltin:
                result = self
            else:
                result = resolved
        else:
            # Non-builtins may themselves reference a type; keep resolving.
            result = resolved.resolve(nobuiltin)
        return result
class Complex(SchemaObject):
    """
    An (xsd) <xs:complexType/> schema node.
    @cvar childtags: Valid child node names.
    @type childtags: (I{str},...)
    """

    def childtags(self):
        """
        List the tag names that may appear beneath a complexType.
        @return: Valid child tag names.
        @rtype: [str,...]
        """
        return ('attribute', 'attributeGroup', 'sequence', 'all', 'choice',
                'complexContent', 'simpleContent', 'any', 'group')

    def description(self):
        """
        Attribute names used for str() and repr() output.
        @return: Relevant attribute names.
        @rtype: [str,...]
        """
        return ('name',)

    def extension(self):
        """
        Whether any raw child contains an extension/restriction.
        @return: True if so, else False.
        @rtype: boolean
        """
        return any(child.extension() for child in self.rawchildren)
class Group(SchemaObject):
"""
Represents an (xsd) schema <xs:group/> node.
@cvar childtags: A list of valid child node names
@type childtags: (I{str},...)
"""
    def __init__(self, schema, root):
        """
        @param schema: The containing schema.
        @type schema: L{schema.Schema}
        @param root: The xml root node.
        @type root: L{sax.element.Element}
        """
        SchemaObject.__init__(self, schema, root)
        # minOccurs/maxOccurs default to '1'; values are kept as strings
        # because 'unbounded' is a legal maxOccurs value.
        self.min = root.get('minOccurs', default='1')
        self.max = root.get('maxOccurs', default='1')
def childtags(self):
"""
Get a list of valid child tag names.
@return: A list of child tag names.
@rtype: [str,...]
"""
return ('sequence', 'all', 'choice')
def unbounded(self):
"""
Get whether this node is unbounded I{(a collection)}.
@return: True if unbounded, else False.
@rtype: boolean
"""
if self.max.isdigit():
return (int(self.max) > 1)
else:
return ( self.max == 'unbounded' )
def optional(self):
"""
Get whether this type is optional.
@return: True if optional, else False
@rtype: boolean
"""
return ( self.min == '0' )
def dependencies(self):
"""
Get a list of dependancies for dereferencing.
@return: A merge dependancy index and a list of dependancies.
@rtype: (int, [L{SchemaObject},...])
"""
dep |
midnightradio/gensim | gensim/corpora/indexedcorpus.py | Python | gpl-3.0 | 6,432 | 0.002177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Base Indexed Corpus class."""
import logging
import numpy
from gensim import interfaces, utils
logger = logging.getLogger(__name__)
class IndexedCorpus(interfaces.CorpusABC):
    """Indexed corpus is a mechanism for random-accessing corpora.
    While the standard corpus interface in gensim allows iterating over corpus,
    we'll show it with :class:`~gensim.corpora.mmcorpus.MmCorpus`.
    .. sourcecode:: pycon
        >>> from gensim.corpora import MmCorpus
        >>> from gensim.test.utils import datapath
        >>>
        >>> corpus = MmCorpus(datapath('testcorpus.mm'))
        >>> for doc in corpus:
        ...     pass
    :class:`~gensim.corpora.indexedcorpus.IndexedCorpus` allows accessing the documents with index
    in :math:`{O}(1)` look-up time.
    .. sourcecode:: pycon
        >>> document_index = 3
        >>> doc = corpus[document_index]
    Notes
    -----
    This functionality is achieved by storing an extra file (by default named the same as the `fname.index`)
    that stores the byte offset of the beginning of each document.
    """
    def __init__(self, fname, index_fname=None):
        """
        Parameters
        ----------
        fname : str
            Path to corpus.
        index_fname : str, optional
            Path to index, if not provided - used `fname.index`.
        """
        try:
            if index_fname is None:
                index_fname = utils.smart_extension(fname, '.index')
            self.index = utils.unpickle(index_fname)
            # change self.index into a numpy.ndarray to support fancy indexing
            self.index = numpy.asarray(self.index)
            logger.info("loaded corpus index from %s", index_fname)
        except Exception:
            # Deliberate best-effort: a missing or unreadable index only
            # disables random access (self.index is None); iteration works.
            self.index = None
            self.length = None
    @classmethod
    def serialize(serializer, fname, corpus, id2word=None, index_fname=None,
                  progress_cnt=None, labels=None, metadata=False):
        """Serialize corpus with offset metadata, allows to use direct indexes after loading.
        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of iterable of (int, float)
            Corpus in BoW format.
        id2word : dict of (str, str), optional
            Mapping id -> word.
        index_fname : str, optional
            Where to save resulting index, if None - store index to `fname`.index.
        progress_cnt : int, optional
            Number of documents after which progress info is printed.
        labels : bool, optional
            If True - ignore first column (class labels).
        metadata : bool, optional
            If True - ensure that serialize will write out article titles to a pickle file.
        Examples
        --------
        .. sourcecode:: pycon
            >>> from gensim.corpora import MmCorpus
            >>> from gensim.test.utils import get_tmpfile
            >>>
            >>> corpus = [[(1, 0.3), (2, 0.1)], [(1, 0.1)], [(2, 0.3)]]
            >>> output_fname = get_tmpfile("test.mm")
            >>>
            >>> MmCorpus.serialize(output_fname, corpus)
            >>> mm = MmCorpus(output_fname)  # `mm` document stream now has random access
            >>> print(mm[1])  # retrieve document no. 42, etc.
            [(1, 0.1)]
        """
        # NOTE: the conventional `cls` argument is intentionally named
        # `serializer` here -- this classmethod is called on concrete corpus
        # classes whose save_corpus() performs the actual writing.
        if getattr(corpus, 'fname', None) == fname:
            raise ValueError("identical input vs. output corpus filename, refusing to serialize: %s" % fname)
        if index_fname is None:
            index_fname = utils.smart_extension(fname, '.index')
        kwargs = {'metadata': metadata}
        if progress_cnt is not None:
            kwargs['progress_cnt'] = progress_cnt
        if labels is not None:
            kwargs['labels'] = labels
        offsets = serializer.save_corpus(fname, corpus, id2word, **kwargs)
        if offsets is None:
            raise NotImplementedError(
                "Called serialize on class %s which doesn't support indexing!" % serializer.__name__
            )
        # store offsets persistently, using pickle
        # we shouldn't have to worry about self.index being a numpy.ndarray as the serializer will return
        # the offsets that are actually stored on disk - we're not storing self.index in any case, the
        # load just needs to turn whatever is loaded from disk back into a ndarray - this should also ensure
        # backwards compatibility
        logger.info("saving %s index to %s", serializer.__name__, index_fname)
        utils.pickle(offsets, index_fname)
    def __len__(self):
        """Get the index length.
        Notes
        -----
        If the corpus is not indexed, also count corpus length and cache this value.
        Returns
        -------
        int
            Length of index.
        """
        if self.index is not None:
            return len(self.index)
        # No index available: count documents once and cache the result.
        if self.length is None:
            logger.info("caching corpus length")
            self.length = sum(1 for _ in self)
        return self.length
    def __getitem__(self, docno):
        """Get document by `docno` index.
        Parameters
        ----------
        docno : {int, iterable of int}
            Document number or iterable of numbers (like a list of str).
        Returns
        -------
        list of (int, float)
            If `docno` is int - return document in BoW format.
        :class:`~gensim.utils.SlicedCorpus`
            If `docno` is iterable of int - return several documents in BoW format
            wrapped to :class:`~gensim.utils.SlicedCorpus`.
        Raises
        ------
        RuntimeError
            If the index does not exist.
        """
        if self.index is None:
            raise RuntimeError("Cannot call corpus[docid] without an index")
        if isinstance(docno, (slice, list, numpy.ndarray)):
            # Multi-document access is wrapped lazily; no copying here.
            return utils.SlicedCorpus(self, docno)
        elif isinstance(docno, (int, numpy.integer,)):
            return self.docbyoffset(self.index[docno])
        # TODO: no `docbyoffset` method here; subclasses are expected to
        # provide it (should arguably be declared abstract in this class).
        else:
            raise ValueError('Unrecognised value for docno, use either a single integer, a slice or a numpy.ndarray')
|
spamalot/quichem | quichem/compilers/display/__init__.py | Python | lgpl-3.0 | 2,168 | 0.000461 | # This file is part of quichem.
#
# quichem is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# quichem is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with quichem. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from quichem.compilers.compiler import Compiler, tokened_strings
from quichem.compilers.display import fragments
class DisplayCompiler(Compiler):
    """Generic compiler that renders quichem ASTs to displayable text.

    Attributes
    ----------
    fragments : dict
        Maps token names to display fragment renderers.
    token_fragments : dict
        Maps token classes to the very same fragment objects stored in
        ``fragments``.
    """

    def __init__(self):
        Compiler.__init__(self)
        self.fragments = {
            'separator': fragments.Separator(),
            'element': fragments.Element(),
            'coefficient': fragments.Coefficient(),
            'charge': fragments.Charge(),
            'state': fragments.State(),
            'counter': fragments.Counter(),
            'open group': fragments.OpenCloseGroup(),
            'close group': fragments.OpenCloseGroup()}
        self.token_fragments = {
            tokened_strings[name]: fragment
            for name, fragment in self.fragments.items()
        }

    def compile(self, ast):
        """Compile a `quichem` AST into a single rendered string."""
        rendered_pieces = Compiler.compile(self, ast)
        return ''.join(rendered_pieces)

    def handle(self, token):
        """Render *token* with the fragment registered for its class."""
        token_class = next(cls for cls in self.token_fragments
                           if isinstance(token, cls))
        return self.token_fragments[token_class].render(token)
|
lingui/js-lingui | docs/_ext/linguidocs.py | Python | mit | 2,809 | 0.000712 | """
Lingui docs extensions
Inspired by Django Docs
https://github.com/django/django/blob/main/docs/_ext/djangodocs.py
"""
from docutils import nodes
from sphinx import addnodes
from sphinx.domains.std import Cmdoption
from sphinx.locale import _
from sphinx.util.docfields import TypedField
class jsxmacro(nodes.Inline, nodes.TextElement):
    """Inline docutils node for a documented JSX (React) macro signature."""
    pass
def visit_react_macro_html(self, node):
    """Open the HTML rendering of a JSX macro with ``<``."""
    self.body.append("<")
def depart_react_macro_html(self, node):
    """Close the HTML rendering of a JSX macro with ``>``."""
    self.body.append(">")
class react_component(nodes.Inline, nodes.TextElement):
    """Inline docutils node for a documented React component signature."""
    pass
def visit_react_component_html(self, node):
    """Open the HTML rendering of a React component with ``<``."""
    self.body.append("<")
def depart_react_component_html(self, node):
    """Close the HTML rendering of a React component with ``>``."""
    self.body.append(">")
class jsmacro(nodes.Inline, nodes.TextElement):
    """Inline docutils node for a documented plain JavaScript macro."""
    pass
def visit_jsmacro_html(self, node):
    # JS macros render without surrounding markup: nothing to emit on entry.
    pass
def depart_jsmacro_html(self, node):
    # JS macros render without surrounding markup: nothing to emit on exit.
    pass
def parse_lingui_cli_node(env, sig, signode):
    """Parse a ``lingui-cli`` signature, rendering it as ``lingui <sig>``.

    Records the command name (the first word of *sig*) as the current
    std:program and returns it as the object name.
    """
    command, _, _ = sig.partition(' ')
    env.ref_context['std:program'] = command
    title = "lingui %s" % sig
    signode += addnodes.desc_name(title, title)
    return command
def setup(app):
    """Register Lingui-specific object types, nodes and directives."""
    # React (JSX) macros: cross-referencable objects with a typed props table.
    app.add_object_type(
        directivename='jsxmacro',
        rolename='jsxmacro',
        indextemplate="pair: %s; jsxmacro",
        ref_nodeclass=jsxmacro,
        objname='React macro',
        doc_field_types=[
            TypedField('props', label=_('Props'),
                       names=('prop',),
                       typerolename='jsxmacro',
                       typenames=('proptype', 'type')),
        ]
    )
    app.add_node(jsxmacro,
                 html=(visit_react_macro_html, depart_react_macro_html))
    # React components: documented the same way as JSX macros.
    app.add_object_type(
        directivename='component',
        rolename='component',
        indextemplate="pair: %s; component",
        ref_nodeclass=react_component,
        objname='Component',
        doc_field_types=[
            TypedField('props', label=_('Props'),
                       names=('prop',),
                       typerolename='component',
                       typenames=('proptype', 'type')),
        ]
    )
    app.add_node(react_component,
                 html=(visit_react_component_html, depart_react_component_html))
    # Plain JavaScript macros carry no props table.
    app.add_object_type(
        directivename='jsmacro',
        rolename='jsmacro',
        indextemplate="pair: %s; jsmacro",
        ref_nodeclass=jsmacro,
        objname='JS Macro'
    )
    app.add_node(jsmacro,
                 html=(visit_jsmacro_html, depart_jsmacro_html))
    # Simple cross-reference targets for configuration keys and ICU topics.
    app.add_crossref_type('config', 'conf')
    app.add_crossref_type('icu', 'icu')
    # "lingui-cli" directive renders CLI commands as "lingui <command>".
    app.add_object_type(
        directivename="lingui-cli",
        rolename="cli",
        indextemplate="pair: %s; lingui-cli command",
        parse_node=parse_lingui_cli_node,
    )
    app.add_directive('lingui-cli-option', Cmdoption)
uwgraphics/Ubiqu-Ity | Ity/celery_tasks.py | Python | bsd-2-clause | 12,033 | 0.001828 | # coding=utf-8
from __future__ import absolute_import
import os
import sys
sys.path.append(os.path.join(
os.path.dirname(os.path.basename(__file__)),
".."
))
sys.path.append(os.path.dirname(__file__))
from collections import OrderedDict
import codecs
import json
import hashlib
# Ity Imports
import Ity
from Ity.Utilities import Corpus, CorpusText
from Ity.TaskSupport import *
# Celery
from celery import Celery
from celery.utils.log import get_task_logger
# Celery application, configured from the local celery_config module.
celery_app = Celery(__name__)
celery_app.config_from_object("celery_config")
logger = get_task_logger(__name__)
# Sentry / Raven
# Error reporting is optional: it is only wired up when a SENTRY_DSN key
# is present in the Celery configuration.
if "SENTRY_DSN" in celery_app.conf:
    from raven import Client
    from raven.contrib.celery import register_signal
    client = Client(
        dsn=celery_app.conf["SENTRY_DSN"]
    )
    register_signal(client)
@celery_app.task
def upload_corpus(data):
    # Placeholder task -- corpus uploading has not been implemented yet.
    pass
@celery_app.task
def process_corpus(
    tokenizer="RegexTokenizer",
    taggers=("DocuscopeTagger",),
    formatters=("HTMLFormatter",),
    corpus_formatters=("CSVFormatter",),
    **corpus_kwargs
):
    """
    Processes an entire corpus of texts, given a path to them, using a certain
    tokenizer, taggers, formatters, and corpus formatters.
    Additional arguments include "path", "output_path" and more---
    please refer to init method of the Ity.Utilities.Corpus class.
    By the way, while the `formatters` operate on single texts with their
    format() method, `corpus_formatters` operate on the entire corpus with
    their batch_format() method. That's the distinction.
    The `tokenizer` argument, as well as **any tuple item** in the `taggers`,
    `formatters`, or `corpus_formatters` arguments may be one of the following:
    * A **str** equal to the name of an appropriate Ity class, i.e.
      `tokenizer="RegexTokenizer"`, `taggers=("DocuscopeTagger", "CSVTagger")`
    * An appropriate **class** that has been imported, i.e.
      `tokenizer=RegexTokenizer`, `taggers=(DocuscopeTagger, CSVTagger)`
    * An **instance** of an appropriate Ity class, i.e.
      `tokenizer=my_tokenizer`, `taggers=(first_tagger, second_tagger)`
    You may process texts in a corpus with multiple taggers and formatters,
    but may only use one tokenizer; the rest of the modules up the chain have
    to agree on *something*, right?
    :param tokenizer: A str, an Ity Tokenizer class,
                      or an Ity Tokenizer instance.
    :param taggers: A tuple of strs, Ity Tagger classes,
                    or Ity Taggers instances.
    :param formatters: A tuple of strs, Ity Formatter classes,
                       or Ity Formatter instances.
    :param corpus_formatters: A tuple of strs, Ity Formatter classes,
                              or Ity Formatter instances.
    :param corpus_kwargs: Keyword arguments to be passed to the
                          Ity.Utilities.Corpus init() method:
                          `path`, `output_path`, etc.
    :return:
    """
    corpus = Corpus(**corpus_kwargs)
    # Take the tokenizer, taggers, formatters, and batch_formatters arguments
    # and initialize whatever modules we need. The init_* helpers accept a
    # str, a class, or an already-built instance.
    tokenizer = init_tokenizer(tokenizer)
    taggers = [
        init_tagger(tagger)
        for tagger in taggers
    ]
    formatters = [
        init_formatter(formatter)
        for formatter in formatters
    ]
    corpus_formatters = [
        init_formatter(corpus_formatter)
        for corpus_formatter in corpus_formatters
    ]
    # Process each text in the corpus. Note that process_text() is invoked
    # directly (synchronously), not dispatched as a subtask.
    results = OrderedDict()
    for name, path in corpus.texts.items():
        results[name] = process_text(
            path=path,
            tokenizer=tokenizer,
            taggers=taggers,
            formatters=formatters,
            corpus_instance=corpus
        )
    # Use some of the results to generate output with the corpus_formatters.
    # for corpus_formatter in corpus_formatters:
    # NOTE(review): corpus_formatters are initialized above but never
    # invoked, and corpus_results is always None -- this looks unfinished.
    corpus_results = None
    return corpus, results, corpus_results
@celery_app.task
def process_text(
path,
name=None,
tokenizer="RegexTokenizer",
taggers=("DocuscopeTagger",),
formatters=("StaticHTMLFormatter",),
corpus_instance=None,
save=("format_data",),
save_to_disk=True
):
"""
Given a path to a text file, process a text using Ity.
"""
if save is None or (
"text_str" not in save and
"tokens" not in save and
"rules" not in save and
"tags" not in save and
"formats" not in save
):
raise ValueError("We're not supposed to save any data? Why are we even generating it, then?")
# Create a CorpusText instance for this text file.
text_instance = CorpusText(path, name=name, corpus=corpus_instance)
# Prep the Ity modules for processing.
# This is going to look a little weird: "didn't you initialize the modules
# in process_corpus()?"
# Yes, we did. the init_tokenizer(), init_tagger(), and init_formatter()
# functions all check to see if the input is already an instance. If they
# get a str or a class instead, they'll do the right thing!
tokenizer = init_tokenizer(tokenizer)
taggers = [
init_tagger(tagger)
for tagger in taggers
]
formatters = [
init_formatter(formatter)
for formatter in formatters
]
# Tokenize the text content.
text_instance.tokens = tokenize_text(text_instance.text_str, tokenizer)
# Tag this text with the specified Tagger classes.
for tagger_index, tagger in enumerate(taggers):
# Raise an exception if we're tagging a second time with [effectively]
# the exact same tagger---all the same settings that matter and such.
# (This is why it's important to make sure that Ity modules provide
# a precise full_label properties.)
if tagger.full_label in text_instance.tag_data:
raise ValueError("Needlessly tagging a text with an identically configured tagger for a second time: %s" % tagger.full_label)
rules, tags = tag_tokens(text_instance.tokens, tagger)
# Append a dict of information from this tagger.
text_instance.tag_data[tagger.full_label] = {
"tags": tags,
"rules": rules,
"label": tagger.label,
"full_label": tagger.full_label
}
# Format each tagged output for this text with the specified Formatter classes.
for formatter_index, formatter in enumerate(formatters):
# Raise an exception if we're formatting a second time with
# [effectively] the exact same formatter---all the same settings
# that matter and such.
if formatter.full_label in text_instance.format_data:
raise ValueError("Needlessly formatting a text with an identically configured formatter for a second time: %s" % tagger.full_label)
# tagger_instance.tag_data may contain the output of multiple taggers.
# The format_text() function will generate a separate output for each tagger.
# Also, note that we're not passing the format_text() function the
# text_str or tokens arguments because we're passing it a CorpusText
# instance, which has been previously updated above to contain the
# text's tokens and tag_data. Additionally, the text_str property
# provides the text file's contents.
text_instance.format_data = format_text(
text_instance=text_instance, # Contains tokens, text_str, and tag_data for one or more taggers
formatter=formatter,
save_to_disk=save_to_disk and "formats" in save
)
# Return ONLY the processed text results we want by way of the CorpusText instance.
# This means we're going to clear out the stuff we weren't asked to save.
# Conditionally add data to the return value.
# TODO: Add support for writing certain Python data structures to disk (other than Formatters, which will be able to write to disk on their own.)
if "metadata" not in save:
text_instance.metadata = None
if "text_str" not in save:
text_instance._text_str = None
if "tokens" not in save:
text_instance.tokens = [ |
hyperized/ansible | lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py | Python | gpl-3.0 | 16,660 | 0.002221 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_galleryimageversion
version_added: '2.9'
short_description: Manage Azure SIG Image Version instance.
description:
- 'Create, update and delete instance of Azure SIG Image Version.'
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
gallery_name:
description:
- >-
The name of the Shared Image Gallery in which the Image Definition
resides.
required: true
type: str
gallery_image_name:
description:
- >-
      The name of the gallery Image Definition in which the Image Version is
      to be created.
required: true
type: str
name:
description:
- >-
The name of the gallery Image Version to be created. Needs to follow
semantic version name pattern: The allowed characters are digit and
period. Digits must be within the range of a 32-bit integer. Format:
<MajorVersion>.<MinorVersion>.<Patch>
required: true
type: str
location:
description:
- Resource location
type: str
publishing_profile:
description:
- Publishing profile.
required: true
type: dict
suboptions:
target_regions:
description:
- >-
The target regions where the Image Version is going to be replicated
to. This property is updatable.
type: list
suboptions:
name:
description:
- Region name.
type: str
regional_replica_count:
description:
- >-
The number of replicas of the Image Version to be created per
region. This property would take effect for a region when
regionalReplicaCount is not specified. This property is updatable.
type: str
storage_account_type:
description:
- Storage account type.
type: str
managed_image:
description:
- Managed image reference, could be resource id, or dictionary containing C(resource_group) and C(name)
type: raw
snapshot:
description:
- Source snapshot to be used.
type: raw
replica_count:
description:
- >-
The number of replicas of the Image Version to be created per
region. This property would take effect for a region when
regionalReplicaCount is not specified. This property is updatable.
type: number
exclude_from_latest:
description:
- >-
If set to true, Virtual Machines deployed from the latest version of
the Image Definition won't use this Image Version.
type: bool
end_of_life_date:
description:
- >-
The end of life date of the gallery Image Version. This property can
be used for decommissioning purposes. This property is updatable.
Format should be according to ISO-8601, for instance "2019-06-26".
type: str
storage_account_type:
description:
- >-
Specifies the storage account type to be used to store the image.
This property is not updatable.
type: str
state:
description:
- Assert the state of the GalleryImageVersion.
- >-
Use C(present) to create or update an GalleryImageVersion and C(absent)
to delete it.
default: present
choices:
- absent
- present
type: str
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create or update a simple gallery Image Version.
azure_rm_galleryimageversion:
resource_group: myResourceGroup
gallery_name: myGallery1283
gallery_image_name: myImage
name: 10.1.3
location: West US
publishing_profile:
end_of_life_date: "2020-10-01t00:00:00+00:00"
exclude_from_latest: yes
replica_count: 3
storage_account_type: Standard_LRS
target_regions:
- name: West US
regional_replica_count: 1
- name: East US
regional_replica_count: 2
storage_account_type: Standard_ZRS
managed_image:
name: myImage
resource_group: myResourceGroup
'''
RETURN = '''
id:
description:
- Resource Id
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGalle
ry1283/images/myImage/versions/10.1.3"
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Plain-int enumeration of the actions this module may perform."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMGalleryImageVersions(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
updatable=False,
disposition='resourceGroupName',
required=True
),
gallery_name=dict(
type='str',
updatable=False,
disposition='galleryName',
required=True
),
gallery_image_name=dict(
type='str',
updatable=False,
disposition='galleryImageName',
required=True
),
name=dict(
type='str',
updatable=False,
disposition='galleryImageVersionName',
required=True
),
location=dict(
type='str',
updatable=False,
disposition='/'
),
publishing_profile=dict(
type='dict',
disposition='/properties/publishingProfile',
options=dict(
target_regions=dict(
type='list',
disposition='targetRegions',
options=dict(
name=dict(
type='str',
required=True
),
regional_replica_count=dict(
type='int',
disposition='regionalReplicaCount'
),
storage_account_type=dict(
type='str',
disposition='storageAccountType'
)
)
),
managed_image=dict(
type='raw',
pattern=('/subscriptions/{subscription_id}/resourceGroups'
'/{resource_group}/providers/Microsoft.Compute'
'/images/{name}'),
disposition='source/managedImage/id'
),
snapshot=dict(
type='raw',
pattern=('/subscriptions/{subscription_id}/resourceGroups'
'/{resource_group}/providers/Microsoft.Compute'
'/snapshots/{name}'),
disposition='source/managedImage/id'
),
replica_count=dict(
|
exploreshaifali/portal | systers_portal/community/forms.py | Python | gpl-2.0 | 3,969 | 0 | from django import forms
from django.contrib.auth.models import Group
from common.forms import ModelFormWithHelper
from common.helpers import SubmitCancelFormHelper
from community.constants import COMMUNITY_ADMIN
from community.models import Community, CommunityPage
from community.utils import get_groups
from users.models import SystersUser
class CommunityForm(ModelFormWithHelper):
    """Form to edit a Community profile.

    Exposes the community's identity, contact and social-media fields;
    the crispy-forms helper renders Submit/Cancel, with Cancel linking
    back to the community profile view.
    """
    class Meta:
        model = Community
        fields = ('name', 'slug', 'order', 'email', 'mailing_list',
                  'parent_community', 'website', 'facebook', 'googleplus',
                  'twitter')
        helper_class = SubmitCancelFormHelper
        helper_cancel_href = "{% url 'view_community_profile' " \
                             "community.slug %}"
class AddCommunityPageForm(ModelFormWithHelper):
    """Form to create a new CommunityPage. The author and the community of
    the page are expected to be provided when initializing the form:

    * author - currently logged in user, aka the author of the page
    * community - to which Community the CommunityPage belongs
    """
    class Meta:
        model = CommunityPage
        fields = ('title', 'slug', 'order', 'content')
        helper_class = SubmitCancelFormHelper
        helper_cancel_href = "{% url 'view_community_landing' " \
                             "community.slug %}"
    def __init__(self, *args, **kwargs):
        # Pop the custom kwargs before delegating to ModelForm, which
        # does not accept them.
        self.author = kwargs.pop('author')
        self.community = kwargs.pop('community')
        super(AddCommunityPageForm, self).__init__(*args, **kwargs)
    def save(self, commit=True):
        """Override save to add author and community to the instance."""
        instance = super(AddCommunityPageForm, self).save(commit=False)
        # The form stores the auth User; the model wants the SystersUser.
        instance.author = SystersUser.objects.get(user=self.author)
        instance.community = self.community
        if commit:
            instance.save()
        return instance
class EditCommunityPageForm(ModelFormWithHelper):
    """Form to edit a CommunityPage.

    Cancel returns to the page being edited.
    """
    class Meta:
        model = CommunityPage
        fields = ('slug', 'title', 'order', 'content')
        helper_class = SubmitCancelFormHelper
        helper_cancel_href = "{% url 'view_community_page' community.slug " \
                             "object.slug %}"
class PermissionGroupsForm(forms.Form):
    """Form to manage (select/deselect) user permission groups.

    Expects two extra kwargs on construction:

    * user - the SystersUser whose group memberships are edited
    * community - the Community whose permission groups are offered
    """
    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
        community = kwargs.pop('community')
        super(PermissionGroupsForm, self).__init__(*args, **kwargs)
        # get all community groups and remove community admin group
        # from the list of choices
        self.groups = list(get_groups(community.name))
        admin_group = Group.objects.get(
            name=COMMUNITY_ADMIN.format(community.name))
        self.groups.remove(admin_group)
        choices = [(group.pk, group.name) for group in self.groups]
        self.fields['groups'] = forms.\
            MultipleChoiceField(choices=choices, label="", required=False,
                                widget=forms.CheckboxSelectMultiple)
        # Pre-select the groups the user already belongs to.
        self.member_groups = self.user.get_member_groups(self.groups)
        self.fields['groups'].initial = [group.pk for group in
                                         self.member_groups]
        self.helper = SubmitCancelFormHelper(
            self, cancel_href="{% url 'community_users' community.slug %}")
    def save(self):
        """Update the groups of which the user is a member.

        Leave every previously-held group that was deselected, then join
        every selected group the user is not yet a member of.
        """
        group_pks = [int(pk) for pk in self.cleaned_data['groups']]
        for member_group in self.member_groups:
            if member_group.pk not in group_pks:
                self.user.leave_group(member_group)
        for pk in group_pks:
            group = Group.objects.get(pk=pk)
            if not self.user.is_group_member(group.name):
                self.user.join_group(group)
ama-jharrison/agdc | agdc/tests/test_new_ingest.py | Python | apache-2.0 | 1,629 | 0.003683 | #!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
Run all unit tests for the new ingest process.
"""
import unittest
import test_abstract_ingester
import test_landsat_dataset
import test_landsat_bandstack
import test_dataset_record
import test_tile_record
import test_tile_contents
def the_suite():
    """Return a test suite aggregating all the tests to be run."""
    # Collect the sub-suite of each test module; the tile-based suites
    # are currently disabled (kept below for reference).
    suite_list = [
        test_abstract_ingester.the_suite(),
        test_landsat_dataset.the_suite(),
        test_landsat_bandstack.the_suite(),
        test_dataset_record.the_suite(),
        # test_tile_record.the_suite(fast=True),
        # test_tile_contents.the_suite(fast=True),
    ]
    return unittest.TestSuite(suite_list)
#
# Run unit tests if in __main__
#
if __name__ == | '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite())
|
walty8/trac | trac/attachment.py | Python | bsd-3-clause | 47,208 | 0.00053 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from cStringIO import StringIO
from datetime import datetime
import errno
import hashlib
import os.path
import posixpath
import re
import shutil
import sys
import unicodedata
from genshi.builder import tag
from trac.admin import AdminCommandError, IAdminCommandProvider, PrefixList, \
console_datetime_format, get_dir_list
from trac.config import BoolOption, IntOption
from trac.core import *
from trac.mimeview import *
from trac.perm import PermissionError, IPermissionPolicy
from trac.resource import *
from trac.search import search_to_sql, shorten_result
from trac.util import content_disposition, create_zipinfo, get_reporter_id
from trac.util.datefmt import datetime_now, format_datetime, from_utimestamp, \
to_datetime, to_utimestamp, utc
from trac.util.text import exception_to_unicode, path_to_unicode, \
pretty_size, print_table, stripws, unicode_unquote
from trac.util.translation import _, tag_
from trac.web import HTTPBadRequest, IRequestHandler, RequestDone
from trac.web.chrome import (INavigationContributor, add_ctxtnav, add_link,
add_stylesheet, web_context, add_warning)
from trac.web.href import Href
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class InvalidAttachment(TracError):
    """Exception raised when attachment validation fails."""
class IAttachmentChangeListener(Interface):
    """Extension point interface for components that require
    notification when attachments are created or deleted."""
    def attachment_added(attachment):
        """Called when an attachment is added.

        :param attachment: the `Attachment` instance that was created
        """
    def attachment_deleted(attachment):
        """Called when an attachment is deleted.

        :param attachment: the `Attachment` instance that was removed
        """
    def attachment_reparented(attachment, old_parent_realm, old_parent_id):
        """Called when an attachment is reparented.

        :param attachment: the `Attachment` instance that was moved
        :param old_parent_realm: realm of the previous parent resource
        :param old_parent_id: id of the previous parent resource
        """
class IAttachmentManipulator(Interface):
    """Extension point interface for components that need to
    manipulate attachments.

    Unlike change listeners, a manipulator can reject changes being
    committed to the database."""
    def prepare_attachment(req, attachment, fields):
        """Not currently called, but should be provided for future
        compatibility."""
    def validate_attachment(req, attachment):
        """Validate an attachment after upload but before being stored
        in Trac environment.

        Must return a list of ``(field, message)`` tuples, one for
        each problem detected. ``field`` can be any of
        ``description``, ``username``, ``filename``, ``content``, or
        `None` to indicate an overall problem with the
        attachment. Therefore, a return value of ``[]`` means
        everything is OK."""
class ILegacyAttachmentPolicyDelegate(Interface):
    """Interface that can be used by plugins to seamlessly participate
    to the legacy way of checking for attachment permissions.

    This should no longer be necessary once it becomes easier to
    setup fine-grained permissions in the default permission store.
    """
    def check_attachment_permission(action, username, resource, perm):
        """Return the usual `True`/`False`/`None` security policy
        decision appropriate for the requested action on an
        attachment.

        :param action: one of ATTACHMENT_VIEW, ATTACHMENT_CREATE,
                       ATTACHMENT_DELETE
        :param username: the user string
        :param resource: the `~trac.resource.Resource` for the
                         attachment. Note that when
                         ATTACHMENT_CREATE is checked, the
                         resource ``.id`` will be `None`.
        :param perm: the permission cache for that username and resource
        """
class AttachmentModule(Component):
implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider,
IResourceManager)
realm = 'attachment'
is_valid_default_handler = False
change_listeners = ExtensionPoint(IAttachmentChangeListener)
manipulators = ExtensionPoint(IAttachmentManipulator)
CHUNK_SIZE = 4096
max_size = IntOption('attachment', 'max_size', 262144,
"""Maximum allowed file size (in bytes) for attachments.""")
max_zip_size = IntOption('attachment', 'max_zip_size', 2097152,
"""Maximum allowed total size (in bytes) for an attachment list to be
downloadable as a `.zip`. Set this to -1 to disable download as `.zip`.
(''since 1.0'')""")
render_unsafe_content = BoolOption('attachment', 'render_unsafe_content',
'false',
"""Whether attachments should be rendered in the browser, or
only made downloadable.
Pretty much any file may be interpreted as HTML by the browser,
which allows a malicious user to attach a file containing cross-site
scripting attacks.
For public sites where anonymous users can create attachments it is
recommended to leave this option disabled.""")
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        # Highlight the parent realm's navigation item ('wiki', ...) as
        # stored in req.args by match_request -- attachments have no
        # navigation entry of their own.
        return req.args.get('realm')
    def get_navigation_items(self, req):
        # This component contributes no navigation items itself.
        return []
    # IRequestHandler methods
    def match_request(self, req):
        # Handled URLs: /attachment/<realm>/<path>, optionally prefixed
        # with "raw-" (direct download) or "zip-" (archive download).
        match = re.match(r'/(raw-|zip-)?attachment/([^/]+)(?:/(.*))?$',
                         req.path_info)
        if match:
            format, realm, path = match.groups()
            if format:
                # Strip the trailing '-' of the "raw-"/"zip-" prefix.
                req.args['format'] = format[:-1]
            req.args['realm'] = realm
            if path:
                req.args['path'] = path
            return True
def process_request(self, req):
parent_id = None
parent_realm = req.args.get('realm')
path = req.args.get('path')
filename = None
if not parent_realm or not path:
raise HTTPBadRequest(_('Bad request'))
if parent_realm == 'attachment':
raise TracError(tag_("%(realm)s is not a valid parent realm",
realm=tag.code(parent_realm)))
parent_realm = Resource(parent_realm)
action = req.args.get('action', 'view')
if action == 'new':
parent_id = path.rstrip('/')
else: |
last_slash = path.rfind('/')
if last_slash == -1:
parent_id, filename = path, ''
else:
parent_id, filename = path[:last_slash], path[last_slash + 1:]
parent = parent_realm(id=parent_id)
if not resource_exists(self.env, parent):
raise ResourceNotFound(
| _("Parent resource %(parent)s doesn't exist",
parent=get_resource_name(self.env, parent)))
# Link the attachment page to parent resource
parent_name = get_resource_name(self.env, parent)
parent_url = get_resource_url(self.env, parent, req.href)
add_link(req, 'up', parent_url, parent_name)
add_ctxtnav(req, _('Back to %(parent)s', parent=parent_name),
parent_url)
if not filename: # there's a trailing '/'
if req.args.get('format') == 'zip':
self._download_as_zip(req, parent)
elif action != 'new':
return self._render_list(req, parent)
attachment = Attachment(self.env, parent.child(self.realm, fi |
AaronM04/coding-practice | pe_49/main.py | Python | gpl-3.0 | 1,693 | 0.004135 | #!/usr/bin/python
## globals
primes_set = set()
primes_list = []
def is_prime(n):
    """Return True if n is prime, False otherwise.

    Trial division up to floor(sqrt(n)).  Fixes two defects of the
    original: ``i`` was never incremented (infinite loop for any n not
    divisible by 2 with sqrt(n) >= 2), and ``sqrt`` was used without
    ever being imported anywhere in the file.
    """
    from math import sqrt  # local import: the file has no import section
    if n < 2:
        return False
    limit = int(sqrt(n))
    i = 2
    while i <= limit:
        if n % i == 0:
            return False
        i += 1
    return True
def find_prime_permutations(n, primes=None):
    """Return the set of primes whose digits are a permutation of n's.

    :param n: a 4-digit number (1000..9999); included in the result when
              it is itself in ``primes``.
    :param primes: optional collection of primes to test candidates
                   against; defaults to the module-level ``primes_set``
                   (backward compatible with the original signature).
    """
    from itertools import permutations  # file has no import section
    if primes is None:
        primes = primes_set
    assert 1000 <= n <= 9999
    # permutations() yields every ordering of the four digits exactly as
    # the original quadruple index loop did, leading zeros included
    # (such candidates are < 1000 and never in a 4-digit prime set).
    found = set()
    for digits in permutations(str(n)):
        candidate = int(''.join(digits))
        if candidate in primes:
            found.add(candidate)
    return found
def find_arith_seq(_set):
    """Return three members of _set forming an arithmetic sequence, or None.

    For each candidate middle term b and smaller first term a, checks
    whether c = 2*b - a (the third term) is also a member.  Returns the
    first (a, b, c) found; None when the set has fewer than three
    elements or contains no such sequence.

    Fixes: replaced Python-2-only ``xrange`` with ``range``; dropped the
    redundant ``n != l[i]`` / ``n != l[j]`` checks (c > b > a always
    holds since the sorted members are distinct).
    """
    members = sorted(_set)
    if len(members) < 3:
        return None
    for i in range(1, len(members) - 1):  # middle term: not either end
        for j in range(i):  # first term: strictly smaller
            c = 2 * members[i] - members[j]
            if c in _set:
                return (members[j], members[i], c)
    return None
if __name__ == '__main__':
    # Build the global set/list of all 4-digit primes (Project Euler 49
    # searches 4-digit prime arithmetic sequences of digit permutations).
    if len(primes_set) == 0: # if not initialized
        for i in xrange(1001, 9999+1):
            if is_prime(i):
                primes_set.add(i)
                primes_list.append(i)
    # For every 4-digit prime, gather its prime digit-permutations and
    # look for a length-3 arithmetic progression among them.
    solutions = set()
    for p in primes_list:
        prime_perm_set = find_prime_permutations(p)
        result = find_arith_seq(prime_perm_set)
        if result is not None:
            solutions.add(result)
    print repr(solutions)
|
eldie1984/Scripts | pg/scripts mios/python/ly/errores_cc_ant.py | Python | gpl-2.0 | 9,892 | 0.024876 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
#
# Autor: Diego Gasch
#
# Empresa: CMD s.a
#
# Fecha de generación: 2013-07-03
#
# Fecha de ultima modificación: 2013-07-03
#
# Ultima modificación: ([quien] + [que]) dgasch creacion del script
#
#
# Objetivo: Enviar un reporte con los errores de envio desde BAC a Libertya incluyendo la cantidad de los mismos
#
# Forma de uso: En el crontab y en la documentacion de la wiki tenemos la linea con la que podemos ejecutar el script, el mismo no debe usarse con parametros.
#
# Proximas mejoras a incorporar: Securizar el script testeando que el mismo rechace cualquier parametro que se le incluya.
#
# Version : 1.1
######################################
# Codigo: #
#####################################
# IMPORTO LIBRERIAS, DEFINO VARIABLES, CREO CLASES Y FUNCIONES:
# Importo las librerias necesarias para la ejecucion del script
import MySQLdb
import psycopg2
from socket import gethostname
from sys import argv,exit
import getopt
import logging
# Funcion que iporta las conexiones a la base
from commons import *
receivers=['ndemarchi@cmd.com.ar','dgasch@cmd.com.ar','mcortez@cmd.com.ar','sostapowicz@cmd.com.ar','mmolina@cmd.com.ar']
test=0
def main(argv):
    """Parse CLI options and configure logging / mail recipients.

    -d/--debug raises the log level to DEBUG; -t <addr> (--testing)
    flips the global ``test`` flag and replaces the recipient list with
    the single address given as argument.  Usage text is printed (in
    Spanish, user-facing) on -h or on an unknown option.
    """
    level_opt=logging.INFO
    global receivers
    global test
    try:
        opts, args = getopt.getopt(argv,"hdt:",["debug","testing"])
    except getopt.GetoptError:
        print """-d --debug ---> Pone en debug el log
        -t --testing ---> Pone a soporte como receptor del mail
        """
        exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print """-d --debug ---> Pone en debug el log
            -t --testing ---> Pone a soporte como receptor del mail
            """
            exit()
        elif opt in ("-d", "--debug"):
            level_opt=logging.DEBUG
        elif opt in ("-t", "--testing"):
            print "Testing"
            test=1
            # Replace the default recipient list with the test address.
            receivers = []
            receivers.append(arg)
    # Apply the chosen verbosity to the root logger.
    logging.basicConfig(format=FORMAT,level=level_opt)
# Genero dos funciones fuera de la clase
##################################################
###############################
if __name__ == "__main__":
main(argv[1:])
################################
# Parametros
################################
hostname=gethostname()
pathDir=argv[0]
is_mail=0
##################################################
logging.debug("Seteo variables")
logging.info("---------Inicio Script ----------------")
# Declaro la instancia del mail y el encabezado
mail_bac=mail(','.join(receivers),'Alerta Pagos BAC (py)')
mail_bac.createHtmlHeader("id_lote,id_pago,id_cargo")
# COMIENZO CON LAS VERIFICACIONES:
try:
db=MySQLdb.connect(host=db_host, user=db_user, passwd=db_pass , db=db_database )
cursor = db.cursor()
logging.debug("Se realizo la conexion a la base")
except Exception:
logging.error("Nos se pudo realizar la conexion a la base")
exit(2)
try:
db_cc=MySQLdb.connect(host=db_host_cc, user=db_user_cc, passwd=db_pass_cc , db=db_database_cc )
cursor_cc = db_cc.cursor()
logging.debug("Se realizo la conexion a la base de CC")
except Exception:
logging.error("Nos se pudo realizar la conexion a la base de CC")
exit(2)
try:
db_pr=MySQLdb.connect(host=db_host_pr, user=db_user_pr, passwd=db_pass_pr , db=db_database )
cursor_pr = db_pr.cursor()
logging.debug("Se realizo la conexion a la base")
except Exception:
logging.error("Nos se pudo realizar la conexion a la base")
exit(2)
# Connect to the Libertya (LY) production PostgreSQL database.
try:
    db_ly=psycopg2.connect(host=db_host_ly_pr, user=db_user_ly_pr, password=db_pass_ly_pr , database=db_database_ly )
    cursor_ly = db_ly.cursor()
    logging.debug("Se realizo la conexion a la base de LY")
except Exception:
    logging.error("Nos se pudo realizar la conexion a la base de LY")
    exit(2)
cursor_cc.execute("""select de.id from deals d,
deal_externals de
where d.is_end_user=1
and end_date<'2013-11-01'
and de.created > '2013-08-01'
and de.deal_id=d.id
""")
# Collect the deal ids fetched above so they can later be interpolated
# into the follow-up SQL and the report mail.
lista=[]
lista_bac=[]
str_lista=''
str_lista_bac=''
for registro in cursor_cc.fetchall():
    lista.append(str(registro[0]))
str_lista = (',').join(lista)
#print str_lista
#logging.debug("la lista de ID's es: \n %s" % str_lista)
#print "la lista de ID's es: \n %s" % str_lista
cursor.execute("""select efl.C_Invoice_ID,efl.id_lote,epl.C_AllocationHdr_ID,epl.id_pago
from cce_cargo cc
left join cce_pago cp on cp.id_pago=cc.id_pago
left join erp_factura_ly efl on cc.id_lote=efl.id_lote
left join erp_pago_ly epl on epl.id_pago=cc.id_pago
where cp.id_portal=18 and cc.id_lote is not null
and cc.id_producto=18001
and efl.C_Invoice_ID is not null
and epl.C_AllocationHdr_ID is not null
and cp.id_pago_portal in (%s)
and cc.generacion < '2013-11-14'
and cc.id_lote not in (1179510)
""" % str_lista)
#
result=cursor.fetchall()
#for i in result:
# print i[0]
print result
for registro in result:
lista_bac.append(str(registro[0]))
str_lista_bac = (',').join(lista_bac)
for miembro in result:
mail_bac.addDataHtml(miembro[0],miembro[1],miembro[2])
logging.debug("| %s | %s | %s | %s" % (miembro[0],miembro[1],miembro[2],miembro[3]))
print "/* id_lote=%s |id_pago= %s */" % (miembro[1],miembro[3])
print """update erp_factura_ly
set issendrequired = 'Y',
tipocomprobante='FC',
c_doctypetarget_id=null,
punto_venta=101,
fecha_envio=null,
dateacct='2013-11-30'
where id_lote in (%s);""" % miembro[1]
print """ update erp_cargo_ly
set m_product_id=1017122
where id_lote in (%s);""" % miembro[1]
print """ update cce_cargo
set id_producto=18000
where id_lote in (%s) ;""" % miembro[1]
print """ update erp_pago_ly
set fecha_envio=null
where id_pago in (%s) ;""" % miembro[3]
cursor_pr.execute( """update erp_factura_ly
set issendrequired = 'Y',
tipocomprobante='FC',
c_doctypetarget_id=null,
punto_venta=101,
fecha_envio=null
where id_lote in (%s);""" % miembro[1])
cursor_pr.execute( """ update erp_cargo_ly
set m_product_id=1017122
where id_lote in (%s);""" % miembro[1])
cursor_pr.execute(""" update cce_cargo
set id_producto=18000
where id_lote in (%s) ;""" % miembro[1])
cursor_pr.execute( """update erp_pago_ly
set fecha_envio=null
where id_pago in (%s) ;
""" % miembro[3])
db_pr.commit()
# is_mail=1
#if is_mail==1 or test==1:
# text='REPORTE: ERRORES de los envios de LIBERTYA'
# html="""\
# <html>
# <head>
# <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
# </head>
# <body>
# <h1 style="color:OrangeRed">Este es un reporte solo INFORMATIVO, NO ESCALAR </h1>
# """+mail_bac.html+"""</tbody>
# </table>
# <h3> Reenviar el mail a los responsables del portal</h3>
#
# <p>Alama corriendo en el equipo """ + hostname + """ en """ + pathDir+ """ </p>
# </body>
# </html>"""
# mail_bac.send_mail(sender,receivers, text, html)
# logging.info("c_invoice_id: %s, id_lote: %s " % (miembro[0], miembro[1]))
# logging.info("Ejecuto en bac la siguiente sentencia: "+ """
#insert into erp_anulacion_ly
#(id_portal,C_AllocationHdr_ID,AllocationAction,AD_Client_ID,AD_Org_ID,fecha_envio,pago_enviado,is_error_pago,error_msg_pago,generacion,actualizacion,baja,id_pago)
#values
#(18,%s,null,1010057,1010093,null,'N','N','','2013-11-19 14:23:59',null,null,%s)
#""" % (miembro[2], miembro[3]))
# cursor_pr.execute("""insert into erp_anulacion_ly
# (id_portal,C_AllocationHdr_ID,AllocationAction,AD_Client_ID,AD_Org_ID,fecha_envio,pago_enviado,is_error_pago,error_msg_pago,generacion,actualizacion,baja)
# values
# (18,%s,null,1010057,1010093,null,'N','N','','2013-11-19 14:23:59',null,%s)""" % (miembro[2], miembro[3]))
# db_pr.commit()
# logging.info("Ejecuto en bac la siguiente sentencia: "+"""
#insert into erp_anulacion_detalle_ly
#(id_anulacion_ly,C_Invoice_ID,C_Order_ID,CreditNote_DocumentNo,factura_enviado,is_error_factura,error_msg_factura,pedido_enviado,is_error_pedido,error_msg_pedido,generacion, |
gems-uff/labsys | migrations/versions/2018-09-19_09:49:05__251447ab2060.py | Python | mit | 680 | 0.001471 | """empty message
Revision ID: 251447ab2060
Revises: 53ac0b4e8891
Create Date: 2018-09-19 09:49:05.552597
"""
from alembic import op
import sqlalchemy as sa
# revision id | entifiers, used by Alembic.
revision = '251447ab2060'
down_revision = '53ac0b4e8891'
branch_labels = None
depends_on = None
def upg | rade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('transactions', sa.Column('lot_number', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('transactions', 'lot_number')
# ### end Alembic commands ###
|
DeniSix/SublimeNodeStacktrace | node_debugger/commands/attach_debugger.py | Python | mit | 3,423 | 0.025124 | import sublime, sublime_plugin
from .. import config
from .. import globals
from .. import logger
from ..debug_client import DebugClient
from ..clicks import Clicks
log = logger.get('cmd_attach_debugger')
def lookup_ref(id, refs):
    """Return the first ref dict whose 'handle' equals id, or None."""
    return next((ref for ref in refs if ref['handle'] == id), None)
def open_file(data):
    """Open ``data['script']`` in Sublime at ``data['line']``, group 0.

    ``data`` is a dict with keys 'script' and 'line', plus optionally
    'exception' (shown in the view's status bar as 'node_error').
    Internal node scripts (no path separator) are not supported.
    """
    if '/' not in data['script'].replace('\\', '/'):
        print('[NDBG] Internal scripts (%s) doesn\'t supported for now. Sorry :(' % data['script'])
        return
    # TODO: fetch node's internal scripts with `scripts` request
    window = sublime.active_window()
    filename = '%s:%d:%d' % (data['script'], data['line'], 1) # it's better to use '1' instead of data['column']
    src = window.open_file(filename, sublime.ENCODED_POSITION)
    window.set_view_index(src, 0, 0)
    if 'exception' in data:
        src.set_status('node_error', data['exception'])
def trace_callback(data):
    """Render a V8 `backtrace` response into the stacktrace scratch view.

    Builds one line per stack frame, registers a clickable region for
    each (opening the frame's source location via ``open_file``), then
    inserts the text into ``globals.st``.
    """
    body = data['body']
    refs = data['refs']
    trace = []
    funcLen = 0  # width of the longest function name, for column alignment
    for frame in body['frames']:
        func = frame['func']['name'] or frame['func']['inferredName'] or 'Anonymous'
        script = lookup_ref(frame['script']['ref'], refs)
        # V8 lines/columns are 0-based; convert to 1-based for display.
        trace.append({'func': func, 'script': script['name'], 'line': int(frame['line']) + 1, 'column': int(frame['column']) + 1})
        l = len(func)
        if funcLen < l:
            funcLen = l
    # First line is the exception text captured by exception_callback.
    text = '%s\n' % globals.exception
    globals.exception = None
    for line in trace:
        s = '\t%s (%s:%d:%d)\n' % (line['func'].ljust(funcLen), line['script'], line['line'], line['column'])
        globals.clicks.add(sublime.Region(len(text), len(text + s)), open_file, line)
        text = text + s
    globals.st.run_command('node_debugger_insert_text', {'text': text})
def exception_callback(data):
    """Handle a V8 'exception' debugger event.

    When 'show_stacktrace' is configured, opens the debug layout with a
    scratch stacktrace view and requests a backtrace; always jumps to
    the throwing source location.  (Restores the tokens
    ``exception_callback`` / ``'show_stacktrace'`` that were garbled in
    the source; the handler registration below references
    ``exception_callback``.)
    """
    log('exception', data)
    body = data['body']
    window = sublime.active_window()
    if config.get('show_stacktrace'):
        globals.exception = body['exception']['text']
        window.set_layout(config.get('debug_layout'))
        # Create new buffer for stacktrace
        globals.st = st = window.new_file()
        st.set_scratch(True)
        st.set_name(config.get('stacktrace_name'))
        st.settings().set('word_wrap', False)
        st.settings().set('syntax', 'Packages/' + globals.prefix + '/node stacktrace.tmLanguage')
        window.set_view_index(st, 1, 0)
        # Request backtrace
        globals.client.execute('backtrace', trace_callback, inlineRefs=True)
    # Open file with error (V8 positions are 0-based; display is 1-based)
    open_file({'script': body['script']['name'], 'line': body['sourceLine'] + 1, 'column': body['sourceColumn'] + 1, 'exception': body['exception']['text']})
def after_compile_callback(data):
    # Intentionally a no-op: 'afterCompile' events are ignored for now.
    pass
def disconnect_handler(e):
    # Drop the client reference once the debuggee disconnects so that a
    # fresh attach can be issued later.
    log('disconnect_handler', e)
    globals.client = None
class NodeDebuggerAttachCommand(sublime_plugin.ApplicationCommand):
    """Sublime command: attach to a node.js debug port and wire handlers."""
    def run(self):
        # Close any previous session before re-attaching.
        if globals.client:
            globals.client.close()
        address = config.get('address')
        try:
            globals.original_layout = sublime.active_window().get_layout()
            globals.clicks = Clicks()
            globals.client = client = DebugClient(address)
            client.on_disconnect(disconnect_handler)
            # client.add_handler('break', exception_callback)
            client.add_handler('exception', exception_callback)
            client.add_handler('afterCompile', after_compile_callback)
            # Break on uncaught exceptions, then resume execution.
            client.execute_sync('setexceptionbreak', lambda data: client.execute('continue', lambda x: str(1)), type='uncaught', enabled=True)
        except (IOError) as e:
            log('Error connecting to %s' % address, e)
            message = 'Error connecting to node.js instance at %s' % address
            sublime.error_message(message)
|
mbr/tinyrpc | tests/test_protocols.py | Python | mit | 1,721 | 0.002905 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc import RPCErrorResponse
@pytest.fixture(params=['jsonrpc'])
def protocol(request):
    """Parametrized fixture yielding one protocol instance per name.

    Bug fix: the original tested ``if 'jsonrpc':`` -- a constant truthy
    string literal -- so the guard always passed and the RuntimeError
    branch was unreachable; it must compare the fixture parameter.
    """
    if request.param == 'jsonrpc':
        return JSONRPCProtocol()
    raise RuntimeError('Bad protocol name in test case')
def test_protocol_returns_bytes(protocol):
    """A serialized request must be a bytes object."""
    request = protocol.create_request('foo', ['bar'])
    serialized = request.serialize()
    assert isinstance(serialized, bytes)
def test_procotol_responds_bytes(protocol):
    """Serialized regular and error responses must both be bytes.

    NOTE(review): the name misspells "protocol"; renaming would be safe
    (pytest discovers by prefix) but is left as-is here.
    """
    req = protocol.create_request('foo', ['bar'])
    rep = req.respond(42)
    err_rep = req.error_respond(Exception('foo'))
    assert isinstance(rep.serialize(), bytes)
    assert isinstance(err_rep.serialize(), bytes)
def test_one_way(protocol):
    """A one-way request produces no response object."""
    req = protocol.create_request('foo', None, {'a': 'b'}, True)
    # PEP 8: identity comparison with None, not ``== None``.
    assert req.respond(None) is None
def test_raises_on_args_and_kwargs(protocol):
    """Supplying both positional and keyword args is rejected."""
    with pytest.raises(Exception):
        protocol.create_request(
            'foo', ['arg1', 'arg2'], {'kw_key': 'kw_value'})
def test_supports_no_args(protocol):
    """Creating a request without any arguments must not raise."""
    protocol.create_request('foo')
def test_creates_error_response(protocol):
    """error_respond() yields a response carrying an ``error`` attribute."""
    request = protocol.create_request('foo', ['bar'])
    error_response = request.error_respond(Exception('foo'))
    assert hasattr(error_response, 'error')
def test_parses_error_response(protocol):
    """A round-tripped (serialized, re-parsed) error response still
    exposes ``error``."""
    request = protocol.create_request('foo', ['bar'])
    serialized = request.error_respond(Exception('foo')).serialize()
    parsed = protocol.parse_reply(serialized)
    assert hasattr(parsed, 'error')
def test_default_id_generator():
    """default_id_generator counts up from its start value."""
    from tinyrpc.protocols import default_id_generator
    gen = default_id_generator(1)
    assert [next(gen), next(gen), next(gen)] == [1, 2, 3]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.