| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
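Each row below stores one Python source file split across three code cells: prefix, middle, and suffix (a fill-in-the-middle layout, per the column header above). A minimal sketch of reassembling a full file from one row, assuming a row is exposed as a plain dict with those field names (the dict literal and its values are illustrative, loosely based on the first row below):

```python
# Illustrative only: join one row's three code cells back into a single source file.
# Assumes "prefix", "middle", and "suffix" are plain strings, as the header suggests.
def reassemble(row):
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "prefix": "from . import account_invoice\nfrom . import ",
    "middle": "account_invoice_line\n",
    "suffix": "from . import product_uom\n",
}

print(reassemble(example_row))
```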
bmya/odoo-argentina | l10n_ar_account/models/__init__.py | Python | agpl-3.0 | 929 | 0 |
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from . import account_journal
from . import account_tax
from . import account_document_letter
from . import afip_responsability_type
from . import account_document_type
from . import afip_incoterm
from . import res_partner
from . import res_country
from . import res_currency
from . import res_company
from . import account_invoice_tax
from . import account_invoice
from . import account_invoice_line
from . import product_uom
from . import account_chart_template
from . import afip_vat_f2002_category
from . import product_template
from . import account_move_line
from . import account_move
from . import afip_padron
from . import account_account
from . import account_account_tag
|
bearstech/ansible | lib/ansible/modules/network/netscaler/netscaler_cs_action.py | Python | gpl-3.0 | 9,091 | 0.00242 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: netscaler_cs_action
short_description: Manage content switching actions
description:
- Manage content switching actions
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the content switching action. Must begin with an ASCII alphanumeric or underscore C(_)
character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon
C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the content
switching action is created.
targetlbvserver:
description:
- "Name of the load balancing virtual server to which the content is switched."
targetvserver:
description:
- "Name of the VPN virtual server to which the content is switched."
targetvserverexpr:
description:
- "Information about this content switching action."
comment:
description:
- "Comments associated with this cs action."
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# lb_vserver_1 must have been already created with the netscaler_lb_vserver module
- name: Configure netscaler content switching action
delegate_to: localhost
netscaler_cs_action:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
state: present
name: action-1
targetlbvserver: lb_vserver_1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: string
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dictionary
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
import json
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csaction import csaction
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log, loglines,
ensure_feature_is_enabled,
get_immutables_intersection
)
def action_exists(client, module):
if csaction.count_filtered(client, 'name:%s' % module.params['name']) > 0:
return True
else:
return False
def action_identical(client, module, csaction_proxy):
if len(diff_list(client, module, csaction_proxy)) == 0:
return True
else:
return False
def diff_list(client, module, csaction_proxy):
action_list = csaction.get_filtered(client, 'name:%s' % module.params['name'])
diff_list = csaction_proxy.diff_object(action_list[0])
if False and 'targetvserverexpr' in diff_list:
json_value = json.loads(action_list[0].targetvserverexpr)
if json_value == module.params['targetvserverexpr']:
del diff_list['targetvserverexpr']
return diff_list
def main():
module_specific_arguments = dict(
name=dict(type='str'),
targetlbvserver=dict(type='str'),
targetvserverexpr=dict(type='str'),
comment=dict(type='str'),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
readwrite_attrs = [
'name',
'targetlbvserver',
'targetvserverexpr',
'comment',
]
readonly_attrs = [
'hits',
'referencecount',
'undefhits',
'builtin',
]
immutable_attrs = [
'name',
'targetvserverexpr',
]
transforms = {
}
json_encodes = ['targetvserverexpr']
# Instantiate config proxy
csaction_proxy = ConfigProxy(
actual=csaction(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
json_encodes=json_encodes,
)
try:
ensure_feature_is_enabled(client, 'CS')
# Apply appropriate state
if module.params['state'] == 'present':
log('Applying actions for state present')
if not action_exists(client, module):
if not module.check_mode:
csaction_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not action_identical(client, module, csaction_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(csaction_proxy, diff_list(client, module, csaction_proxy).keys())
if immutables_changed != []:
module.fail_json(
msg='Cannot update immutable attributes %s' % (immutables_changed,),
diff=diff_list(client, module, csaction_proxy),
**module_result
)
if not module.check_mode:
csaction_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
log('Sanity checks for state present')
if not module.check_mode:
if not action_exists(client, module):
module.fail_json(msg='Content switching action does not exist', **module_result)
if not action_identical(client, module, csaction_proxy):
module.fail_json(
msg='Content switching action differs from configured',
diff=diff_list(client, module, csaction_proxy),
**module_result
)
elif module.params['state'] == 'absent':
log('Applying actions for s
|
drammock/expyfun | expyfun/_utils.py | Python | bsd-3-clause | 30,243 | 0 |
"""Some utility functions"""
# Authors: Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import warnings
import operator
from copy import deepcopy
import subprocess
import importlib
import os
import os.path as op
import inspect
import sys
import time
import tempfile
import traceback
import ssl
from shutil import rmtree
import atexit
import json
from functools import partial
from distutils.version import LooseVersion
import logging
import datetime
from timeit import default_timer as clock
from threading import Timer
import numpy as np
import scipy as sp
from ._externals import decorator
# set this first thing to make sure it "takes"
try:
import pyglet
pyglet.options['debug_gl'] = False
del pyglet
except Exception:
pass
# for py3k (eventually)
if sys.version.startswith('2'):
string_types = basestring # noqa
input = raw_input # noqa, input is raw_input in py3k
text_type = unicode # noqa
from __builtin__ import reload
from urllib2 import urlopen # noqa
from cStringIO import StringIO # noqa
else:
string_types = str
text_type = str
from urllib.request import urlopen
input = input
from io import StringIO # noqa, analysis:ignore
from importlib import reload # noqa, analysis:ignore
###############################################################################
# LOGGING
EXP = 25
logging.addLevelName(EXP, 'EXP')
def exp(self, message, *args, **kwargs):
"""Experiment-level logging."""
self.log(EXP, message, *args, **kwargs)
logging.Logger.exp = exp
logger = logging.getLogger('expyfun')
def flush_logger():
"""Flush expyfun logger"""
for handler in logger.handlers:
handler.flush()
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable EXPYFUN_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = get_config('EXPYFUN_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
verbose = 'INFO' if verbose is True else 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
def set_log_file(fname=None,
output_format='%(asctime)s - %(levelname)-7s - %(message)s',
overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
http://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout,
we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
###############################################################################
# RANDOM UTILITIES
building_doc = any('sphinx-build' in ((''.join(i[4]).lower() + i[1])
if i[4] is not None else '')
for i in inspect.stack())
def run_subprocess(command, **kwargs):
"""Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
**kwargs : objects
Keyword arguments to pass to ``subprocess.Popen``.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
# code adapted with permission from mne-python
kw = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
kw.update(kwargs)
p = subprocess.Popen(command, **kw)
stdout_, stderr = p.communicate()
output = (stdout_.decode(), stderr.decode())
if p.returncode:
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
class ZeroClock(object):
"""Clock that uses "clock" function but starts at zero on init."""
def __init__(self):
self._start_time = clock()
def get_time(self):
"""Get time."""
return clock() - self._start_time
def date_str():
"""Produce a date string for the current date and time
Returns
-------
datestr : str
The date string.
"""
return str(datetime.datetime.today()).replace(':', '_')
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout."""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead. Passing del_after and print_del kwargs to the constructor are
helpful primarily for debugging purposes.
"""
def __new__(self, del_after=True, print_del=False):
new = str.__new__(self, tempfile.mkdtemp())
self._del_after = del_after
self._print_del = print_del
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
if self._del_after is True:
if self._print_del is True:
print('Deleting {} ...'.format(self._path))
rmtree(self._path, ignore_errors=True)
de
|
conan-io/conan | conans/test/functional/util/tools_test.py | Python | mit | 5,578 | 0.002868 |
# -*- coding: utf-8 -*-
import os
import platform
import subprocess
import unittest
import pytest
import six
from conans.client import tools
from conans.client.conf import get_default_settings_yml
from conans.client.tools.files import which
from conans.client.tools.win import vswhere
from conans.errors import ConanException
from conans.model.settings import Settings
from conans.test.utils.mocks import TestBufferConanOutput
from conans.test.utils.test_files import temp_folder
from conans.util.env_reader import get_env
from conans.util.files import save
from conans.util.runners import check_output_runner
class FunctionalToolsTest(unittest.TestCase):
output = TestBufferConanOutput()
@pytest.mark.tool_file # Needs the "file" command, not by default in linux
@pytest.mark.skipif(which("file") is None,
reason="Needs the 'file' command, not by default in linux")
def test_unix_to_dos_unit(self):
def save_file(contents):
tmp = temp_folder()
filepath = os.path.join(tmp, "a_file.txt")
save(filepath, contents)
return filepath
fp = save_file(b"a line\notherline\n")
if platform.system() != "Windows":
output = check_output_runner(["file", fp], stderr=subprocess.STDOUT)
self.assertIn("ASCII text", str(output))
self.assertNotIn("CRLF", str(output))
tools.unix2dos(fp)
output = check_output_runner(["file", fp], stderr=subprocess.STDOUT)
self.assertIn("ASCII text", str(output))
self.assertIn("CRLF", str(output))
else:
fc = tools.load(fp)
self.assertNotIn("\r\n", fc)
tools.unix2dos(fp)
fc = tools.load(fp)
self.assertIn("\r\n", fc)
self.assertEqual("a line\r\notherline\r\n", str(tools.load(fp)))
fp = save_file(b"a line\r\notherline\r\n")
if platform.system() != "Windows":
output = check_output_runner(["file", fp], stderr=subprocess.STDOUT)
self.assertIn("ASCII text", str(output))
self.assertIn("CRLF", str(output))
tools.dos2unix(fp)
output = check_output_runner(["file", fp], stderr=subprocess.STDOUT)
self.assertIn("ASCII text", str(output))
self.assertNotIn("CRLF", str(output))
else:
fc = tools.load(fp)
self.assertIn("\r\n", fc)
tools.dos2unix(fp)
fc = tools.load(fp)
self.assertNotIn("\r\n", fc)
self.assertEqual("a line\notherline\n", str(tools.load(fp)))
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires Visual Studio")
@pytest.mark.tool_visual_studio
class VisualStudioToolsTest(unittest.TestCase):
output = TestBufferConanOutput()
@pytest.mark.skipif(six.PY2, reason="Does not pass on Py2 with Pytest")
def test_msvc_build_command(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
# test build_type and arch override, for multi-config packages
cmd = tools.msvc_build_command(settings, "project.sln", build_type="Debug",
arch="x86", output=self.output)
self.assertIn('msbuild "project.sln" /p:Configuration="Debug" '
'/p:UseEnv=false /p:Platform="x86"', cmd)
self.assertIn('vcvarsall.bat', cmd)
# tests errors if args not defined
with six.assertRaisesRegex(self, ConanException, "Cannot build_sln_command"):
tools.msvc_build_command(settings, "project.sln", output=self.output)
settings.arch = "x86"
with six.assertRaisesRegex(self, ConanException, "Cannot build_sln_command"):
tools.msvc_build_command(settings, "project.sln", output=self.output)
# successful definition via settings
settings.build_type = "Debug"
cmd = tools.msvc_build_command(settings, "project.sln", output=self.output)
self.assertIn('msbuild "project.sln" /p:Configuration="Debug" '
'/p:UseEnv=false /p:Platform="x86"', cmd)
self.assertIn('vcvarsall.bat', cmd)
def test_vswhere_path(self):
"""
Locate vswhere in PATH or in ProgramFiles
"""
# vswhere not found
with tools.environment_append({"ProgramFiles": None, "ProgramFiles(x86)": None, "PATH": ""}):
with six.assertRaisesRegex(self, ConanException, "Cannot locate vswhere"):
vswhere()
# vswhere in ProgramFiles but not in PATH
program_files = get_env("ProgramFiles(x86)") or get_env("ProgramFiles")
vswhere_path = None
if program_files:
expected_path = os.path.join(program_files, "Microsoft Visual Studio", "Installer",
"vswhere.exe")
if os.path.isfile(expected_path):
vswhere_path = expected_path
with tools.environment_append({"PATH": ""}):
self.assertTrue(vswhere())
# vswhere in PATH but not in ProgramFiles
env = {"ProgramFiles": None, "ProgramFiles(x86)": None}
if not which("vswhere") and vswhere_path:
vswhere_folder = os.path.join(program_files, "Microsoft Visual Studio", "Installer")
env.update({"PATH": [vswhere_folder]})
with tools.environment_append(env):
self.assertTrue(vswhere())
|
steveb/heat | heat/db/sqlalchemy/models.py | Python | apache-2.0 | 17,625 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy models for heat data."""
import uuid
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import six
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import session as orm_session
from heat.db.sqlalchemy import types
BASE = declarative.declarative_base()
def get_session():
from heat.db.sqlalchemy import api as db_api
return db_api.get_session()
class HeatBase(models.ModelBase, models.TimestampMixin):
"""Base class for Heat Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
def expire(self, session=None, attrs=None):
"""Expire this object ()."""
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.expire(self, attrs)
def refresh(self, session=None, attrs=None):
"""Refresh this object."""
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.refresh(self, attrs)
def delete(self, session=None):
"""Delete this object."""
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.begin(subtransactions=True)
session.delete(self)
session.commit()
def update_and_save(self, values, session=None):
if not session:
session = orm_session.Session.object_session(self)
if not session:
session = get_session()
session.begin(subtransactions=True)
for k, v in six.iteritems(values):
setattr(self, k, v)
session.commit()
class SoftDelete(object):
deleted_at = sqlalchemy.Column(sqlalchemy.DateTime)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.update_and_save({'deleted_at': timeutils.utcnow()},
session=session)
class StateAware(object):
action = sqlalchemy.Column('action', sqlalchemy.String(255))
status = sqlalchemy.Column('status', sqlalchemy.String(255))
status_reason = sqlalchemy.Column('status_reason', sqlalchemy.Text)
class RawTemplate(BASE, HeatBase):
"""Represents an unparsed template which should be in JSON format."""
__tablename__ = 'raw_template'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
template = sqlalchemy.Column(types.Json)
files = sqlalchemy.Column(types.Json)
environment = sqlalchemy.Column('environment', types.Json)
class StackTag(BASE, HeatBase):
"""Key/value store of arbitrary stack tags."""
__tablename__ = 'stack_tag'
id = sqlalchemy.Column('id',
sqlalchemy.Integer,
primary_key=True,
nullable=False)
tag = sqlalchemy.Column('tag', sqlalchemy.Unicode(80))
stack_id = sqlalchemy.Column('stack_id',
sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
nullable=False)
class SyncPoint(BASE, HeatBase):
"""Represents a syncpoint for a stack that is being worked on."""
__tablename__ = 'sync_point'
__table_args__ = (
sqlalchemy.PrimaryKeyConstraint('entity_id',
'traversal_id',
'is_update'),
sqlalchemy.ForeignKeyConstraint(['stack_id'], ['stack.id'])
)
entity_id = sqlalchemy.Column(sqlalchemy.String(36))
traversal_id = sqlalchemy.Column(sqlalchemy.String(36))
is_update = sqlalchemy.Column(sqlalchemy.Boolean)
# integer field for atomic update operations
atomic_key = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
nullable=False)
input_data = sqlalchemy.Column(types.Json)
class Stack(BASE, HeatBase, SoftDelete, StateAware):
"""Represents a stack created by the heat engine."""
__tablename__ = 'stack'
__table_args__ = (
sqlalchemy.Index('ix_stack_name', 'name', mysql_length=255),
sqlalchemy.Index('ix_stack_tenant', 'tenant', mysql_length=255),
)
id = sqlalchemy.Column(sqlalchemy.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
name = sqlalchemy.Column(sqlalchemy.String(255))
raw_template_id = sqlalchemy.Column(
sqlalchemy.Integer,
sqlalchemy.ForeignKey('raw_template.id'),
nullable=False)
raw_template = relationship(RawTemplate, backref=backref('stack'),
foreign_keys=[raw_template_id])
prev_raw_template_id = sqlalchemy.Column(
'prev_raw_template_id',
sqlalchemy.Integer,
sqlalchemy.ForeignKey('raw_template.id'))
prev_raw_template = relationship(RawTemplate,
foreign_keys=[prev_raw_template_id])
username = sqlalchemy.Column(sqlalchemy.String(256))
tenant = sqlalchemy.Column(sqlalchemy.String(256))
user_creds_id = sqlalchemy.Column(
sqlalchemy.Integer,
sqlalchemy.ForeignKey('user_creds.id'))
owner_id = sqlalchemy.Column(sqlalchemy.String(36), index=True)
parent_resource_name = sqlalchemy.Column(sqlalchemy.String(255))
timeout = sqlalchemy.Column(sqlalchemy.Integer)
disable_rollback = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False)
stack_user_project_id = sqlalchemy.Column(sqlalchemy.String(64))
backup = sqlalchemy.Column('backup', sqlalchemy.Boolean)
nested_depth = sqlalchemy.Column('nested_depth', sqlalchemy.Integer)
convergence = sqlalchemy.Column('convergence', sqlalchemy.Boolean)
tags = relationship(StackTag, cascade="all,delete",
backref=backref('stack'))
current_traversal = sqlalchemy.Column('current_traversal',
sqlalchemy.String(36))
current_deps = sqlalchemy.Column('current_deps', types.Json)
# Override timestamp column to store the correct value: it should be the
# time the create/update call was issued, not the time the DB entry is
# created/modified. (bug #1193269)
updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
class StackLock(BASE, HeatBase):
"""Store stack locks for deployments with multiple-engines."""
__tablename__ = 'stack_lock'
stack_id = sqlalchemy.Column(sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'),
primary_key=True)
engine_id = sqlalchemy.Column(sqlalchemy.String(36))
class UserCreds(BASE, HeatBase):
"""Represents user credentials.
Also, mirrors the 'context' handed in by wsgi.
"""
__tablename__ = 'user_creds'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
username = sqlalchemy.Column(sqlalchemy.String(255))
password = sqlalchemy.Column(sqlalchemy.String(255))
region_name = sqlalchemy.Column(sqlalchemy.String(255))
decrypt_method = sqlalchemy.Column(sqlalchemy.String(64))
tenant = sqlalchemy.Column(sqlalchemy.String(1024))
auth_url = sqlalchemy.Column(sqlalchemy.Text)
tenant_id = sqlalchemy.Column(sqlalchemy.String(256))
trust_id = sqlalchemy.Column(sqlalchemy.String(255))
trustor_user_id = sqlalch
|
zamattiac/SHARE | providers/org/sldr/migrations/0001_initial.py | Python | apache-2.0 | 649 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 15:45
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('org.sldr'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('org.sldr'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('org.sldr'),
),
]
|
raghakot/keras-vis | applications/self_driving/model.py | Python | mit | 845 | 0 |
from keras.layers.core import Dropout, Flatten
from keras.layers.convolutional import MaxPooling2D, Conv2D
from keras.models import Model
from keras.layers import Input, Dense
FRAME_H = 70
FRAME_W = 180
def build_model():
inp = Input(shape=(FRAME_H, FRAME_W, 3))
x = Conv2D(filters=8, kernel_size=(5, 5), activation='relu')(inp)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(filters=16, kernel_size=(5, 5), activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(filters=32, kernel_size=(5, 5), activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='tanh')(x)
return Model(inputs=[inp], outputs=[x])
if __name__ == '__main__':
model = build_model()
model.summary()
|
hrashk/sympy | sympy/combinatorics/graycode.py | Python | bsd-3-clause | 11,202 | 0.000357 |
from __future__ import print_function, division
from sympy.core import Basic
from sympy.core.compatibility import xrange
import random
class GrayCode(Basic):
"""
A Gray code is essentially a Hamiltonian walk on
an n-dimensional cube with edge length of one.
The vertices of the cube are represented by vectors
whose values are binary. The Hamilton walk visits
each vertex exactly once. The Gray code for a 3d
cube is ['000','100','110','010','011','111','101',
'001'].
A Gray code solves the problem of sequentially
generating all possible subsets of n objects in such
a way that each subset is obtained from the previous
one by either deleting or adding a single object.
In the above example, 1 indicates that the object is
present, and 0 indicates that it is absent.
Gray codes have applications in statistics as well when
we want to compute various statistics related to subsets
in an efficient manner.
References:
[1] Nijenhuis,A. and Wilf,H.S.(1978).
Combinatorial Algorithms. Academic Press.
[2] Knuth, D. (2011). The Art of Computer Programming, Vol 4
Addison Wesley
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> a = GrayCode(4)
>>> list(a.generate_gray())
['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \
'1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000']
"""
_skip = False
_current = 0
_rank = None
def __new__(cls, n, *args, **kw_args):
"""
Default constructor.
It takes a single argument ``n`` which gives the dimension of the Gray
code. The starting Gray code string (``start``) or the starting ``rank``
may also be given; the default is to start at rank = 0 ('0...0').
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> a
GrayCode(3)
>>> a.n
3
>>> a = GrayCode(3, start='100')
>>> a.current
'100'
>>> a = GrayCode(4, rank=4)
>>> a.current
'0110'
>>> a.rank
4
"""
if n < 1 or int(n) != n:
raise ValueError(
'Gray code dimension must be a positive integer, not %i' % n)
n = int(n)
args = (n,) + args
obj = Basic.__new__(cls, *args)
if 'start' in kw_args:
obj._current = kw_args["start"]
if len(obj._current) > n:
raise ValueError('Gray code start has length %i but '
'should not be greater than %i' % (len(obj._current), n))
elif 'rank' in kw_args:
if int(kw_args["rank"]) != kw_args["rank"]:
raise ValueError('Gray code rank must be a positive integer, '
'not %i' % kw_args["rank"])
obj._rank = int(kw_args["rank"]) % obj.selections
obj._current = obj.unrank(n, obj._rank)
return obj
def next(self, delta=1):
"""
Returns the Gray code a distance ``delta`` (default = 1) from the
current value in canonical order.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3, start='110')
>>> a.next().current
'111'
>>> a.next(-1).current
'010'
"""
return GrayCode(self.n, rank=(self.rank + delta) % self.selections)
@property
def selections(self):
"""
Returns the number of bit vectors in the Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> a.selections
8
"""
return 2**self.n
@property
def n(self):
"""
Returns the dimension of the Gray code.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(5)
>>> a.n
5
"""
return self.args[0]
def generate_gray(self, **hints):
"""
Generates the sequence of bit vectors of a Gray Code.
[1] Knuth, D. (2011). The Art of Computer Programming,
Vol 4, Addison Wesley
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> list(a.generate_gray(start='011'))
['011', '010', '110', '111', '101', '100']
>>> list(a.generate_gray(rank=4))
['110', '111', '101', '100']
See Also
========
skip
"""
bits = self.n
start = None
if "start" in hints:
start = hints["start"]
elif "rank" in hints:
start = GrayCode.unrank(self.n, hints["rank"])
if start is not None:
self._current = start
current = self.current
graycode_bin = gray_to_bin(current)
if len(graycode_bin) > self.n:
raise ValueError('Gray code start has length %i but should '
'not be greater than %i' % (len(graycode_bin), bits))
self._current = int(current, 2)
graycode_int = int(''.join(graycode_bin), 2)
for i in xrange(graycode_int, 1 << bits):
if self._skip:
self._skip = False
else:
yield self.current
bbtc = (i ^ (i + 1))
gbtc = (bbtc ^ (bbtc >> 1))
self._current = (self._current ^ gbtc)
self._current = 0
def skip(self):
"""
Skips the bit generation.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> for i in a.generate_gray():
... if i == '010':
... a.skip()
... print(i)
...
000
001
011
010
111
101
100
See Also
========
generate_gray
"""
self._skip = True
@property
def rank(self):
"""
Ranks the Gray code.
A ranking algorithm determines the position (or rank)
of a combinatorial object among all the objects w.r.t.
a given order. For example, the 4 bit binary reflected
Gray code (BRGC) '0101' has a rank of 6 as it appears in
the 6th position in the canonical ordering of the family
of 4 bit Gray codes.
References:
[1] http://www-stat.stanford.edu/~susan/courses/s208/node12.html
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> a = GrayCode(3)
>>> list(a.generate_gray())
['000', '001', '011', '010', '110', '111', '101', '100']
>>> GrayCode(3, start='100').rank
7
>>> GrayCode(3, rank=7).current
'100'
See Also
========
unrank
"""
if self._rank is None:
self._rank = int(gray_to_bin(self.current), 2)
return self._rank
@property
def current(self):
"""
Returns the currently referenced Gray code as a bit string.
Examples
========
>>> from sympy.combinatorics.graycode import GrayCode
>>> GrayCode(3, start='100').current
'100'
"""
rv = self._current or '0'
if type(rv) is not str:
rv = bin(rv)[2:]
return rv.rjust(self.n, '0')
@classmethod
def unrank(self, n, rank):
"""
Unranks an n-bit sized Gray code of rank k. This method exists
so that a derivative GrayCode class can define its own code of
a given rank.
The string here is generated in reverse order to allow for tail-call
optimization.
Examples
========
>>> from sympy.combinatorics.graycode imp
|
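The `generate_gray` method in the sample above steps between consecutive code words with `bbtc = i ^ (i + 1)` and `gbtc = bbtc ^ (bbtc >> 1)`, which flips exactly the bits that differ between the Gray codes of `i` and `i + 1`; this is equivalent to the direct mapping `gray(i) = i ^ (i >> 1)`. A small standalone sketch (editorial illustration, not part of the sympy sample) reproducing the 3-bit sequence from the docstring:

```python
# Binary-reflected Gray code: the i-th code word is i ^ (i >> 1), zero-padded to n bits.
def gray_sequence(n):
    return [format(i ^ (i >> 1), '0{}b'.format(n)) for i in range(1 << n)]

print(gray_sequence(3))  # ['000', '001', '011', '010', '110', '111', '101', '100']
```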
Tristramg/mumoro | server.py | Python | gpl-3.0 | 26,253 | 0.020876 |
# -*- coding: utf-8 -*-
# This file is part of Mumoro.
#
# Mumoro is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mumoro is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mumoro. If not, see <http://www.gnu.org/licenses/>.
#
# © Université de Toulouse 1 2010
# Author: Tristram Gräbener, Odysseas Gabrielides
from lib.core import mumoro
from lib.core.mumoro import Bike, Car, Foot, PublicTransport, cost, co2, dist, elevation, line_change, mode_change, Costs
from lib import layer
from lib import bikestations as bikestations
from web import shorturl
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
import cherrypy
import sys
import simplejson as json
import os
import time
import urllib
import httplib
import hashlib
import datetime
from cherrypy import request
from genshi.template import TemplateLoader
loader = TemplateLoader(
os.path.join(os.path.dirname(__file__), 'web/templates'),
auto_reload=True
)
layer_array = []
bike_stations_array = []
same_nodes_connection_array = []
nearest_nodes_connection_array = []
nodes_list_connection_array = []
paths_array = []
def md5_of_file(filename):
block_size=2**20
md5 = hashlib.md5()
while True:
data = filename.read(block_size)
if not data:
break
md5.update(data)
filename.close()
return md5.hexdigest()
def is_color_valid( color ):
if len( color ) == 7:
if color[0] == '#':
try:
r = int( color[1:3], 16)
if r <= 255 and r >= 0:
try:
g = int( color[3:5], 16)
if g <= 255 and g >= 0:
try:
b = int( color[5:7], 16)
if b <= 255 and b >= 0:
return True
except ValueError:
return False
except ValueError:
return False
except ValueError:
return False
return False
#Loads an osm (compressed or not) file and inserts data into database
def import_street_data( filename ):
engine = create_engine( db_type + ":///" + db_params )
metadata = MetaData(bind = engine)
mumoro_metadata = Table('metadata', metadata, autoload = True)
s = mumoro_metadata.select((mumoro_metadata.c.origin == filename) & (mumoro_metadata.c.node_or_edge == 'Nodes'))
rs = s.execute()
nd = 0
for row in rs:
nd = row[0]
s = mumoro_metadata.select((mumoro_metadata.c.origin == filename) & (mumoro_metadata.c.node_or_edge == 'Edges'))
rs = s.execute()
ed = 0
for row in rs:
ed = row[0]
return {'nodes': str(nd), 'edges' : str(ed)}
# Loads the tables corresponding to the public transport layer
def import_gtfs_data( filename, network_name = "Public Transport"):
engine = create_engine(db_type + ":///" + db_params)
metadata = MetaData(bind = engine)
mumoro_metadata = Table('metadata', metadata, autoload = True)
nd = mumoro_metadata.select((mumoro_metadata.c.origin == filename) & (mumoro_metadata.c.node_or_edge == 'Nodes')).execute().first()[0]
ed = mumoro_metadata.select((mumoro_metadata.c.origin == filename) & (mumoro_metadata.c.node_or_edge == 'Edges')).execute().first()[0]
services = mumoro_metadata.select((mumoro_metadata.c.origin == filename) & (mumoro_metadata.c.node_or_edge == 'Services')).execute().first()[0]
return {'nodes': str(nd), 'edges' : str(ed), 'services': str(services) }
def import_kalkati_data(filename, network_name = "Public Transport"):
return import_gtfs_data(filename, network_name)
def import_freq(self, line_name, nodesf, linesf):
return import_gtfs_data(line_name)
#Loads a bike service API ( from already formatted URL ). Inserts bike stations in database and enables scheduled re-check.
def import_bike_service( url, name ):
engine = create_engine(db_type + ":///" + db_params)
metadata = MetaData(bind = engine)
mumoro_metadata = Table('metadata', metadata, autoload = True)
s = mumoro_metadata.select((mumoro_metadata.c.origin == url) & (mumoro_metadata.c.node_or_edge == 'bike_stations'))
rs = s.execute()
for row in rs:
bt = row[0]
bike_stations_array.append( {'url_api': url,'table': str(bt)} )
return {'url_api': url,'table': str(bt)}
#Loads data from previous inserted data and creates a layer used in multi-modal graph
def street_layer( data, name, color, mode ):
if not data or not name:
raise NameError('One or more parameters are missing')
if not is_color_valid( color ):
raise NameError('Color for the layer is invalid')
if mode != mumoro.Foot and mode != mumoro.Bike and mode != mumoro.Car and mode != None:
raise NameError('Wrong layer mode parameter')
engine = create_engine(db_type + ":///" + db_params)
metadata = MetaData(bind = engine)
res = layer.Layer(name, mode, data, metadata)
layer_array.append( {'layer':res,'name':name,'mode':mode,'origin':data,'color':color} )
return {'layer':res,'name':name,'mode':mode,'origin':data,'color':color}
def public_transport_layer(data, name, color):
engine = create_engine(db_type + ":///" + db_params)
metadata = MetaData(bind = engine)
res = layer.GTFSLayer(name, data, metadata)
layer_array.append( {'layer':res,'name':name,'mode':PublicTransport,'origin':data,'color':color} )
return {'layer':res,'name':name,'mode':PublicTransport,'origin':PublicTransport,'color':color}
def paths( starting_layer, destination_layer, objectives ):
if not starting_layer or not destination_layer:
raise NameError('Empty layer(s)')
for i in range( len( objectives ) ):
if objectives[i] != mumoro.dist and objectives[i] != mumoro.cost and objectives[i] != mumoro.elevation and objectives[i] != mumoro.co2 and objectives[i] != mumoro.mode_change and objectives[i] != mumoro.line_change:
raise NameError('Wrong objective parameter')
paths_array.append( {'starting_layer':starting_layer,'destination_layer':destination_layer,'objectives':objectives} )
#Creates a transit cost variable, including the duration in seconds of the transit and if the mode is changed
def cost( duration, mode_change ):
e = mumoro.Edge()
if mode_change:
e.mode_change = 1
else:
e.mode_change = 0
e.duration = mumoro.Duration( duration );
return e
#Connects 2 given layers on same nodes with the given cost(s)
def connect_layers_same_nodes( layer1, layer2, cost ):
if not layer1 or not layer2 or not cost:
raise NameError('One or more parameters are empty')
same_nodes_connection_array.append( { 'layer1':layer1, 'layer2':layer2, 'cost':cost } )
#Connect 2 given layers on a node list (arg 3 which should be the returned data from import_municipal_data or import_bike_service) with the given cost(s)
def connect_layers_from_node_list( layer1, layer2, node_list, cost, cost2 = None ):
if not layer1 or not layer2 or not node_list or not cost:
raise NameError('One or more parameters are empty')
if not cost2:
nodes_list_connection_array.append( { 'layer1':layer1, 'layer2':layer2, 'node_list':node_list, 'cost1':cost, 'cost2':cost } )
else:
nodes_list_connection_array.append( { 'layer1':layer1, 'layer2':layer2, 'node_list':node_list, 'cost1':cost, 'cost2':cost2 } )
#Connect 2 given layers on nearest nodes
def connect_layers_on_nearest_nodes( layer1 , layer2, cost, cost2 = None):
if not layer1 or not layer2 or not cost:
raise NameEr
|
cladmi/RIOT | tests/test_tools/tests/01-run.py | Python | lgpl-2.1 | 1,512 | 0 |
#!/usr/bin/env python3
"""Test behaviour of the test running and the term program interaction."""
import sys
import pexpect
from testrunner import run
def _shellping(child, timeout=1):
"""Issue a 'shellping' command.
Raises a pexpect exception on failure.
:param timeout: timeout for the answer
"""
child.sendline('shellping')
child.expect_exact('shellpong\r\n', timeout=timeout)
def _wait_shell_ready(child, numtries=5):
"""Wait until the shell is ready by using 'shellping'."""
for _ in range(numtries - 1):
try:
_shellping(child)
except pexpect.TIMEOUT:
pass
else:
break
else:
# This one should fail
_shellping(child)
def _test_no_local_echo(child):
"""Verify that there is not local echo while testing."""
msg = 'true this should not be echoed'
child.sendline(msg)
res = child.expect_exact([pexpect.TIMEOUT, msg], timeout=1)
assert res == 0, "There should have been a timeout and not match stdin"
def testfunc(child):
"""Run some tests to verify the board under test behaves correctly.
It currently tests:
* local echo
"""
child.expect_exact("Running 'tests_tools' application")
_wait_shell_ready(child)
# Verify there is no local and remote echo as it is disabled
_test_no_local_echo(child)
# The node should still answer after the previous one
_shellping(child)
if __name__ == "__main__":
sys.exit(run(testfunc))
|
intel-analytics/analytics-zoo | pyzoo/zoo/examples/orca/learn/tf/image_segmentation/image_segmentation.py | Python | apache-2.0 | 9,188 | 0.001741 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import zipfile
import pandas as pd
from PIL import Image
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras import backend as K
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.data import XShards
from zoo.orca.learn.tf.estimator import Estimator
def load_data_from_zip(file_path, file):
with zipfile.ZipFile(os.path.join(file_path, file), "r") as zip_ref:
unzipped_file = zip_ref.namelist()[0]
zip_ref.extractall(file_path)
def load_data(file_path):
load_data_from_zip(file_path, 'train.zip')
load_data_from_zip(file_path, 'train_masks.zip')
load_data_from_zip(file_path, 'train_masks.csv.zip')
def main(cluster_mode, max_epoch, file_path, batch_size, platform, non_interactive):
import matplotlib
if not non_interactive and platform == "mac":
matplotlib.use('qt5agg')
if cluster_mode == "local":
init_orca_context(cluster_mode="local", cores=4, memory="3g")
elif cluster_mode == "yarn":
init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=2, driver_memory="3g")
load_data(file_path)
img_dir = os.path.join(file_path, "train")
label_dir = os.path.join(file_path, "train_masks")
# Here we only take the first 1000 files for simplicity
df_train = pd.read_csv(os.path.join(file_path, 'train_masks.csv'))
ids_train = df_train['img'].map(lambda s: s.split('.')[0])
ids_train = ids_train[:1000]
x_train_filenames = []
y_train_filenames = []
for img_id in ids_train:
x_train_filenames.append(os.path.join(img_dir, "{}.jpg".format(img_id)))
y_train_filenames.append(os.path.join(label_dir, "{}_mask.gif".format(img_id)))
x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = \
train_test_split(x_train_filenames, y_train_filenames, test_size=0.2, random_state=42)
def load_and_process_image(path):
array = mpimg.imread(path)
result = np.array(Image.fromarray(array).resize(size=(128, 128)))
result = result.astype(float)
result /= 255.0
return result
def load_and_process_image_label(path):
array = mpimg.imread(path)
result = np.array(Image.fromarray(array).resize(size=(128, 128)))
result = np.expand_dims(result[:, :, 1], axis=-1)
result = result.astype(float)
result /= 255.0
return result
train_images = np.stack([load_and_process_image(filepath) for filepath in x_train_filenames])
train_label_images = np.stack([load_and_process_image_label(filepath)
for filepath in y_train_filenames])
val_images = np.stack([load_and_process_image(filepath) for filepath in x_val_filenames])
val_label_images = np.stack([load_and_process_image_label(filepath)
for filepath in y_val_filenames])
train_shards = XShards.partition({"x": train_images, "y": train_label_images})
val_shards = XShards.partition({"x": val_images, "y": val_label_images})
# Build the U-Net model
def conv_block(input_tensor, num_filters):
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
encoder = layers.Activation('relu')(encoder)
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
encoder = layers.Activation('relu')(encoder)
return encoder
def encoder_block(input_tensor, num_filters):
encoder = conv_block(input_tensor, num_filters)
encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)
return encoder_pool, encoder
def decoder_block(input_tensor, concat_tensor, num_filters):
decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(
input_tensor)
decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.Activation('relu')(decoder)
return decoder
inputs = layers.Input(shape=(128, 128, 3)) # 128
encoder0_pool, encoder0 = encoder_block(inputs, 16) # 64
encoder1_pool, encoder1 = encoder_block(encoder0_pool, 32) # 32
encoder2_pool, encoder2 = encoder_block(encoder1_pool, 64) # 16
encoder3_pool, encoder3 = encoder_block(encoder2_pool, 128) # 8
center = conv_block(encoder3_pool, 256) # center
decoder3 = decoder_block(center, encoder3, 128) # 16
decoder2 = decoder_block(decoder3, encoder2, 64) # 32
decoder1 = decoder_block(decoder2, encoder1, 32) # 64
decoder0 = decoder_block(decoder1, encoder0, 16) # 128
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
net = models.Model(inputs=[inputs], outputs=[outputs])
# Define custom metrics
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / \
(tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
# Define custom loss function
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
# compile model
net.compile(optimizer=tf.keras.optimizers.Adam(2e-3), loss=bce_dice_loss)
print(net.summary())
# create an estimator from keras model
est = Estimator.from_keras(keras_model=net)
# fit with estimator
est.fit(data=train_shards,
batch_size=batch_size,
epochs=max_epoch)
# evaluate with estimator
result = est.evaluate(val_shards)
print(result)
# predict with estimator
val_shards.cache()
val_image_shards = val_shards.transform_shard(lambda val_dict: {"x": val_dict["x"]})
pred_shards = est.predict(data=val_image_shards, batch_size=batch_size)
pred = pred_shards.collect()[0]["prediction"]
val_image_label = val_shards.collect()[0]
val_image = val_image_label["x"]
val_label = val_image_label["y"]
if not non_interactive:
# visualize 5 predicted results
plt.figure(figsize=(10, 20))
for i in range(5):
img = val_image[i]
label = val_label[i]
predicted_label = pred[i]
plt.subplot(5, 3, 3 * i + 1)
plt.imshow(img)
plt.title("Input image")
plt.subplot(5, 3, 3 * i + 2)
plt.imshow(label[:, :, 0], cmap='gray')
plt.title("Actual Mask")
plt.subplot(5, 3, 3 * i + 3)
plt.imshow(predicted_label, cmap='gray')
plt.title("Predicted Mask")
plt.suptitle("Examples of Input Image, Label, and Prediction")
plt.show()
stop_orca_context()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cl
|
mkheirkhah/mptcp | src/dsdv/bindings/modulegen__gcc_LP64.py | Python | gpl-2.0 | 466,437 | 0.015177 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.dsdv', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class]
module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress [class]
module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e [enumeration]
module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e [enumeration]
module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_cla
| lsst-sqre/s3s3 | s3s3/scripts/echo_s3s3_ini_template.py | Python | mit | 978 | 0 |
#!/usr/bin/env python
"""
Echo the s3s3 configuration template.
"""
from pkg_resources import Requirement, resource_string
def try_resource(location):
"""
Try to get the resource in ``location``.
"""
try:
return resource_string(Requirement.parse('s3s3'), location)
except FileNotFoundError:
return ''
def echo():
"""
    Echo the s3s3 configuration file.
    Use the pkg_resources module to try to find the s3s3.ini.dist configuration
    file.
    The file is located in a different place depending on whether it is
    an sdist or a bdist_wheel install.
"""
try:
conf = try_resource('extras/s3s3.ini.dist') # sdist
if not conf:
conf = try_resource('../../../extras/s3s3.ini.dist') # bdist
print(conf.decode('utf-8'))
return True
except Exception as e:
return False
def main():
if echo():
exit(0)
else:
exit(1)
if __name__ == "__main__":
main()
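# Usage sketch (illustrative only; the module path is taken from the repository layout above):
#   from s3s3.scripts import echo_s3s3_ini_template
#   echo_s3s3_ini_template.echo()   # prints the bundled s3s3.ini.dist template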
| AlexStarov/Shop | applications/authModel/migrations/0008_email_hash.py | Python | apache-2.0 | 508 | 0.001969 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import applications.utils.captcha.utils
class Migration(migrations.Migration):
dependencies = [
('authModel', '0007_email_test'),
]
operations = [
migrations.AddField(
model_name='email',
name='hash',
field=models.CharField(default=applications.utils.captcha.utils.key_generator, max_length=16, verbose_name='Hash'),
),
]
| dc3-plaso/dfvfs | dfvfs/vfs/zip_file_system.py | Python | apache-2.0 | 4,907 | 0.006521 |
# -*- coding: utf-8 -*-
"""The zip file system implementation."""
import zipfile
# This is necessary to prevent a circular import.
import dfvfs.vfs.zip_file_entry
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import zip_path_spec
from dfvfs.resolver import resolver
from dfvfs.vfs import file_system
class ZipFileSystem(file_system.FileSystem):
"""File system that uses zipfile.
Attributes:
encoding (str): encoding of the file entry name.
"""
LOCATION_ROOT = u'/'
TYPE_INDICATOR = definitions.TYPE_INDICATOR_ZIP
def __init__(self, resolver_context, encoding=u'utf-8'):
"""Initializes a file system.
Args:
resolver_context: the resolver context (instance of resolver.Context).
encoding (Optional[str]): encoding of the file entry name.
"""
super(ZipFileSystem, self).__init__(resolver_context)
self._file_object = None
self._zip_file = None
self.encoding = encoding
def _Close(self):
"""Closes the file system object.
Raises:
IOError: if the close failed.
"""
self._zip_file.close()
self._zip_file = None
self._file_object.close()
self._file_object = None
def _Open(self, path_spec, mode='rb'):
"""Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification of the file system.
mode (Optional[str]): file access mode.
Raises:
      AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
u'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
zip_file = zipfile.ZipFile(file_object, 'r')
except:
file_object.close()
raise
self._file_object = file_object
self._zip_file = zip_file
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
bool: True if the file entry exists.
"""
location = getattr(path_spec, u'location', None)
if (location is None or
not location.startswith(self.LOCATION_ROOT)):
return False
if len(location) == 1:
return True
try:
self._zip_file.getinfo(location[1:])
return True
except KeyError:
pass
# Check if location could be a virtual directory.
for name in iter(self._zip_file.namelist()):
# The ZIP info name does not have the leading path separator as
# the location string does.
if name.startswith(location[1:]):
return True
return False
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
ZipFileEntry: a file entry or None.
"""
if not self.FileEntryExistsByPathSpec(path_spec):
return
location = getattr(path_spec, u'location', None)
if len(location) == 1:
return dfvfs.vfs.zip_file_entry.ZipFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
kwargs = {}
try:
kwargs[u'zip_info'] = self._zip_file.getinfo(location[1:])
except KeyError:
kwargs[u'is_virtual'] = True
return dfvfs.vfs.zip_file_entry.ZipFileEntry(
self._resolver_context, self, path_spec, **kwargs)
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
ZipFileEntry: a file entry or None.
"""
path_spec = zip_path_spec.ZipPathSpec(
location=self.LOCATION_ROOT, parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec)
def GetZipFile(self):
"""Retrieves the ZIP file object.
Returns:
zipfile.ZipFile: a ZIP file object or None.
"""
return self._zip_file
def GetZipInfoByPathSpec(self, path_spec):
"""Retrieves the ZIP info object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
zipfile.ZipInfo: a ZIP info object or None if not available.
Raises:
PathSpecError: if the path specification is incorrect.
"""
location = getattr(path_spec, u'location', None)
if location is None:
raise errors.PathSpecError(u'Path specification missing location.')
if not location.startswith(self.LOCATION_ROOT):
raise errors.PathSpecError(u'Invalid location in path specification.')
if len(location) > 1:
return self._zip_file.getinfo(location[1:])
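# Usage sketch (illustrative, not part of the original module; it assumes dfvfs'
# OSPathSpec and resolver Context APIs and a local archive at /tmp/test.zip):
#
#   from dfvfs.path import os_path_spec
#   from dfvfs.resolver import context
#
#   os_spec = os_path_spec.OSPathSpec(location=u'/tmp/test.zip')
#   zip_spec = zip_path_spec.ZipPathSpec(location=u'/', parent=os_spec)
#   file_system = ZipFileSystem(context.Context())
#   file_system.Open(zip_spec)              # public wrapper around _Open() above
#   root_entry = file_system.GetRootFileEntry()
#   file_system.Close()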
| django-bmf/django-bmf | djangobmf/migrations/0001_squashed_0_2_9.py | Python | bsd-3-clause | 12,381 | 0.005169 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
import djangobmf.storage
import djangobmf.fields.file
import django.utils.timezone
import djangobmf.utils.generate_filename
class Migration(migrations.Migration):
replaces = [('djangobmf', '0001_squashed_0_2_0'), ('djangobmf', '0002_dashboard_update'), ('djangobmf', '0003_delete_workspace'), ('djangobmf', '0004_added_active_field'), ('djangobmf', '0005_added_unique_together'), ('djangobmf', '0006_report_settings'), ('djangobmf', '0007_update_renderer'), ('djangobmf', '0008_renderer_filefields'), ('djangobmf', '0009_notification_rename'), ('djangobmf', '0010_notification_db_optimization'), ('djangobmf', '0011_added_numberrange'), ('djangobmf', '0012_delete_dashboard'), ('djangobmf', '0013_update_document')]
dependencies = [
migrations.swappable_dependency(settings.BMF_CONTRIB_CUSTOMER),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.BMF_CONTRIB_PROJECT),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('topic', models.CharField(verbose_name='Topic', blank=True, null=True, max_length=100)),
('text', models.TextField(verbose_name='Text', blank=True, null=True)),
('action', models.PositiveSmallIntegerField(verbose_name='Action', default=1, choices=[(1, 'Comment'), (2, 'Created'), (3, 'Updated'), (4, 'Workflow'), (5, 'File')], editable=False, null=True)),
('template', models.CharField(verbose_name='Template', editable=False, null=True, max_length=100)),
('parent_id', models.PositiveIntegerField()),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modified')),
('parent_ct', models.ForeignKey(related_name='bmf_history_parent', to='contenttypes.ContentType')),
('user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'modified',
'verbose_name': 'Activity',
'ordering': ('-modified',),
'verbose_name_plural': 'Activity',
'abstract': False,
},
),
migrations.CreateModel(
name='Configuration',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('app_label', models.CharField(verbose_name='Application', editable=False, null=True, max_length=100)),
('field_name', models.CharField(verbose_name='Fieldname', editable=False, null=True, max_length=100)),
('value', models.TextField(verbose_name='Value', null=True)),
('active', models.BooleanField(verbose_name='Active', default=True)),
],
options={
'ordering': ['app_label', 'field_name'],
'verbose_name_plural': 'Configurations',
'abstract': False,
'verbose_name': 'Configuration',
'default_permissions': ('change',),
},
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('name', models.CharField(verbose_name='Name', blank=True, null=True, max_length=120)),
('mimetype', models.CharField(verbose_name='Mimetype', editable=False, null=True, max_length=120)),
('encoding', models.CharField(verbose_name='Encoding', editable=False, null=True, max_length=60)),
('description', models.TextField(verbose_name='Description', blank=True, null=True)),
('file', models.FileField(upload_to=djangobmf.utils.generate_filename.generate_filename, storage=djangobmf.storage.Storage(), verbose_name='File')),
('size', models.PositiveIntegerField(editable=False, blank=True, null=True)),
('sha1', models.CharField(verbose_name='SHA1', editable=False, null=True, max_length=40)),
('is_static', models.BooleanField(editable=False, default=True)),
('file_exists', models.BooleanField(default=True)),
('content_id', models.PositiveIntegerField(editable=False, blank=True, null=True)),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modified', null=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created', null=True)),
('content_type', models.ForeignKey(related_name='bmf_document', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contenttypes.ContentType')),
('created_by', models.ForeignKey(related_name='+', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL)),
('customer', models.ForeignKey(related_name='documents', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.BMF_CONTRIB_CUSTOMER)),
('modified_by', models.ForeignKey(related_name='+', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL)),
('project', models.ForeignKey(related_name='documents', null=True, editable=False, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.BMF_CONTRIB_PROJECT)),
],
options={
'get_latest_by': 'modified',
'verbose_name': 'Document',
'permissions': [('view_document', 'Can view documents')],
'verbose_name_plural': 'Documents',
                'abstract': False,
},
),
migrations.CreateModel(
name='Notification',
fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('watch_id', models.PositiveIntegerField(null=True, db_index=True)),
('triggered', models.BooleanField(verbose_name='Triggered', default=True, editable=False, db_index=True)),
('unread', models.BooleanField(verbose_name='Unread', default=True, editable=False, db_index=True)),
('last_seen_object', models.PositiveIntegerField(null=True)),
('new_entry', models.BooleanField(verbose_name='New entry', default=False, db_index=True)),
('comments', models.BooleanField(verbose_name='Comment written', default=False, db_index=True)),
('files', models.BooleanField(verbose_name='File added', default=False, db_index=True)),
('detectchanges', models.BooleanField(verbose_name='Object changed', default=False, db_index=True)),
('workflow', models.BooleanField(verbose_name='Workflowstate changed', default=False, db_index=True)),
('modified', models.DateTimeField(verbose_name='Modified', default=django.utils.timezone.now, editable=False, null=True)),
('user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL)),
('watch_ct', models.ForeignKey(to='contenttypes.ContentType', null=True)),
],
options={
'ordering': ('-modified',),
'verbose_name_plural': 'Watched activities',
'abstract': False,
'verbose_name': 'Watched activity',
'get_latest_by': 'modified',
'default_permissions': (),
},
),
migrations.CreateModel(
name='NumberRange',
fields=[
('id', models.AutoField(
| aslab/rct | mrt/src/tum_simulator_ws/devel/lib/python2.7/dist-packages/ardrone_autonomy/msg/_navdata_time.py | Python | gpl-3.0 | 6,555 | 0.019222 |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from ardrone_autonomy/navdata_time.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class navdata_time(genpy.Message):
_md5sum = "8642a5656fdcc9310938a1807c46d608"
_type = "ardrone_autonomy/navdata_time"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
float64 drone_time
uint16 tag
uint16 size
uint32 time
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','drone_time','tag','size','time']
_slot_types = ['std_msgs/Header','float64','uint16','uint16','uint32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,drone_time,tag,size,time
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(navdata_time, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.drone_time is None:
self.drone_time = 0.
if self.tag is None:
self.tag = 0
if self.size is None:
self.size = 0
if self.time is None:
self.time = 0
else:
self.header = std_msgs.msg.Header()
self.drone_time = 0.
self.tag = 0
self.size = 0
self.time = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_d2HI.pack(_x.drone_time, _x.tag, _x.size, _x.time))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 16
(_x.drone_time, _x.tag, _x.size, _x.time,) = _struct_d2HI.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_d2HI.pack(_x.drone_time, _x.tag, _x.size, _x.time))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 16
(_x.drone_time, _x.tag, _x.size, _x.time,) = _struct_d2HI.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_d2HI = struct.Struct("<d2HI")
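# Round-trip sketch (illustrative only, not generated by genpy; it assumes genpy and
# std_msgs are importable, which the module above already requires):
if __name__ == '__main__':
    from io import BytesIO
    msg = navdata_time(drone_time=1.5, tag=27, size=8, time=1000)
    buff = BytesIO()
    msg.serialize(buff)
    copy = navdata_time()
    copy.deserialize(buff.getvalue())
    assert (copy.tag, copy.size, copy.time) == (27, 8, 1000)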
| hsteinhaus/ardupilot | Tools/autotest/sim_vehicle.py | Python | gpl-3.0 | 31,053 | 0.00293 |
#!/usr/bin/env python
"""
Framework to start a simulated vehicle and connect it to MAVProxy.
Peter Barker, April 2016
based on sim_vehicle.sh by Andrew Tridgell, October 2011
"""
import atexit
import getpass
import optparse
import os
import os.path
import signal
import subprocess
import sys
import tempfile
import time
import shlex
# List of open terminal windows for macosx
windowID = []
class CompatError(Exception):
"""A custom exception class to hold state if we encounter the parse error we are looking for"""
def __init__(self, error, opts, rargs):
Exception.__init__(self, error)
self.opts = opts
self.rargs = rargs
class CompatOptionParser(optparse.OptionParser):
    """An option parser which emulates the behaviour of the old sim_vehicle.sh; if passed -C, the first argument not understood starts a list of arguments that are passed straight to mavproxy"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
    def error(self, error):
        """Override default error handler called by optparse.OptionParser.parse_args when a parse error occurs; raise a detailed exception which can be caught"""
if error.find("no such option") != -1:
raise CompatError(error, self.values, self.rargs)
optparse.OptionParser.error(self, error)
def parse_args(self, args=None, values=None):
"""Wrap parse_args so we can catch the exception raised upon discovering the known parameter parsing error"""
try:
opts, args = optparse.OptionParser.parse_args(self)
except CompatError as e:
if not e.opts.sim_vehicle_sh_compatible:
print(e)
print("Perhaps you want --sim_vehicle_sh_compatible (-C)?")
sys.exit(1)
if e.opts.mavproxy_args:
print("--mavproxy-args not permitted in compat mode")
sys.exit(1)
args = []
opts = e.opts
mavproxy_args = [str(e)[16:]] # this trims "no such option" off
mavproxy_args.extend(e.rargs)
opts.ensure_value("mavproxy_args", " ".join(mavproxy_args))
return opts, args
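# Illustration (hypothetical command line, not from the original script): with -C, an
# option optparse does not recognise, e.g.
#     sim_vehicle.py -C --some-mavproxy-flag
# makes error() raise CompatError instead of exiting; parse_args() then strips the
# leading "no such option: " text and collects "--some-mavproxy-flag" plus any
# remaining arguments into opts.mavproxy_args for later hand-off to MAVProxy.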
def cygwin_pidof(proc_name):
""" Thanks to kata198 for this:
https://github.com/kata198/cygwin-ps-misc/blob/master/pidof
"""
pipe = subprocess.Popen("ps -ea | grep " + proc_name, shell=True, stdout=subprocess.PIPE)
output_lines = pipe.stdout.read().replace("\r", "").split("\n")
ret = pipe.wait()
pids = []
if ret != 0:
# No results
return []
for line in output_lines:
if not line:
continue
line_split = [item for item in line.split(' ') if item]
cmd = line_split[-1].split('/')[-1]
if cmd == proc_name:
try:
pid = int(line_split[0].strip())
except:
pid = int(line_split[1].strip())
if pid not in pids:
pids.append(pid)
return pids
def under_cygwin():
    """Return True if the Cygwin cygstart binary exists"""
return os.path.exists("/usr/bin/cygstart")
def under_macos():
return sys.platform == 'darwin'
def kill_tasks_cygwin(victims):
"""Shell out to ps -ea to find processes to kill"""
for victim in list(victims):
pids = cygwin_pidof(victim)
# progress("pids for (%s): %s" % (victim,",".join([ str(p) for p in pids])))
for apid in pids:
os.kill(apid, signal.SIGKILL)
def kill_tasks_macos():
for window in windowID:
cmd = "osascript -e \'tell application \"Terminal\" to close (window(get index of window id %s))\'" % window
os.system(cmd)
def kill_tasks_psutil(victims):
"""Use the psutil module to kill tasks by name. Sadly, this module is not available on Windows, but when it is we should be able to *just* use this routine"""
import psutil
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
continue
if proc.name in victims:
proc.kill()
def kill_tasks_pkill(victims):
"""Shell out to pkill(1) to kill processed by name"""
for victim in victims: # pkill takes a single pattern, so iterate
cmd = ["pkill", victim]
run_cmd_blocking("pkill", cmd, quiet=True)
class BobException(Exception):
"""Handle Bob's Exceptions"""
pass
def kill_tasks():
"""Clean up stray processes by name. This is a somewhat shotgun approach"""
progress("Killing tasks")
try:
victim_names = {
'JSBSim',
'lt-JSBSim',
'ArduPlane.elf',
'ArduCopter.elf',
'APMrover2.elf',
'AntennaTracker.elf',
'JSBSIm.exe',
'MAVProxy.exe',
'runsim.py',
'AntennaTracker.elf',
}
for frame in _options_for_frame.keys():
if "waf_target" not in _options_for_frame[frame]:
continue
exe_name = os.path.basename(_options_for_frame[frame]["waf_target"])
victim_names.add(exe_name)
if under_cygwin():
return kill_tasks_cygwin(victim_names)
if under_macos():
return kill_tasks_macos()
try:
kill_tasks_psutil(victim_names)
except ImportError:
kill_tasks_pkill(victim_names)
except Exception as e:
progress("kill_tasks failed: {}".format(str(e)))
def check_jsbsim_version():
"""Assert that the JSBSim we will run is the one we expect to run"""
jsbsim_cmd = ["JSBSim", "--version"]
progress_cmd("Get JSBSim version", jsbsim_cmd)
try:
jsbsim_version = subprocess.Popen(jsbsim_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
jsbsim_version = '' # this value will trigger the ".index"
# check below and produce a reasonable
# error message
try:
jsbsim_version.index("ArduPilot")
except ValueError:
print(r"""
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your \$PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://ardupilot.org/dev/docs/setting-up-sitl-on-linux.html
for more details
=========================================================
""")
sys.exit(1)
def progress(text):
"""Display sim_vehicle progress text"""
print("SIM_VEHICLE: " + text)
def find_autotest_dir():
"""Return path to autotest directory"""
return os.path.dirname(os.path.realpath(__file__))
def find_root_dir():
"""Return path to root directory"""
return os.path.realpath(os.path.join(find_autotest_dir(), '../..'))
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
_options_for_frame = {
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
# COPTER
"+": {
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter-quad",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter-quad",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the param fetch happens asynchronously
"default_params_filename": "default_params/copter.parm",
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl-hexa",
"waf_target": "bin/arducopter-hexa",
"default_params_filename": "default_params/copter.parm",
},
"octa": {
"make_target": "sitl-octa",
"waf_target": "bin/arducopter-octa",
"default_params_filena
| tohodson/modis-hdf5 | code/compile_hdf.py | Python | gpl-2.0 | 1,818 | 0.008801 |
from pyIST import *
##############
terra_dir = '/Users/tohodson/Desktop/modis_download/MOST/MOD29E1D.005/'
aqua_dir = '/Users/tohodson/Desktop/modis_download/MOSA/MYD29E1D.005/'
hdf_file = '/Users/tohodson/Desktop/AQ_IST.hdf5'
DATAFIELD_NAME='Ice_Surface_Temperature_SP'
aqua_count = count_files(aqua_dir,'*.hdf')
terra_count = count_files(terra_dir,'*.hdf')
print aqua_count
print terra_count
print terra_count - aqua_count
day_count = 90 * 15 # 15 years with at most 90 days per year
################################################################################
# main
f = h5py.File(hdf_file,'r+') # turned off for safety
terra_set = f['/daily/MOST'] # remove XXX
terra_set.attrs.create('dates',MODIS_dates(terra_dir),dtype='S10') #remove
'''
#load terra files
######################
terra_set = f.create_dataset("/daily/MOST", (terra_count,4501,4501), dtype=np.uint16, compression="lzf", shuffle=True)
terra_set = f['/daily/MOST']
terra_set.attrs.create('dates',MODIS_dates(terra_dir),dtype='S10')
counter = 0
for filename in find_files(terra_dir, '*.hdf'):
hdf = SD(filename, SDC.READ)
# Read dataset.
data_raw = hdf.select(DATAFIELD_NAME)
    terra_set[counter] = data_raw[:,:]
counter = counter +1
print 'terra ' + str(counter)
#load aqua files
##################
aqua_set = f.create_dataset("/daily/MOSA", (aqua_count,4501,4501), dtype=np.uint16, compression="lzf", shuffle=True)
aqua_set.attrs.create('dates',MODIS_dates(aqua_dir),dtype='S10')
counter = terra_count - aqua_count #align daily records with terra
for filename in find_files(aqua_dir, '*.hdf'):
hdf = SD(filename, SDC.READ)
# Read dataset.
data_raw = hdf.select(DATAFIELD_NAME)
aqua_set[counter] = data_raw[:,:]
counter = counter +1
print 'aqua ' + str(counter)
'''
f.close()
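# Read-back sketch (illustrative, not part of the original script; dataset and
# attribute names follow the code above):
#   with h5py.File(hdf_file, 'r') as f:
#       most = f['/daily/MOST']
#       dates = most.attrs['dates']     # one date string per 4501x4501 layer
#       first_day = most[0]             # a single uint16 IST grid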
| jonwright/ImageD11 | scripts/huber2bruker.py | Python | gpl-2.0 | 20,959 | 0.015172 |
#!/usr/bin/env python
from __future__ import print_function
## Automatically adapted for numpy.oldnumeric Sep 06, 2007 by alter_code1.py
# ImageD11_v0.4 Software for beamline ID11
# Copyright (C) 2005 Jon Wright
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
A script to convert edf images into bruker format
"""
import time
import numpy
from fabio.openimage import openimage
from fabio.brukerimage import brukerimage
from fabio import file_series
from pyFAI import detectors, _distortion
class darkflood:
""" apply dark and flood corrections """
def __init__(self,
darkfile = None,
darkoffset = None,
floodfile = None,
floodmultiplier = None,
splinefile = None,
border = None,
powfac = 1.0,
overflow= 65534,
detrend = None,
monitorcol = None,
monitorval = None,
maskfilename = None
):
self.overflow=overflow
self.darkfile = darkfile
self.darkoffset = darkoffset
self.floodfile = floodfile
self.splinefile = splinefile
self.distnorm = None
self.border = border
self.floodmultiplier = None
#
self.darkimage = None
self.floodimage = None
self.flmult = None
self.powfac = powfac
self.detrend = detrend
self.monitorcol = monitorcol
self.monitorval = monitorval
if maskfilename is not None:
self.mask = 1-openimage( maskfilename ).data
else:
self.mask = None
def readdark(self,darkfile):
""" read the dark"""
try:
self.darkdata = openimage(darkfile)
self.darkfile = darkfile
self.darkimage = self.darkdata.data.astype(numpy.float32)
if self.powfac != 1.0:
print("apply 0.96 to dark")
self.darkimage = numpy.power(self.darkimage, 0.96)
except:
print("No dark file")
self.darkdata = None
self.darkimage= None
self.darkfile = None
def readflood(self, floodfile):
""" read the flood """
try:
self.flooddata = openimage(floodfile)
self.floodfile = floodfile
self.floodimage = self.flooddata.data.astype(numpy.float32)
if self.floodmultiplier is None:
# centre = self.floodimage[100:-100,100:-100]
# npix = centre.shape[0]*centre.shape[1]
# self.floodmultiplier = numpy.sum(numpy.ravel(
# centre).astype(numpy.float32))/npix
self.flmult = 1 / (self.floodimage)
print("using flood from",floodfile)
except:
print("No flood file")
self.flooddata = None
self.floodimage= None
self.floodfile = None
self.floodmultiplier = None
def readspline(self, splinefile):
"""read the spline """
self.det = detectors.FReLoN(splinefile)
self.dis = _distortion.Distortion(self.det)
self.dis.calc_LUT_size()
self.dis.calc_LUT()
# remove the pixel size normalisation
im = numpy.ones((2048,2048),numpy.float32)
im2 = self.dis.correct(im)
im2 = numpy.where(im2<0.1,1,im2)
self.distnorm = 1/im2
def correct(self, dataobj, detrend = None):
""" correct the data """
tin = dataobj.data.dtype
# Start by copying
cor = dataobj.data.astype(numpy.float32).copy()
self.report = ""
msk = numpy.where( cor > self.overflow,
65534,
0 )
if self.powfac != 1.0:
self.report += "powfac %f;"%(self.powfac)
numpy.power(cor, 0.96, cor)
if self.darkimage is not None:
numpy.subtract(cor, self.darkimage, cor)
self.report += "dark;"
if self.detrend is not None:
assert cor.dtype == numpy.float32, cor.dtype
cor = self.do_detrend( cor )
self.report += "detrend;"
if self.flmult is not None:
numpy.multiply(cor , self.flmult, cor)
self.report +="flood;"
if self.monitorcol is not None:
scal = self.monitorval / float( dataobj.header[self.monitorcol] )
numpy.multiply( cor, scal, cor )
self.report += '%s %.4f;'%(self.monitorcol, scal )
if self.border is not None:
# set the edges to zero
b=self.border
cor[:b,:]=0
cor[:,:b]=0
cor[-b:,:]=0
cor[:,-b:]=0
self.report += "border b(%d)=0;"%(self.border)
if self.splinefile is not None:
cor = self.dis.correct( cor )
numpy.multiply(cor, self.distnorm, cor)
self.report += "splinen;"
if self.darkoffset is not None:
# print "applying offset of",self.darkoffset,
numpy.add(cor, self.darkoffset, cor)
self.report += "+darkoffset %.2f;"%(self.darkoffset)
if self.mask is not None:
numpy.multiply(cor, self.mask, cor)
self.report += "fit2d style mask"
# Should we bother with this - yes please - noisy pixels overflow
cor = numpy.where(cor>0.1, cor, 0.) # truncate zero
self.report += ">0;"
cor = numpy.where(msk != 0 , msk, cor) # mask overflows
self.report += "msk>%.2f"%(self.overflow)
ret = cor.astype(tin)
return ret
    def do_detrend( self, ar):
        """Remove a per-row background from each half of the detector."""
        if self.detrend is None:
            return ar
        # print "detrending",
        s = ar.copy()
        np = ar.shape[1] // 2   # column splitting the two detector halves
        # sort each half row-wise so the lowest pixel values come first
        s[:,:np].sort()
        s[:,np:].sort()
        n = self.detrend
        nb = 5 # bad pixels (eg negative outliers)
        # per-row background: mean of the n lowest pixels, skipping nb outliers
        o1 = s[:,nb:(n+nb)].sum(axis=1)/n
        o2 = s[:,(np+nb):(np+n+nb)].sum(axis=1)/n
        s1 = ar.copy()
        # subtract the row offsets but keep the overall mean level of each half
        s1[:,:np] = (ar[:,:np].T - o1 + o1.mean() ).T
        s1[:,np:] = (ar[:,np:].T - o2 + o2.mean() ).T
        return s1
class edf2bruker:
def __init__(self,
dark,
flood,
template,
darkoffset = 100,
distance = 5.0,
border = None,
wvln = 0.5,
omegasign = 1.,
powfac = 1.0,
overflow=65534,
detrend = None,
monitorval = None,
monitorcol = None,
maskfilename=None,
splinefile = None,
omega_zero=0,
chi_zero=0):
""" converts edf (esrf id11 frelon) to bruker (esrf id11 smart6500)"""
self.distance = distance
self.overflow = overflow
self.omegasign = omegasign
self.wvln = wvln
self.powfac = powfac
self.darkflood = darkflood(darkoffset = darkoffset,
border = border,
powfac = self.powfac,
overflow = self.overflow,
splinefile = splinefile,
detrend = detrend,
monitorval = monitorval,
monitorcol = monitorcol,
maskfilename=maskfi
| Juanlu001/CBC.Solve | cbc/beat/heart.py | Python | gpl-3.0 | 897 | 0.001115 |
from dolfin import error, info
class Heart:
def __init__(self, cell_model):
self._cell_model = cell_model
# Mandatory stuff
def mesh(self):
error("Need to prescribe domain")
def conductivities(self):
error("Need to prescribe conducitivites")
# Optional stuff
def applied_current(self):
return None
def end_time(self):
info("Using default end time (T = 1.0)")
return 1.0
def essential_boundaries(self):
return None
def essential_boundary_values(self):
return None
def initial_conditions(self):
return None
def neumann_boundaries(self):
return None
def boundary_current(self):
return None
# Peculiar stuff (for now)
def is_dynamic(self):
return True
# Helper functions
def cell_model(self):
return self._cell_model
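# Concrete-subclass sketch (illustrative; the mesh and conductivity choices below are
# placeholders and the exact dolfin names depend on the installed version):
#
#   from dolfin import UnitSquareMesh, Constant
#
#   class SquareHeart(Heart):
#       def mesh(self):
#           return UnitSquareMesh(32, 32)
#       def conductivities(self):
#           return Constant(1.0), Constant(1.0)   # (intracellular, extracellular)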
| alexm92/sentry | src/sentry/api/serializers/models/user.py | Python | bsd-3-clause | 4,943 | 0.000809 |
from __future__ import absolute_import
import six
from collections import defaultdict
from django.conf import settings
from sentry.app import env
from sentry.api.serializers import Serializer, register
from sentry.models import AuthIdentity, Authenticator, User, UserAvatar, UserOption
from sentry.utils.avatar import get_gravatar_url
@register(User)
class UserSerializer(Serializer):
def _get_identities(self, item_list, user):
if not (env.request and env.request.is_superuser()):
            item_list = [x for x in item_list if x == user]
queryset = AuthIdentity.objects.filter(
user__in=item_list,
).select_related('auth_provider', 'auth_provider__organization')
results = {i.id: [] for i in item_list}
for item in queryset:
results[item.user_id].append(item)
return results
def get_attrs(self, item_list, user):
avatars = {
a.user_id: a
            for a in UserAvatar.objects.filter(
user__in=item_list
)
}
identities = self._get_identities(item_list, user)
authenticators = Authenticator.objects.bulk_users_have_2fa([i.id for i in item_list])
data = {}
for item in item_list:
data[item] = {
'avatar': avatars.get(item.id),
'identities': identities.get(item.id),
'has2fa': authenticators[item.id],
}
return data
def serialize(self, obj, attrs, user):
d = {
'id': six.text_type(obj.id),
'name': obj.get_display_name(),
'username': obj.username,
'email': obj.email,
'avatarUrl': get_gravatar_url(obj.email, size=32),
'isActive': obj.is_active,
'isManaged': obj.is_managed,
'dateJoined': obj.date_joined,
'lastLogin': obj.last_login,
'has2fa': attrs['has2fa'],
}
if obj == user:
options = {
o.key: o.value
for o in UserOption.objects.filter(
user=user,
project__isnull=True,
)
}
stacktrace_order = int(options.get('stacktrace_order', -1) or -1)
if stacktrace_order == -1:
stacktrace_order = 'default'
elif stacktrace_order == 2:
stacktrace_order = 'newestFirst'
elif stacktrace_order == 1:
stacktrace_order = 'newestLast'
d['options'] = {
'language': options.get('language') or 'en',
'stacktraceOrder': stacktrace_order,
'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
'clock24Hours': options.get('clock_24_hours') or False,
}
if attrs.get('avatar'):
avatar = {
'avatarType': attrs['avatar'].get_avatar_type_display(),
'avatarUuid': attrs['avatar'].ident if attrs['avatar'].file else None
}
else:
avatar = {'avatarType': 'letter_avatar', 'avatarUuid': None}
d['avatar'] = avatar
if attrs['identities'] is not None:
d['identities'] = [{
'id': six.text_type(i.id),
'name': i.ident,
'organization': {
'slug': i.auth_provider.organization.slug,
'name': i.auth_provider.organization.name,
},
'provider': {
'id': i.auth_provider.provider,
'name': i.auth_provider.get_provider().name,
},
'dateSynced': i.last_synced,
'dateVerified': i.last_verified,
} for i in attrs['identities']]
return d
class DetailedUserSerializer(UserSerializer):
def get_attrs(self, item_list, user):
attrs = super(DetailedUserSerializer, self).get_attrs(item_list, user)
authenticators = defaultdict(list)
queryset = Authenticator.objects.filter(
user__in=item_list,
)
for auth in queryset:
# ignore things that aren't user controlled (like recovery codes)
if auth.interface.is_backup_interface:
continue
authenticators[auth.user_id].append(auth)
for item in item_list:
attrs[item]['authenticators'] = authenticators[item.id]
return attrs
def serialize(self, obj, attrs, user):
d = super(DetailedUserSerializer, self).serialize(obj, attrs, user)
d['authenticators'] = [{
'id': six.text_type(a.id),
'type': a.interface.interface_id,
'name': a.interface.name,
'dateCreated': a.created_at,
'dateUsed': a.last_used_at,
} for a in attrs['authenticators']]
return d
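# Usage sketch (illustrative; it assumes sentry's serialize() helper plus an existing
# User instance and request object):
#
#   from sentry.api.serializers import serialize
#   payload = serialize(some_user, request.user)            # picks UserSerializer via @register
#   detailed = serialize(some_user, request.user, DetailedUserSerializer())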
| huntxu/neutron | neutron/agent/linux/interface.py | Python | apache-2.0 | 19,111 | 0 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import time
import netaddr
from neutron_lib import constants
from oslo_log import log as logging
import six
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.common import constants as n_const
from neutron.common import exceptions
LOG = logging.getLogger(__name__)
def _get_veth(name1, name2, namespace2):
return (ip_lib.IPDevice(name1),
ip_lib.IPDevice(name2, namespace=namespace2))
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
DEV_NAME_LEN = n_const.LINUX_DEV_LEN
DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX
def __init__(self, conf):
self.conf = conf
self._mtu_update_warn_logged = False
@property
def use_gateway_ips(self):
"""Whether to use gateway IPs instead of unique IP allocations.
In each place where the DHCP agent runs, and for each subnet for
        which DHCP is handing out IP addresses, the DHCP port needs -
at the Linux level - to have an IP address within that subnet.
Generally this needs to be a unique Neutron-allocated IP
address, because the subnet's underlying L2 domain is bridged
across multiple compute hosts and network nodes, and for HA
there may be multiple DHCP agents running on that same bridged
L2 domain.
However, if the DHCP ports - on multiple compute/network nodes
but for the same network - are _not_ bridged to each other,
they do not need each to have a unique IP address. Instead
they can all share the same address from the relevant subnet.
This works, without creating any ambiguity, because those
ports are not all present on the same L2 domain, and because
no data within the network is ever sent to that address.
(DHCP requests are broadcast, and it is the network's job to
ensure that such a broadcast will reach at least one of the
available DHCP servers. DHCP responses will be sent _from_
the DHCP port address.)
Specifically, for networking backends where it makes sense,
the DHCP agent allows all DHCP ports to use the subnet's
gateway IP address, and thereby to completely avoid any unique
        IP address allocation. This behaviour is selected by running
the DHCP agent with a configured interface driver whose
'use_gateway_ips' property is True.
When an operator deploys Neutron with an interface driver that
makes use_gateway_ips True, they should also ensure that a
        gateway IP address is defined for each DHCP-enabled subnet,
and that the gateway IP address doesn't change during the
subnet's lifetime.
"""
return False
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=None, clean_connections=False):
"""Set the L3 settings for the interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
"""
preserve_ips = preserve_ips or []
device = ip_lib.IPDevice(device_name, namespace=namespace)
# The LLA generated by the operating system is not known to
# Neutron, so it would be deleted if we added it to the 'previous'
# list here
default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address)
cidrs = set()
remove_ips = set()
# normalize all the IP addresses first
for ip_cidr in ip_cidrs:
net = netaddr.IPNetwork(ip_cidr)
# Convert to compact IPv6 address because the return values of
# "ip addr list" are compact.
if net.version == 6:
ip_cidr = str(net)
cidrs.add(ip_cidr)
# Determine the addresses that must be added and removed
for address in device.addr.list():
cidr = address['cidr']
dynamic = address['dynamic']
# skip the IPv6 link-local
if cidr == default_ipv6_lla:
# it's already configured, leave it alone
cidrs.discard(cidr)
continue
if cidr in preserve_ips:
continue
# Statically created addresses are OK, dynamically created
# addresses must be removed and replaced
if cidr in cidrs and not dynamic:
cidrs.remove(cidr)
continue
remove_ips.add(cidr)
# Clean up any old addresses. This must be done first since there
# could be a dynamic address being replaced with a static one.
for ip_cidr in remove_ips:
if clean_connections:
device.delete_addr_and_conntrack_state(ip_cidr)
else:
device.addr.delete(ip_cidr)
# add any new addresses
for ip_cidr in cidrs:
device.addr.add(ip_cidr)
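    # Illustration (hypothetical values): a typical call looks like
    #   init_l3('tap12345678-ab', ['192.168.0.5/24', '2001:db8::5/64'], namespace='qrouter-x')
    # after which the listed CIDRs are configured on the device and stale addresses
    # (apart from preserve_ips and the IPv6 link-local) have been removed.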
def init_router_port(self,
device_name,
ip_cidrs,
namespace,
preserve_ips=None,
extra_subnets=None,
clean_connections=False):
"""Set the L3 settings for a router interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
extra_subnets: An iterable of cidrs to add as routes without address
"""
LOG.debug("init_router_port: device_name(%s), namespace(%s)",
device_name, namespace)
self.init_l3(device_name=device_name,
ip_cidrs=ip_cidrs,
namespace=namespace,
preserve_ips=preserve_ips or [],
clean_connections=clean_connections)
device = ip_lib.IPDevice(device_name, namespace=namespace)
# Manage on-link routes (routes without an associated address)
new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
v4_onlink = device.route.list_onlink_routes(constants.IP_VERSION_4)
v6_onlink = device.route.list_onlink_routes(constants.IP_VERSION_6)
existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
for route in new_onlink_cidrs - existing_onlink_cidrs:
LOG.debug("adding onlink route(%s)", route)
device.route.add_onlink_route(route)
for route in (existing_onlink_cidrs - new_onlink_cidrs -
set(preserve_ips or [])):
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)
def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
net = netaddr.IPNetwork(v6addr)
device.addr.add(str(net), scope)
def delete_ipv6_addr(self, device_name, v6addr, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
device.delete_addr_and_conntrack_state(v6addr)
def delete_ipv6_addr_with_prefix(self, device_name, prefix,
| bentiss/hid-replay | tools/hid.py | Python | gpl-2.0 | 3,108 | 0.009653 |
#!/bin/env python3
# -*- coding: utf-8 -*-
#
# Hid replay / hid.py: table of hid usages and definitions
#
# Copyright (c) 2012-2017 Benjamin Tissoires <benjamin.tissoires@gmail.com>
# Copyright (c) 2012-2017 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import parse_hut
hid_items = {
"Main": {
"Input" : 0b10000000,
"Output" : 0b10010000,
"Feature" : 0b10110000,
"Collection" : 0b10100000,
"End Collection" : 0b11000000,
},
"Global": {
"Usage Page" : 0b00000100,
"Logical Minimum" : 0b00010100,
"Logical Maximum" : 0b00100100,
"Physical Minimum" : 0b00110100,
"Physical Maximum" : 0b01000100,
"Unit Exponent" : 0b01010100,
"Unit" : 0b01100100,
"Report Size" : 0b01110100,
"Report ID" : 0b10000100,
"Report Count" : 0b10010100,
"Push" : 0b10100100,
"Pop" : 0b10110100,
},
"Local": {
"Usage" : 0b00001000,
"Usage Minimum" : 0b00011000,
"Usage Maximum" : 0b00101000,
"Designator Index" : 0b00111000,
"Designator Minimum" : 0b01001000,
"Designator Maximum" : 0b01011000,
"String Index" : 0b01111000,
"String Minimum" : 0b10001000,
"String Maximum" : 0b10011000,
"Delimiter" : 0b10101000,
},
}
collections = {
'PHYSICAL' : 0,
'APPLICATION' : 1,
'LOGICAL' : 2,
}
sensor_mods = {
0x00: 'Mod None',
0x10: 'Mod Change Sensitivity Abs',
0x20: 'Mod Max',
0x30: 'Mod Min',
0x40: 'Mod Accuracy',
0x50: 'Mod Resolution',
0x60: 'Mod Threshold High',
0x70: 'Mod Threshold Low',
0x80: 'Mod Calibration Offset',
0x90: 'Mod Calibration Multiplier',
0xa0: 'Mod Report Interval',
0xb0: 'Mod Frequency Max',
0xc0: 'Mod Period Max',
0xd0: 'Mod Change Sensitivity Range Percent',
0xe0: 'Mod Change Sensitivity Rel Percent',
0xf0: 'Mod Vendor Reserved',
}
inv_hid = {}
hid_type = {}
for type, items in list(hid_items.items()):
for k, v in list(items.items()):
inv_hid[v] = k
hid_type[k] = type
usages = parse_hut.parse()
usage_pages = {}
inv_usage_pages = {}
inv_usages = {}
for usage, (name, filename, usage_list) in usages.items():
inv_usage_pages[usage] = name
usage_pages[name] = usage
for k, v in list(usage_list.items()):
inv_usages[(usage << 16) | k] = v
inv_collections = dict([(v, k) for k, v in list(collections.items())])
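# Decoding sketch (illustrative, not part of hid-replay): a short-item prefix byte keeps
# its 2-bit size field in bits 0-1, so masking those off gives the key used in the
# inv_hid / hid_type tables built above.
item_prefix = 0x81                              # first byte of a typical Input item
key = item_prefix & 0xfc                        # 0b10000000
print(inv_hid[key], hid_type[inv_hid[key]])     # -> Input Main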
| pnavarro/neutron | neutron/tests/functional/agent/test_l3_agent.py | Python | apache-2.0 | 53,684 | 0.000112 |
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import os.path
import mock
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import testtools
import webob
import webob.dec
import webob.exc
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.callbacks import events
from neutron.callbacks import manager
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.openstack.common import uuidutils
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
from neutron.tests.functional.agent.linux import helpers
from neutron.tests.unit.agent.l3 import test_agent as test_l3_agent
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
METADATA_REQUEST_TIMEOUT = 60
def get_ovs_bridge(br_name):
return ovs_lib.OVSBridge(br_name)
class L3AgentTestFramework(base.BaseLinuxTestCase):
def setUp(self):
super(L3AgentTestFramework, self).setUp()
mock.patch('neutron.agent.l3.agent.L3PluginApi').start()
# TODO(pcm): Move this to BaseTestCase, if we find that more tests
# use this mechanism.
self._callback_manager = manager.CallbacksManager()
mock.patch.object(registry, '_get_callback_manager',
return_value=self._callback_manager).start()
self.agent = self._configure_agent('agent1')
def _get_config_opts(self):
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
def _configure_agent(self, host):
conf = self._get_config_opts()
l3_agent_main.register_opts(conf)
cfg.CONF.set_override('debug', False)
agent_config.setup_logging()
conf.set_override(
'interface_driver',
'neutron.agent.linux.interface.OVSInterfaceDriver')
conf.set_override('router_delete_namespaces', True)
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
conf.set_override('ovs_integration_bridge', br_int.br_name)
conf.set_override('external_network_bridge', br_ex.br_name)
temp_dir = self.get_new_temp_dir()
get_temp_file_path = functools.partial(self.get_temp_file_path,
root=temp_dir)
conf.set_override('state_path', temp_dir.path)
conf.set_override('metadata_proxy_socket',
get_temp_file_path('metadata_proxy'))
conf.set_override('ha_confs_path',
get_temp_file_path('ha_confs'))
conf.set_override('external_pids',
get_temp_file_path('external/pids'))
conf.set_override('host', host)
agent = neutron_l3_agent.L3NATAgentWithStateReport(host, conf)
mock.patch.object(ip_lib, '_arping').start()
return agent
def generate_router_info(self, enable_ha, ip_version=4, extra_routes=True,
enable_fip=True, enable_snat=True,
dual_stack=False, v6_ext_gw_with_sub=True):
if ip_version == 6 and not dual_stack:
enable_snat = False
enable_fip = False
extra_routes = False
if not v6_ext_gw_with_sub:
self.agent.conf.set_override('ipv6_gateway',
'fe80::f816:3eff:fe2e:1')
return test_l3_agent.prepare_router_data(ip_version=ip_version,
enable_snat=enable_snat,
enable_floating_ip=enable_fip,
enable_ha=enable_ha,
extra_routes=extra_routes,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub))
def manage_router(self, agent, router):
self.addCleanup(self._delete_router, agent, router['id'])
ri = self._create_router(agent, router)
return ri
def _create_router(self, agent, router):
agent._process_added_router(router)
return agent.router_info[router['id']]
def _delete_router(self, agent, router_id):
agent._router_removed(router_id)
def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
host=None):
fip = {'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': fip_address,
'fixed_ip_address': fixed_address,
'host': host}
router.router[l3_constants.FLOATINGIP_KEY].append(fip)
def _add_internal_interface_by_subnet(self, router, count=1,
ip_version=4,
ipv6_subnet_modes=None,
interface_id=None):
return test_l3_agent.router_append_subnet(router, count,
ip_version, ipv6_subnet_modes, interface_id)
def _namespace_exists(self, namespace):
ip = ip_lib.IPWrapper(namespace=namespace)
return ip.netns.exists(namespace)
def _metadata_proxy_exists(self, conf, router):
pm = external_process.ProcessManager(
conf,
router.router_id,
router.ns_name)
return pm.active
def device_exists_with_ips_and_mac(self, expected_device, name_getter,
namespace):
ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
return ip_lib.device_exists_with_ips_and_mac(
name_getter(expected_device['id']), ip_cidrs,
expected_device['mac_address'], namespace)
@staticmethod
def _port_first_ip_cidr(port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
def get_device_mtu(self, target_device, name_getter, namespace):
device = ip_lib.IPDevice(name_getter(target_device), namespace)
return device.link.mtu
def get_expected_keepalive_configuration(self, router):
router_id = router.router_id
ha_device_name = router.get_ha_device_name()
ha_device_cidr = self._port_first_ip_cidr(router.ha_port)
external_port = router.get_ex_gw_port()
ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
external_device_name = router.get_external_device_name(
external_port['id'])
external_device_cidr = self._port_first_ip_cidr(external_port)
| samedder/azure-cli | src/azure-cli-nspkg/setup.py | Python | mit | 1,582 | 0.000632 |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
VERSION = "3.0.1+dev"
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-nspkg',
version=VERSION,
description='Microsoft Azure CLI Namespace Package',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=[
'azure-nspkg>=2.0.0'
],
packages=[
'azure',
'azure.cli',
],
)
|
bbaronSVK/plugin.video.stream-cinema
|
resources/lib/params.py
|
Python
|
gpl-3.0
| 420
| 0
|
from __future__ import print_function, unicode_literals
import sys
from resources.lib.kodiutils import params as decode
class Params:
handle = int(sys.argv[1]) if len(sys.argv) > 1 else -1
orig_args = sys.argv[2] if len(sys.argv) > 2 else ''
args = decode(sys.argv[2]) if len(sys.argv) > 2 else {}
resume = sys.argv[3][7:] != 'false' if len(sys.argv) > 3 else False
url = None
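# (Added note; the example argv values are illustrative, not from the plugin:
# Kodi typically invokes a plugin roughly as
#   ['plugin://plugin.video.stream-cinema/', '<handle>', '?key=value', 'resume:false']
# which is why `resume` is read by skipping the 7-character 'resume:' prefix.)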
params = Params()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/string_tests.py
|
Python
|
mit
| 58,093
| 0.001635
|
"""
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import support
from collections import UserList
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123]
def __str__(self): return '{0} {1} {2}'.format(*self.seq)
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class BaseTest(unittest.TestCase):
# These tests are for buffers of values (bytes) and not
# specific to character interpretation, used for bytes objects
# and various string implementations
# The type to be tested
# Change in subclasses to change the behaviour of fixtype()
type2test = None
# All tests pass their arguments to the testing methods
# as str objects. fixtype() can be used to propagate
# these arguments to the appropriate type
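# For example (illustrative sketch, not part of the original file), a concrete
# test case for a hypothetical str subclass only needs:
#
#   class MyStrTest(BaseTest, unittest.TestCase):
#       type2test = MyStr   # hypothetical str subclass under test
#
# after which every argument given to checkequal()/checkraises() below is
# converted to MyStr by fixtype() before the method under test is called.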
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.items()
])
else:
return obj
# check that obj.method(*args) returns result
def checkequal(self, result, obj, methodname, *args):
result = self.fixtype(result)
obj = self.fixtype(obj)
args = self.fixtype(args)
realresult = getattr(obj, methodname)(*args)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if obj is realresult:
try:
class subtype(self.__class__.type2test):
pass
except TypeError:
pass # Skip this if we can't subclass
else:
obj = subtype(obj)
realresult = getattr(obj, methodname)(*args)
self.assertIsNot(obj, realresult)
# check that obj.method(*args) raises exc
def checkraises(self, exc, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(obj, methodname),
*args
)
# call obj.method(*args) without any checks
def checkcall(self, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
getattr(obj, methodname)(*args)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxsize, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxsize, 0)
self.checkraises(TypeError, 'hello', 'count')
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in range(base ** digits):
entry = []
for j in range(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = [self.fixtype(ts) for ts in teststrings]
for i in teststrings:
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, self.fixtype(''))),
len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxsize, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxsize, 0)
# issue 7458
self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in range(base ** digits):
entry = []
for j in range(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = [self.fixtype(ts) for ts in teststrings]
for i in teststrings:
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.
|
jim-pansn/graph-tool
|
doc/demos/animation_zombies.py
|
Python
|
gpl-3.0
| 5,436
| 0.001288
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This simple example on how to do animations using graph-tool. Here we do a
# simple simulation of an S->I->R->S epidemic model, where each vertex can be in
# one of the following states: Susceptible (S), infected (I), recovered (R). A
# vertex in the S state becomes infected either spontaneously with a probability
# 'x' or because a neighbour is infected. An infected node becomes recovered
# with probability 'r', and a recovered vertex becomes again susceptible with
# probability 's'.
# DISCLAIMER: The following code is definitely not the most efficient approach
# if you want to simulate this dynamics for very large networks, and/or for very
# long times. The main purpose is simply to highlight the animation capabilities
# of graph-tool.
from graph_tool.all import *
from numpy.random import *
import sys, os, os.path
import cairo
seed(42)
seed_rng(42)
# We need some Gtk and gobject functions
from gi.repository import Gtk, Gdk, GdkPixbuf, GObject
# We will use the karate-club network
g = collection.data["karate"]
pos = g.vp["pos"] # layout positions
# We will filter out vertices which are in the "Recovered" state, by masking
# them using a property map.
removed = g.new_vertex_property("bool")
# SIRS dynamics parameters:
x = 0.001 # spontaneous outbreak probability
r = 0.1 # I->R probability
s = 0.01 # R->S probability
# (Note that the S->I transition happens simultaneously for every vertex with a
# probability equal to the fraction of non-recovered neighbours which are
# infected.)
S = 0
I = 1
R = 2
# Initialize all vertices to the S state
state = g.new_vertex_property("int")
state.a = S
# Images used to draw the nodes. They need to be loaded as cairo surfaces.
Simg = cairo.ImageSurface.create_from_png("face-grin.png")
Simg_fear = cairo.ImageSurface.create_from_png("face-surprise.png")
Iimg = cairo.ImageSurface.create_from_png("zombie.png")
vertex_sfcs = g.new_vertex_property("object")
for v in g.vertices():
vertex_sfcs[v] = Simg
# Newly infected nodes will be highlighted in red
newly_infected = g.new_vertex_property("bool")
# If True, the frames will be dumped to disk as images.
offscreen = sys.argv[1] == "offscreen" if len(sys.argv) > 1 else False
max_count = 500
if offscreen and not os.path.exists("./frames"):
os.mkdir("./frames")
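# Usage sketch (added for clarity; the script file name is assumed):
#   python animation_zombies.py            -> interactive GTK+ window
#   python animation_zombies.py offscreen  -> PNG frames written to ./frames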
# This creates a GTK+ window with the initial graph layout
if not offscreen:
win = GraphWindow(g, pos, geometry=(500, 400),
vertex_size=42,
vertex_anchor=0,
edge_color=[0.6, 0.6, 0.6, 1],
edge_sloppy=True,
vertex_surface=vertex_sfcs,
vertex_halo=newly_infected,
vertex_halo_size=1.2,
vertex_halo_color=[0.8, 0, 0, 0.6])
else:
count = 0
win = Gtk.OffscreenWindow()
win.set_default_size(500, 400)
win.graph = GraphWidget(g, pos,
vertex_size=42,
vertex_anchor=0,
edge_sloppy=True,
edge_color=[0.6, 0.6, 0.6, 1],
vertex_surface=vertex_sfcs,
vertex_halo=newly_infected,
vertex_halo_color=[0.8, 0, 0, 0.6])
win.add(win.graph)
# This function will be called repeatedly by the GTK+ main loop, and we use it
# to update the state according to the SIRS dynamics.
def update_state():
newly_infected.a = False
removed.a = False
# visit the nodes in random order
vs = list(g.vertices())
shuffle(vs)
for v in vs:
if state[v] == I:
if random() < r:
state[v] = R
elif state[v] == S:
if random() < x:
state[v] = I
else:
ns = list(v.out_neighbours())
if len(ns) > 0:
w = ns[randint(0, len(ns))] # choose a random neighbour
if state[w] == I:
state[v] = I
newly_infected[v] = True
elif random() < s:
state[v] = S
if state[v] == R:
removed[v] = True
if state[v] == S:
if I in [state[w] for w in v.out_neighbours()]:
vertex_sfcs[v] = Simg_fear
else:
vertex_sfcs[v] = Simg
else:
vertex_sfcs[v] = Iimg
# Filter out the recovered vertices
g.set_vertex_filter(removed, inverted=True)
# The following will force the re-drawing of the graph, and issue a
# re-drawing of the GTK window.
win.graph.regenerate_surface()
win.graph.queue_draw()
# if doing an offscreen animation, dump frame to disk
if offscreen:
global count
pixbuf = win.get_pixbuf()
pixbuf.savev(r'./frames/zombies%06d.png' % count, 'png', [], [])
if count > max_count:
sys.exit(0)
count += 1
# We need to return True so that the main loop will call this function more
# than once.
return True
# Bind the function above as an 'idle' callback.
cid = GObject.idle_add(update_state)
# We will give the user the ability to stop the program by closing the window.
win.connect("delete_event", Gtk.main_quit)
# Actually show the window, and start the main loop.
win.show_all()
Gtk.main()
|
ivansoban/ILEngine
|
thirdparty/assimp/port/PyAssimp/pyassimp/postprocess.py
|
Python
|
mit
| 23,509
| 0.012676
|
# <hr>Calculates the tangents and bitangents for the imported meshes.
#
# Does nothing if a mesh does not have normals. You might want this post
# processing step to be executed if you plan to use tangent space calculations
# such as normal mapping applied to the meshes. There's a config setting,
# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify
# a maximum smoothing angle for the algorithm. However, usually you'll
# want to leave it at the default value.
#
aiProcess_CalcTangentSpace = 0x1
## <hr>Identifies and joins identical vertex data sets within all
# imported meshes.
#
# After this step is run, each mesh contains unique vertices,
# so a vertex may be used by multiple faces. You usually want
# to use this post processing step. If your application deals with
# indexed geometry, this step is compulsory or you'll just waste rendering
# time. <b>If this flag is not specified<b>, no vertices are referenced by
# more than one face and <b>no index buffer is required<b> for rendering.
#
aiProcess_JoinIdenticalVertices = 0x2
## <hr>Converts all the imported data to a left-handed coordinate space.
#
# By default the data is returned in a right-handed coordinate space (which
# OpenGL prefers). In this space, +X points to the right,
# +Z points towards the viewer, and +Y points upwards. In the DirectX
# coordinate space +X points to the right, +Y points upwards, and +Z points
# away from the viewer.
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_MakeLeftHanded = 0x4
## <hr>Triangulates all faces of all meshes.
#
# By default the imported mesh data might contain faces with more than 3
# indices. For rendering you'll usually want all faces to be triangles.
# This post processing step splits up faces with more than 3 indices into
# triangles. Line and point primitives are #not# modified! If you want
# 'triangles only' with no other kinds of primitives, try the following
# solution:
# <ul>
# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li>
# <li>Ignore all point and line meshes when you process assimp's output<li>
# <ul>
#
aiProcess_Triangulate = 0x8
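# Illustrative combination (added example; the loader call shown is an
# assumption about the pyassimp API and may differ between versions): the
# values in this module are plain integer bit flags, so several steps can be
# requested at once with a bitwise OR, e.g.
#
#   steps = aiProcess_JoinIdenticalVertices | aiProcess_Triangulate
#   scene = pyassimp.load('model.obj', processing=steps)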
## <hr>Removes some parts of the data structure (animations, materials,
# light sources, cameras, textures, vertex components).
#
# The components to be removed are specified in a separate
# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful
# if you don't need all parts of the output structure. Vertex colors
# are rarely used today for example... Calling this step to remove unneeded
# data from the pipeline as early as possible results in increased
# performance and a more optimized output data structure.
# This step is also useful if you want to force Assimp to recompute
# normals or tangents. The corresponding steps don't recompute them if
# they're already there (loaded from the source asset). By using this
# step you can make sure they are NOT there.
#
# This flag is a poor one, mainly because its purpose is usually
# misunderstood. Consider the following case: a 3D model has been exported
# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
# optimize the data because of these nasty little vertex colors.
# Most apps don't even process them, so it's all for nothing. By using
# this step, unneeded components are excluded as early as possible
# thus opening more room for internal optimizations.
#
aiProcess_RemoveComponent = 0x10
## <hr>Generates normals for all faces of all meshes.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there. Face normals are shared between all points
# of a single face, so a single point can have multiple normals, which
# forces the library to duplicate vertices in some cases.
# #aiProcess_JoinIdenticalVertices is #senseless# then.
#
# This flag may not be specified together with #aiProcess_GenSmoothNormals.
#
aiProcess_GenNormals = 0x20
## <hr>Generates smooth normals for all vertices in the mesh.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there.
#
# This flag may not be specified together with
# #aiProcess_GenNormals. There's a configuration option,
# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify
# an angle maximum for the normal smoothing algorithm. Normals exceeding
# this limit are not smoothed, resulting in a 'hard' seam between two faces.
# Using a decent angle here (e.g. 80 degrees) results in very good visual
# appearance.
#
aiProcess_GenSmoothNormals = 0x40
## <hr>Splits large meshes into smaller sub-meshes.
#
# This is quite useful for real-time rendering, where the number of triangles
# which can be maximally processed in a single draw-call is limited
# by the video driver/hardware. The maximum vertex buffer is usually limited
# too. Both requirements can be met with this step: you may specify both a
# triangle and vertex limit for a single mesh.
#
# The split limits can (and should!) be set through the
# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt>
# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and
# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>.
#
# Note that splitting is generally a time-consuming task, but only if there's
# something to split. The use of this step is recommended for most users.
#
aiProcess_SplitLargeMeshes = 0x80
## <hr>Removes the node graph and pre-transforms all vertices with
# the local transformation matrices of their nodes.
#
# The output scene still contains nodes, however there is only a
# root node with children, each one referencing only one mesh,
# and each mesh referencing one material. For rendering, you can
# simply render all meshes in order - you don't need to pay
# attention to local transformations and the node hierarchy.
# Animations are removed during this step.
# This step is intended for applications without a scenegraph.
# The step CAN cause some problems: if e.g. a mesh of the asset
# contains normals and another, using the same material index, does not,
# they will be brought together, but the first mesh's part of
# the normal list is zeroed. However, these artifacts are rare.
# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property
# can be set to normalize the scene's spatial dimension to the -1...1
# range.
#
aiProcess_PreTransformVertices = 0x100
## <hr>Limits the number of bones simultaneously affecting a single vertex
# to a maximum value.
#
# If any vertex is affected by more than the maximum number of bones, the least
# important vertex weights are removed and the remaining vertex weights are
# renormalized so that the weights still sum up to 1.
# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in
# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to
# supply your own limit to the post processing step.
#
# If you intend to perform the skinning in hardware, this post processing
# step might be of interest to you.
#
aiProcess_LimitBoneWeights = 0x200
## <hr>Validates the imported scene data structure.
# This makes sure that all indices are valid, all animations and
# bones are linked correctly, all material references are correct .. etc.
#
# It is recommended that you capture Assimp's log output if you use this flag,
# so you can easily find out what's wrong if a file fails the
# validation. The validator is quite strict and will find #all#
# inconsistencies in the data structure... It is recommended that plugin
# developers use it to debug their loaders. There are two types of
# validation failures:
# <ul>
# <li>Error: There's something wrong with the imported data. Further
# postprocessing is
|
0--key/lib
|
portfolio/Python/scrapy/inkshop/cartridgesavecouk.py
|
Python
|
apache-2.0
| 2,521
| 0.004363
|
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http import FormRequest
from productloader import load_product
import re
class CartridgeSave(BaseSpider):
name = 'cartridgesave.co.uk'
allowed_domains = ['cartridgesave.co.uk', 'www.cartridgesave.co.uk']
start_urls = ('http://www.cartridgesave.co.uk',)
def __init__(self, *args, **kwargs):
super(CartridgeSave, self).__init__(*args, **kwargs)
self.URL_BASE = 'http://www.cartridgesave.co.uk'
self.product_name_re = re.compile('.*/(.*?)\.html')
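# Illustrative behaviour of the pattern above (the example URL is made up):
# '.../hp-301-black-ink-cartridge.html' -> 'hp-301-black-ink-cartridge'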
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
res = {}
try:
# name = hxs.select('//div[@id="specification"]/ul/li[position()=1]').re('.* \((.*)\)')[0]
url = response.url
name = self.product_name_re.search(url).groups()[0]
price = hxs.select('.//span[@class="ex_vat_price"]/text()').re('\xa3(.*)')[0]
res['url'] = url
res['description'] = name
res['price'] = price
res['sku'] = res['description']
yield load_product(res, response)
except IndexError:
return
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
#categories
hxs = HtmlXPathSelector(response)
# printer brands
printers_brands = hxs.select('//div[@id="manufacturers"]//li/a/@href').extract()
for url in printers_brands:
url = urljoin_rfc(self.URL_BASE, url)
yield Request(url)
# printer list
printers_list = hxs.select('//ul[@class="printer_list"]//li/a/@href').extract()
for url in printers_list:
url = urljoin_rfc(self.URL_BASE, url)
yield Request(url)
# next page
# next_page =
# if next_page:
# url = urljoin_rfc(URL_BASE, next_page[0])
# yield Request(url)
# products
products = hxs.select('//div[@class="group_products"]//li/a[not(@class="lowest_price info")]/@href').extract()
for product in products:
product = urljoin_rfc(self.URL_BASE, product)
yield Request(product, callback=self.parse_product)
|
Bolt64/my_code
|
twitter_bot/get_ip.py
|
Python
|
mit
| 881
| 0.010216
|
#!/usr/bin/python3
"""
A script to get the public ip address from http://checkip.dyndns.org
"""
import urllib.error
import urllib.request
import re
import time
def contact_server():
"""
Try to get public ip address
"""
ipv4_address_pattern=re.compile(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
ip_addresses=[]
try:
try:
with urllib.request.urlopen("http://checkip.dyndns.org") as urlobject:
ip_addresses=[addr for addr in ipv4_address_pattern.findall(str(urlobject.read()))]
return ip_addresses
except ConnectionResetError:
return None
except urllib.error.URLError:
return None
def get_ip():
"""
Keep running in a loop until an ip_address is obtained
"""
result=None
while not result:
time.sleep(0.1)
result=contact_server()
return result[0]
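# Minimal usage sketch (added for illustration, not part of the original
# script): running the module directly prints the detected public address.
if __name__ == '__main__':
    print(get_ip())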
|
fifengine/fifengine
|
tests/swig_tests/action_tests.py
|
Python
|
lgpl-2.1
| 4,655
| 0.02986
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from builtins import range
from .swig_test_utils import *
from fife.extensions.serializers.xmlanimation import loadXMLAnimation
class ActionTests(unittest.TestCase):
def setUp(self):
template = 'tests/data/wolf_walk/wolf_walk_%s.xml'
dirnames = ['e', 'ne', 'n', 'nw', 'w', 'sw', 's', 'se']
files = [template % dirname for dirname in dirnames]
self.engine = getEngine()
self.map = self.engine.getModel().createMap("map001")
self.grid = self.engine.getModel().getCellGrid("square")
self.layer = self.map.createLayer("Layer001", self.grid)
self.layer.setWalkable(True)
self.layer.createCellCache()
self.target = fife.Location(self.layer)
self.obj = fife.Object("object001", 'plaa')
fife.ObjectVisual.create(self.obj)
self.pather = fife.RoutePather()
self.obj.setPather(self.pather)
self.action = self.obj.createAction('walk')
fife.ActionVisual.create(self.action)
for index, direction in enumerate(dirnames):
degree = 45 * index
self.action.get2dGfxVisual().addAnimation(degree, loadXMLAnimation(self.engine, files[index]))
self.ground = fife.Object("ground", 'plaa')
image = self.engine.getImageManager().load('tests/data/earth_1.png')
fife.ObjectVisual.create(self.ground)
self.ground.get2dGfxVisual().addStaticImage(0, image.getHandle())
self.ground.img = self.engine.getImageManager().get(image.getHandle())
for y in range(-2,3):
for x in range(-2,3):
inst = self.layer.createInstance(self.ground, fife.ModelCoordinate(x,y))
fife.InstanceVisual.create(inst)
self.inst = self.layer.createInstance(self.obj, fife.ModelCoordinate(-2,-2))
fife.InstanceVisual.create(self.inst)
def tearDown(self):
self.engine.destroy()
def _testWalkingAction(self):
self.inst.move('walk', self.target, 0.05)
self.engine.initializePumping()
backend = self.engine.renderBackend
for i in range(360):
self.inst.getLocation().getLayerCoordinates()
self.target.getLayerCoordinates()
if self.inst.getLocation().getLayerCoordinates() == self.target.getLayerCoordinates():
break
self.inst.update()
action = self.inst.currentAction
angle = 0 #self.inst.orientation
animation = action.getAnimationByAngle(angle)
timestamp = self.inst.actionRuntime % animation.duration
image = animation.getFrameByTimestamp( timestamp )
if image:
image.render(fife.Rect(0,0,image.width,image.height),255)
self.engine.pump()
self.engine.finalizePumping()
def testWalkAround(self):
rb = self.engine.getRenderBackend()
viewport = fife.Rect(0, 0, rb.getWidth(), rb.getHeight())
cam = self.map.addCamera("foo", viewport)
cam.setCellImageDimensions(self.ground.img.getWidth(), self.ground.img.getHeight())
cam.setRotation(45)
cam.setTilt(40)
renderer = fife.InstanceRenderer.getInstance(cam)
renderer.activateAllLayers(self.map)
self.engine.initializePumping()
self.target.setLayerCoordinates(fife.ModelCoordinate(2,-2))
self.inst.move('walk', self.target, 0.9)
targets = (
(2,0), (2,-1), (2,-2), (1,-2),
(0,-2), (-1,-2), (-2,-2), (-2,-1),
(-2,0), (-2,1), (-2,2), (-1,2),
(0,2), (1,2), (2,2), (2,1))
for target in targets:
l = self.inst.getLocation()
l.setLayerCoordinates(fife.ModelCoordinate(0,0))
self.inst.setLocation(l)
self.target.setLayerCoordinates(fife.ModelCoordinate(*target))
self.inst.move('walk', self.target, 0.9)
for i in range(10):
self.engine.pump()
self.engine.finalizePumping()
self.map.removeCamera("foo")
TEST_CLASSES = [ActionTests]
if __name__ == '__main__':
unittest.main()
|
scotfu/uppir
|
test_simplexorrequestor.py
|
Python
|
mit
| 2,272
| 0.011884
|
# on success, nothing is printed
import simplexorrequestor
# I'm keeping some of these datastructures tiny in order to make the output
# more readable if an error is discovered
mirrorinfolist = [{'name':'mirror1'}, {'name':'mirror2'}, {'name':'mirror3'}, {'name':'mirror4'}, {'name':'mirror5'}]
blocklist = [12,34]
# this would usually be richer, but I only need these fields
manifestdict = {'blockcount':64, 'hashalgorithm':'noop',
'blockhashlist':['']*64}
rxgobj = simplexorrequestor.RandomXORRequestor(mirrorinfolist, blocklist, manifestdict, 2)
request1 = rxgobj.get_next_xorrequest()
request2 = rxgobj.get_next_xorrequest()
# success!
rxgobj.notify_success(request1,'a')
request3 = rxgobj.get_next_xorrequest()
# so request1 and request3 should be for the same mirror...
assert(request1[0] == request3[0])
# failure..
rxgobj.notify_failure(request2)
request4 = rxgobj.get_next_xorrequest()
assert(request2[0] != request4[0])
# so request2 and request4 should be for different mirrors...
# success!
rxgobj.notify_success(request3,'b')
# we're out of blocks to request from the first mirror...
rxgobj.notify_success(request4,chr(2))
# we're out of blocks to request from the first mirror...
request5 = rxgobj.get_next_xorrequest()
assert(request5[0] != request3[0])
assert(request5[0] == request4[0])
# we should have requested from the same mirror we tried before
rxgobj.notify_success(request5,chr(4))
# this should be ()
request6 = rxgobj.get_next_xorrequest()
assert(request6 == ())
# okay, now it's time to see if we get the right answer... 'a' ^ chr(2) == 'c'
answer1 = rxgobj.return_block(12)
# 'b' ^ chr(4) == 'f'
answer2 = rxgobj.return_block(34)
assert(answer1 == 'c')
assert(answer2 == 'f')
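# (Added sanity check of the XOR arithmetic claimed in the comments above:
# chr(ord('a') ^ 2) is 'c' and chr(ord('b') ^ 4) is 'f'.)
assert chr(ord('a') ^ 2) == 'c' and chr(ord('b') ^ 4) == 'f'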
# Now let's try this where we chew through all of the mirrors to ensure we get
# the right exception
mirrorinfolist = [{'name':'mirror1'}, {'name':'mirror2'}, {'name':'mirror3'}]
rxgobj = simplexorrequestor.RandomXORRequestor(mirrorinfolist, blocklist, manifestdict, 2)
request1 = rxgobj.get_next_xorrequest()
request2 = rxgobj.get_next_xorrequest()
rxgobj.notify_failure(request1)
try:
rxgobj.notify_failure(request2)
except simplexorrequestor.InsufficientMirrors:
pass
else:
print "Should be notified of insufficient mirrors!"
|
lavish205/olympia
|
src/olympia/amo/tasks.py
|
Python
|
bsd-3-clause
| 2,217
| 0
|
import datetime
from django.apps import apps
from django.core.mail import EmailMessage, EmailMultiAlternatives
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.celery import task
from olympia.amo.utils import get_email_backend
from olympia.bandwagon.models import Collection
log = olympia.core.logger.getLogger('z.task')
@task
def send_email(recipient, subject, message, from_email=None,
html_message=None, attachments=None, real_email=False,
cc=None, headers=None, max_retries=3, reply_to=None,
**kwargs):
backend = EmailMultiAlternatives if html_message else EmailMessage
connection = get_email_backend(real_email)
result = backend(subject, message, from_email, to=recipient, cc=cc,
connection=connection, headers=headers,
attachments=attachments, reply_to=reply_to)
if html_message:
result.attach_alternative(html_message, 'text/html')
try:
result.send()
return True
except Exception as e:
log.exception('send_mail() failed with error: %s, retrying' % e)
return send_email.retry(exc=e, max_retries=max_retries)
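# Illustrative call (added example; the recipient address is made up): since
# send_email is a Celery task, callers usually queue it rather than call it
# inline, e.g.
#
#   send_email.delay(['user@example.com'], 'Subject', 'Body text')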
@task
def set_modified_on_object(app_label, model_name, pk, **kw):
"""Sets modified on one object at a time."""
model = apps.get_model(app_label, model_name)
obj = model.objects.get(pk=pk)
try:
log.info('Setting modified on object: %s, %s' % (model_name, pk))
obj.update(modified=datetime.datetime.now(), **kw)
except Exception as e:
log.error('Failed to set modified on: %s, %s - %s' %
(model_name, pk, e))
@task
def delete_logs(items, **kw):
log.info('[%s@%s] Deleting logs' % (len(items), delete_logs.rate_limit))
ActivityLog.objects.filter(pk__in=items).exclude(
action__in=amo.LOG_KEEP).delete()
@task
def delete_anonymous_collections(items, **kw):
log.info('[%s@%s] Deleting anonymous collections' %
(len(items), delete_anonymous_collections.rate_limit))
Collection.objects.filter(type=amo.COLLECTION_ANONYMOUS,
pk__in=items).delete()
|
peterwilletts24/MetCalcs
|
setup.py
|
Python
|
gpl-3.0
| 459
| 0.010893
|
from distutils.core import setup
setup(
name='MetCalcs',
version='0.1.1',
author='Peter D. Willetts',
author_email='peterwilletts24@gmail.com',
packages=['metcalcs',],
#scripts=[],
license='LICENSE.txt',
description='Some functions for calculating met parameters from sounding variables, and from parcel ascents',
long_description=open('README.txt').read(),
install_requires=[
"numpy
|
",
"scipy",]
)
|
joaduo/mepinta
|
core/python_core/mepinta/pipelineview/actiontree/tests/test_ActionTreeManager.py
|
Python
|
gpl-3.0
| 2,762
| 0.00181
|
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo, mepinta@joaquinduo.com.ar
This file is part of Mepinta under GPL 3
'''
import unittest
from mepinta.pipelineview.actiontree.data_model import ActionTree
from mepinta.pipelineview.actiontree.ActionTreeManager import ActionTreeManager
from pprint import pprint, pformat
from mepinta.context.MepintaContext import MepintaContext
from pipeline_backend.logging.logging import LOG_DEBUG
class Test(unittest.TestCase):
def print_verify(self, answers, tree):
print '='*8
print tree
count = max(answers.keys()) if answers else 1
answers[count + 1] = tree.buildTree(string=True)
def test_adding_actions(self):
import plugins.python.processors.actiontree.UndoableGraph.generator.EmptyGraph as EmptyGraph
answers = {}
tree = ActionTree()
mngr = ActionTreeManager()
mngr.addAction(tree, EmptyGraph)
print tree.actions_graph.pline.getTopology()
def test_tree(self):
answers = {}
tree = ActionTree()
mngr = ActionTreeManager()
self.print_verify(answers, tree)
mngr.undoAction(tree)
self.print_verify(answers, tree)
mngr.redoAction(tree)
self.print_verify(answers, tree)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
self.print_verify(answers, tree)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
self.print_verify(answers, tree)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
act3 = tree.current_action
self.print_verify(answers, tree)
mngr.undoAction(tree)
self.print_verify(answers, tree)
mngr.redoAction(tree)
self.print_verify(answers, tree)
mngr.undoAction(tree)
self.print_verify(answers, tree)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
act4 = tree.current_action
self.print_verify(answers, tree)
print mngr.setCurrentAction(tree, act3)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
self.print_verify(answers, tree)
# print mngr.setCurrentAction(tree, act3)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
self.print_verify(answers, tree)
# print mngr.setCurrentAction(tree, act3)
mngr.addAction(tree, 'actiontree.UndoableGraph.generator.EmptyGraph')
self.print_verify(answers, tree)
print mngr.setCurrentAction(tree, act4)
mngr.eval(tree)
if __name__ == "__main__":
MepintaContext('python', log_level=LOG_DEBUG)
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
mwest1066/PrairieLearn
|
elements/pl-integer-input/pl-integer-input.py
|
Python
|
agpl-3.0
| 8,803
| 0.001022
|
import lxml.html
from html import escape
import chevron
import math
import prairielearn as pl
import numpy as np
import random
WEIGHT_DEFAULT = 1
CORRECT_ANSWER_DEFAULT = None
LABEL_DEFAULT = None
SUFFIX_DEFAULT = None
DISPLAY_DEFAULT = 'inline'
def prepare(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
required_attribs = ['answers-name']
optional_attribs = ['weight', 'correct-answer', 'label', 'suffix', 'display']
pl.check_attribs(element, required_attribs, optional_attribs)
name = pl.get_string_attrib(element, 'answers-name')
correct_answer = pl.get_integer_attrib(element, 'correct-answer', CORRECT_ANSWER_DEFAULT)
if correct_answer is not None:
if name in data['correct_answers']:
raise Exception('duplicate correct_answers variable name: %s' % name)
data['correct_answers'][name] = correct_answer
def render(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
label = pl.get_string_attrib(element, 'label', LABEL_DEFAULT)
suffix = pl.get_string_attrib(element, 'suffix', SUFFIX_DEFAULT)
display = pl.get_string_attrib(element, 'display', DISPLAY_DEFAULT)
if data['panel'] == 'question':
editable = data['editable']
raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
# Get info strings
info_params = {'format': True}
with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
info = chevron.render(f, info_params).strip()
with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
info_params.pop('format', None)
info_params['shortformat'] = True
shortinfo = chevron.render(f, info_params).strip()
html_params = {
'question': True,
'name': name,
'label': label,
'suffix': suffix,
'editable': editable,
'info': info,
'shortinfo': shortinfo,
'uuid': pl.get_uuid()
}
partial_score = data['partial_scores'].get(name, {'score': None})
score = partial_score.get('score', None)
if score is not None:
try:
score = float(score)
if score >= 1:
html_params['correct'] = True
elif score > 0:
html_params['partial'] = math.floor(score * 100)
else:
html_params['incorrect'] = True
except Exception:
raise ValueError('invalid score: ' + str(score))
if display == 'inline':
html_params['inline'] = True
elif display == 'block':
html_params['block'] = True
else:
raise ValueError('method of display "%s" is not valid (must be "inline" or "block")' % display)
if raw_submitted_answer is not None:
html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
elif data['panel'] == 'submission':
parse_error = data['format_errors'].get(name, None)
html_params = {
'submission': True,
'label': label,
'parse_error': parse_error,
'uuid': pl.get_uuid()
}
if parse_error is None:
# Get submitted answer, raising an exception if it does not exist
a_sub = data['submitted_answers'].get(name, None)
if a_sub is None:
raise Exception('submitted answer is None')
# If answer is in a format generated by pl.to_json, convert it
# back to a standard type (otherwise, do nothing)
a_sub = pl.from_json(a_sub)
html_params['suffix'] = suffix
html_params['a_sub'] = '{:d}'.format(a_sub)
else:
raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
if raw_submitted_answer is not None:
html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
partial_score = data['partial_scores'].get(name, {'score': None})
score = partial_score.get('score', None)
if score is not None:
try:
score = float(score)
if score >= 1:
html_params['correct'] = True
elif score > 0:
html_params['partial'] = math.floor(score * 100)
else:
html_params['incorrect'] = True
except Exception:
raise ValueError('invalid score: ' + str(score))
with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
elif data['panel'] == 'answer':
a_tru = pl.from_json(data['correct_answers'].get(name, None))
if a_tru is not None:
html_params = {'answer': True, 'label': label, 'a_tru': '{:d}'.format(a_tru), 'suffix': suffix}
with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
html = chevron.render(f, html_params).strip()
else:
html = ''
else:
raise Exception('Invalid panel type: %s' % data['panel'])
return html
def parse(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
# Get submitted answer or return parse_error if it does not exist
a_sub = data['submitted_answers'].get(name, None)
if a_sub is None:
data['format_errors'][name] = 'No submitted answer.'
data['submitted_answers'][name] = None
return
# Convert to integer
try:
a_sub_parsed = pl.string_to_integer(a_sub)
if a_sub_parsed is None:
raise ValueError('invalid submitted answer (wrong type)')
if not np.isfinite(a_sub_parsed):
raise ValueError('invalid submitted answer (not finite)')
data['submitted_answers'][name] = pl.to_json(a_sub_parsed)
except Exception:
data['format_errors'][name] = 'Invalid format. The submitted answer was not an integer.'
data['submitted_answers'][name] = None
def grade(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
# Get weight
weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)
# Get true answer (if it does not exist, create no grade - leave it
# up to the question code)
a_tru = pl.from_json(data['correct_answers'].get(name, None))
if a_tru is None:
return
# Get submitted answer (if it does not exist, score is zero)
a_sub = data['submitted_answers'].get(name, None)
if a_sub is None:
data['partial_scores'][name] = {'score': 0, 'weight': weight}
return
# If submitted answer is in a format generated by pl.to_json, convert it
# back to a standard type (otherwise, do nothing)
a_sub = pl.from_json(a_sub)
# Cast both submitted and true answers as integers.
a_tru = int(a_tru)
a_sub = int(a_sub)
if a_tru == a_sub:
data['partial_scores'][name] = {'score': 1, 'weight': weight}
else:
data['partial_scores'][name] = {'score': 0, 'weight': weight}
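# Rough shape of the data grade() consumes and produces (an informal summary
# based on the reads/writes above, not an official PrairieLearn schema):
#   data['correct_answers'][name]   -> int, the true answer
#   data['submitted_answers'][name] -> parsed submission (possibly in pl.to_json form)
#   data['partial_scores'][name]    -> {'score': 0 or 1, 'weight': weight}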
def test(element_html, data):
element = lxml.html.fragment_fromstring(element_html)
name = pl.get_string_attrib(element, 'answers-name')
weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)
# Get correct answer
a_tru = data['correct_answers'][name]
# If correct answer is in a format generated by pl.to_json, convert it
# back to a standard type (otherwise, do nothing)
a_tru = pl.from_json(a_tru)
result = random.choices(['correct', 'incorrect', 'invalid'], [5, 5, 1])[0]
if result == 'correct':
data['raw_submitted_answers'][name] = str(a_tru)
data['partial_scores'][name] = {'score': 1, 'weight': weight}
el
|
rmotr-group-projects/itp-w1-highest-number-cubed
|
tests/test_main.py
|
Python
|
mit
| 443
| 0
|
import unittest
from highest_number_cubed import highest_number_cubed
class TestHighestNumberCubed(unittest.TestCase):
def test_three(self):
self.assertEqual(highest_number_cubed(30), 3)
def test_two(self):
self.assertEqual(highest_number_cubed(12), 2)
def test_one(self):
self.assertEqual(highest_number_cubed(3), 1)
def test_big(self):
self.assertEqual(highest_number_cubed(12000), 22)
|
tommy-u/chaco
|
chaco/scales_tick_generator.py
|
Python
|
bsd-3-clause
| 1,682
| 0.004162
|
""" Defines the ScalesTickGenerator class.
"""
from numpy import array
from traits.api import Any
from enable.font_metrics_provider import font_metrics_provider
from ticks import AbstractTickGenerator
# Use the new scales/ticks library
from scales.api import ScaleSystem
class ScalesTickGenerator(AbstractTickGenerator):
scale = Any #Instance(ScaleSystem, args=())
font = Any
def _scale_default(self):
return ScaleSystem()
def get_ticks(self, data_low, data_high, bounds_low, bounds_high, interval,
use_endpoints=False, scale=None):
if interval != "auto":
ticks = self.scale.ticks(data_low, data_high, (data_high - data_low) / interval)
else:
ticks = self.scale.ticks(data_low, data_high)
return ticks
def get_ticks_and_labels(self, data_low, data_high, bounds_low, bounds_high,
orientation = "h"):
# TODO: add support for Interval
# TODO: add support for vertical labels
metrics = font_metrics_provider()
if self.font is not None and hasattr(metrics, "set_font"):
metrics.set_font(self.font)
test_str = "0
|
123456789-+"
charsize = metrics.get_full_text_extent(test_str)[0] / len(test_str)
numchars = (bounds_high - bounds_low) / charsize
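# (Added note: charsize is the average pixel width of one character of the
# sample string, so numchars estimates how many characters fit across the
# axis; it is passed as char_width below so the scale system can pick a label
# density that avoids overlap.)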
tmp = zip(*self.scale.labels(data_low, data_high, numlabels=8, char_width=numchars))
# Check to make sure we actually have labels/ticks to show before
# unpacking the return tuple into (tick_array, labels).
if len(tmp) == 0:
return array([]), []
else:
return array(tmp[0]), tmp[1]
|
vlegoff/tsunami
|
src/secondaires/navigation/editeurs/shedit/__init__.py
|
Python
|
bsd-3-clause
| 10,053
| 0.001802
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'shedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
"""
from textwrap import dedent
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.description import Description
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.flag import Flag
from primaires.interpreteur.editeur.tableau import Tableau
from primaires.interpreteur.editeur.uniligne import Uniligne
from .edt_carte import EdtCarte
class EdtShedit(Presentation):
"""Classe définissant l'éditeur de modèle de navires 'shedit'.
"""
nom = "shedit"
def __init__(self, personnage, modele):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, modele)
if personnage and modele:
self.construire(modele)
def __getnewargs__(self):
return (None, None)
def construire(self, modele):
"""Construction de l'éditeur"""
# nom
nom = self.ajouter_choix("nom", "n", Uniligne, modele, "nom")
nom.parent = self
nom.prompt = "Nom du navire : "
nom.apercu = "{objet.nom}"
nom.aide_courte = \
"Entrez le |ent|nom|ff| du navire ou |cmd|/|ff| pour revenir " \
"à la fenêtre parente.\n\nNom actuel : |bc|{objet.nom}|ff|"
# Description
description = self.ajouter_choix("description", "d",
Description, modele, "description")
description.parent = self
description.apercu = "{objet.description." \
"paragraphes_indentes}"
description.aide_courte = \
"| |tit|" + "Description du navire {}".format(
modele.cle).ljust(76) + "|ff||\n" + self.opts.separateur
# Description à la vente
description_vente = self.ajouter_choix("description à la vente", "ve",
Description, modele, "description_vente")
description_vente.parent = self
description_vente.apercu = "{objet.description_vente." \
"paragraphes_indentes}"
description_vente.aide_courte = \
"| |tit|" + "Description en vente du navire {}".format(
modele.cle).ljust(76) + "|ff||\n" + self.opts.separateur
# Genre masculin
masculin = self.ajouter_choix("genre masculin", "g", Flag, modele,
"masculin")
masculin.parent = self
# Descriptions indépendantes
independantes = self.ajouter_choix("descriptions indépendantes", "in", Flag,
modele, "descriptions_independantes")
independantes.parent = self
# Facteurs d'orientation
facteurs = self.ajouter_choix("facteurs d'orientation", "f", Tableau,
modele, "facteurs_orientations",
((("Allure", ["vent debout", "au près", "bon plein",
"largue", "grand largue", "vent arrière"]),
("Facteur", "flottant"))))
facteurs.parent = self
facteurs.apercu = "{valeur}"
facteurs.aide_courte = dedent("""
Entrez |cmd|/|ff| pour revenir à la fenêtre parente.

Entrez une allure, un signe |cmd|/|ff| et son facteur influençant
la vitesse. Si ce facteur est négatif, le navire va "culer".
Si ce facteur est positif, la vitesse (dépendant du vent)
sera multipliée par le facteur de l'allure précisé. Vent
debout est une allure qui fait culer le navire. Le facteur
augmente ensuite, restant faible (ou nul) pour le près, davantage
pour le bon plein puis le largue, qui est souvent la
meilleure allure. Le facteur diminue ensuite généralement
pour le grand largue et le vent arrière.
{valeur}""".strip("\n"))
# Peut conquérir
peut_conquerir = self.ajouter_choix("peut conquérir", "co", Flag,
modele, "peut_conquerir")
peut_conquerir.parent = self
# Niveau
niveau = self.ajouter_choix("niveau", "i", Entier, modele, "niveau")
niveau.parent = self
niveau.apercu = "{objet.niveau}"
niveau.prompt = "Niveau du navire : "
niveau.aide_courte = \
"Entrez |ent|le niveau|ff| du navire ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\n" \
"Le niveau détermine le nombre d'XP données en cas de " \
"récompense (quand\nun personnage conquit le navire par " \
"exemple). Une fraction du niveau\nest donné en XP secondaire " \
"du niveau navigation.\n\n" \
"Niveau actuel : {objet.niveau}"
# Prix
prix = self.ajouter_choix("prix unitaire", "u", Entier, modele,
"m_valeur", 1)
prix.parent = self
prix.apercu = "{objet.m_valeur}"
prix.prompt = "Prix unitaire du navire : "
prix.aide_courte = \
"Entrez |ent|le prix unitaire|ff| du navire ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\n" \
"Prix unitaire actuel : |bc|{objet.m_valeur}|ff|"
# Durée de construction
duree = self.ajouter_choix("durée de construction", "r", Entier,
modele, "duree_construction", 1)
duree.parent = self
duree.apercu = "{objet.duree_construction} minute(s)"
duree.prompt = "Durée de construction du nagvire (en minutes) : "
duree.aide_courte = \
"Entrez |ent|la durée de construction|ff| du navire ou " \
"|cmd|/|ff| pour\nrevenir à la fenêtre parente.\n\n" \
"Cette durée, exprimée en minutes, est celle qui doit " \
"s'écouler entre le\nmoment ou un personnage achète le navire " \
"dans un chantier naval et le moment\noù le chantier naval " \
"place son nouveau navire dans le port.\n\n" \
"Durée de construction actuelle : " \
"|bc|{objet.duree_construction}|ff| minute(s)"
# Poids max
poids_max = self.ajouter_choix("poids maximum", "p", Entier, modele,
"poids_max", 1)
poids_max.parent = self
poids_max.apercu = "{objet.poids_max} kg"
poids_max.prompt = "Poids maximum avant de sombrer (en kg) : "
poids_max.aide_courte = \
"Entrez |ent|le poids maximum|ff| du navire avan
|
pyhmsa/pyhmsa
|
pyhmsa/fileformat/xmlhandler/condition/calibration.py
|
Python
|
mit
| 965
| 0.006218
|
"""
XML handler for calibrations
"""
# Standard library modules.
# Third party modules.
# Local modules.
from pyhmsa.spec.condition.calibration import \
(CalibrationConstant, CalibrationLinear,
CalibrationPolynomial, CalibrationExplicit)
from pyhmsa.fileformat.xmlhandler.condition.condition import _ConditionXMLHandler
# Globals and constants variables.
class CalibrationConstantXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(CalibrationConstant, version)
class CalibrationLinearXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(CalibrationLinear, version)
class CalibrationPolynomialXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(CalibrationPolynomial, version)
class CalibrationExplicitXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(CalibrationExplicit, version)
|
yafeunteun/wikipedia-spam-classifier
|
revscoring/revscoring/languages/dutch.py
|
Python
|
mit
| 3,534
| 0
|
from .features import Dictionary, RegexMatches, Stemmed, Stopwords
name = "dutch"
try:
import enchant
dictionary = enchant.Dict("nl")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'nl'. " +
"Consider installing 'myspell-nl'.")
dictionary = Dictionary(name + ".dictionary", dictionary.check)
"""
:class:`~revscoring.languages.features.Dictionary` features via
:class:`enchant.Dict` "nl". Provided by `myspell-nl`
"""
try:
from nltk.corpus import stopwords as nltk_stopwords
stopwords = set(nltk_stopwords.words('dutch'))
except LookupError:
raise ImportError("Could not load stopwords for {0}. ".format(__name__) +
"You may need to install the nltk 'stopwords' " +
"corpora. See http://www.nltk.org/data.html")
stopwords = Stopwords(name + ".stopwords", stopwords)
"""
:class:`~revscoring.languages.features.Stopwords` features provided by
:func:`nltk.corpus.stopwords` "dutch"
"""
try:
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("dutch")
except ValueError:
raise ImportError("Could not load stemmer for {0}. ".format(__name__))
stemmed = Stemmed(name + ".stemmed", stemmer.stem)
"""
:class:`~revscoring.languages.features.Stemmed` word features via
:class:`nltk.stem.snowball.SnowballStemmer` "dutch"
"""
badword_regexes = [
r"aars",
r"an(aal|us)\w*",
r"balhaar",
r"drol(len)?",
r"fack(en|ing|s)?", "facking",
r"flikkers?",
r"focking",
r"ge(ile?|lul)",
r"geneukt",
r"hoer(en?)?",
r"homos?",
r"kaka?",
r"kak(hoofd|ken)",
r"k[ae]nker",
r"klootzak(ken)?",
r"klote",
r"kont(gat|je)?",
r"pedo",
r"penis(sen)?",
r"peop",
r"piemels?",
r"pijpen",
r"pik",
r"pimel",
r"pipi",
r"poep(chinees?|en|hoofd)?",
r"poep(ie|je|sex|te?)s?",
r"porno?",
r"neuke?",
r"neuken(de)?",
r"neukt(en?)?",
r"stron(d|t)",
r"suck(s|t)?",
r"zuigt",
r"sukkels?",
r"ter(ing|ten)", "tetten",
r"tieten",
r"vagina",
r"verekte",
r"verkracht",
r"dikzak",
r"dildo",
r"mon?g(olen|ool)?", "mooiboy",
r"negers?",
r"shit",
r"sperma",
r"kut(jes?)?",
r"stelletje",
r"losers?",
r"lul(len)?",
r"reet",
r"scheet", "scheten", r"schijt",
r"diaree",
r"slet",
r"lekkerding",
r"likken"
]
badwords = RegexMatches(name + ".badwords", badword_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
badword detecting regexes.
"""
informal_regexes = [
r"aap(jes)?",
r"banaan",
r"bent",
r"b
|
oe(it)?",
r"doei"
r"dombo",
r"domme",
r"eigelijk",
r"godverdomme",
r"groetjes",
r"gwn",
r"hoi",
r"hal+o+",
r"heb",
r"hee+[jyl]", r"heee+?l",
r"houd?",
r"(?:hoi+)+",
r"hoor",
r"izan",
r"jij",
r"jou",
r"jullie",
r"kaas",
r"klopt",
r"kots",
r"kusjes",
r"le?kke?re?",
r"maarja",
r"mama",
r"nou",
r"oma",
r"ofzo",
r"oke",
r"sexy?",
r"snap",
r"stink(en|t)",
r"stoer",
r"swag",
r"swek",
r"vies", "vieze",
r"vind",
r"vuile",
r"xxx",
r"yeah",
r"zielig",
r"zooi",
r"yolo",
r"zeg"
]
informals = RegexMatches(name + ".informals", informal_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
informal word detecting regexes.
"""
|
PragmaticMates/django-clever-selects
|
example/example/urls.py
|
Python
|
mit
| 1,638
| 0.007326
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from views import HomeView, SimpleChainView, MultipleChainView, ModelChainView, EditCarView, DeleteCarView, \
AjaxChainedNames, AjaxChainedCountries, AjaxChainedCities, AjaxChainedModels, AjaxChainedColors
urlpatterns = patterns('',
# Examples:
url(r'^ajax/chained-names/$', AjaxChainedNames.as_view(), name='ajax_chained_names'),
url(r'^ajax/chained-countries/$', AjaxChainedCountries.as_view(), name='ajax_chained_countries'),
url(r'^ajax/chained-cities/$', AjaxChainedCities.as_view(), name='ajax_chained_cities'),
url(r'^ajax/chained-brand-models/$', AjaxChainedModels.as_view(), name='ajax_chained_models'),
url(r'^ajax/chained-colors/$', AjaxChainedColors.as_view(), name='ajax_chained_colors'),
url(r'^simple-chain/$', SimpleChainView.as_view(), name='simple_chain'),
url(r'^multiple-chain/$', MultipleChainView.as_view(), name='multiple_chain'),
url(r'^model-chain/$', ModelChainView.as_view(), name='model_chain'),
url(r'^edit-car/(?P<pk>[-\d]+)/$', EditCarView.as_view(), name='edit_car'),
url(r'^delete-car/(?P<pk>[-\d]+)/$', DeleteCarView.as_view(), name='delete_car'),
url(r'^$', HomeView.as_view(), name='home'),
# url(r'^example/', include('example.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
wetneb/dissemin
|
deposit/migrations/0008_userpreferences.py
|
Python
|
agpl-3.0
| 1,117
| 0.003581
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-23 19:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('deposit', '0007_remove_urls_and_add_oairecord'),
]
operations = [
migrations.CreateModel(
name='UserPreferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_repository', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='last_used_by', to='deposit.Repository')),
('preferred_repository', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='preferrend_by', to='deposit.Repository')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
jeremy-bernon/Lilith
|
lilith/internal/effectivemu.py
|
Python
|
gpl-3.0
| 2,350
| 0.00383
|
##########################################################################
#
# This file is part of Lilith
# made by J. Bernon and B. Dumont
#
# Web page: http://lpsc.in2p3.fr/projects-th/lilith/
#
#  In case of questions, email bernon@lpsc.in2p3.fr or dum33@ibs.re.kr
#
#
# Lilith is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lilith is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lilith. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
from ..errors import EffectiveMuError
from warnings import warn
def compute_effective_mu(user_mu, mu, alpha):
"""Compute mu_eff(mu,alpha) based on the general template"""
effective_mu = {}
mutemplate = mu["mutemplate"]
prod_modes = ["ggH", "VBF", "WH", "ZH", "ttH"]
alpha_names = alpha.keys()
template_names = []
for template in mutemplate:
template_names.append(template["extra"]["syst"])
try:
alpha_names.index(template["extra"]["syst"])
except ValueError:
raise EffectiveMuError(
'systematic uncertainty '+template["extra"]["syst"]+' not provided in user input file')
# for name in [name for name in alpha_names if name not in template_names]:
# warn('systematic uncertainty '+name+
# ' in user input has no experimental equivalent: will be ignored',Warning,stacklevel=3)
for prod, decay in user_mu:
        mu_eff = user_mu[prod,decay]
for template in mutemplate:
alpha_i = alpha[template["extra"]["syst"]]["val"]
alpha_0 = template["alpha0"]
mu_eff += template["phi"]*(alpha_i-alpha_0)
for pprime in prod_modes:
                mu_eff += user_mu[pprime,decay]*template[prod,pprime]*(alpha_i-alpha_0)
effective_mu[prod,decay] = mu_eff
return effective_mu
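# A sketch (not part of Lilith) of the relation the loop above implements, assuming
# the straightforward reading of the code: for each production/decay pair (p, d),
#
#     mu_eff(p, d) = mu(p, d) + sum_t [ phi_t + sum_{p'} mu(p', d) * T_t(p, p') ] * (alpha_t - alpha0_t)
#
# where t runs over the "mutemplate" entries, p' over prod_modes, phi_t is
# template["phi"], T_t(p, p') is template[prod, pprime], alpha_t is the user-supplied
# nuisance parameter value and alpha0_t is template["alpha0"].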
|
tonybaloney/st2
|
contrib/runners/mistral_v2/tests/unit/test_mistral_v2_policy.py
|
Python
|
apache-2.0
| 11,045
| 0.00335
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import uuid
import mock
import yaml
from mistralclient.api.v2 import action_executions
from mistralclient.api.v2 import executions
from mistralclient.api.v2 import workflows
from oslo_config import cfg
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from mistral_v2 import MistralRunner
import st2common
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import policiesregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as action_constants
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.policy import Policy
from st2common.runners import base as runners
from st2common.services import action as action_service
from st2common.transport.liveaction import LiveActionPublisher
from st2common.transport.publishers import CUDPublisher
from st2common.util import loader
from st2tests import DbTestCase
from st2tests import fixturesloader
from st2tests.mocks.liveaction import MockLiveActionPublisher
MISTRAL_RUNNER_NAME = 'mistral_v2'
TEST_PACK = 'mistral_tests'
TEST_PACK_PATH = fixturesloader.get_fixtures_packs_base_path() + '/' + TEST_PACK
PACKS = [
TEST_PACK_PATH,
fixturesloader.get_fixtures_packs_base_path() + '/core'
]
# Non-workbook with a single workflow
WF1_META_FILE_NAME = 'workflow_v2.yaml'
WF1_META_FILE_PATH = TEST_PACK_PATH + '/actions/' + WF1_META_FILE_NAME
WF1_META_CONTENT = loader.load_meta_file(WF1_META_FILE_PATH)
WF1_NAME = WF1_META_CONTENT['pack'] + '.' + WF1_META_CONTENT['name']
WF1_ENTRY_POINT = TEST_PACK_PATH + '/actions/' + WF1_META_CONTENT['entry_point']
WF1_ENTRY_POINT_X = WF1_ENTRY_POINT.replace(WF1_META_FILE_NAME, 'xformed_' + WF1_META_FILE_NAME)
WF1_SPEC = yaml.safe_load(MistralRunner.get_workflow_definition(WF1_ENTRY_POINT_X))
WF1_YAML = yaml.safe_dump(WF1_SPEC, default_flow_style=False)
WF1 = workflows.Workflow(None, {'name': WF1_NAME, 'definition': WF1_YAML})
MISTRAL_EXECUTION = {'id': str(uuid.uuid4()), 'state': 'RUNNING', 'workflow_name': WF1_NAME}
WF1_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
@mock.patch.object(
CUDPublisher,
'publish_update',
mock.MagicMock(return_value=None))
@mock.patch.object(
CUDPublisher,
'publish_create',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_create))
@mock.patch.object(
LiveActionPublisher,
'publish_state',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_state))
class MistralRunnerPolicyTest(DbTestCase):
@classmethod
    def setUpClass(cls):
super(MistralRunnerPolicyTest, cls).setUpClass()
        # Override the retry configuration here otherwise st2tests.config.parse_args
# in DbTestCase.setUpClass will reset these overrides.
cfg.CONF.set_override('retry_exp_msec', 100, group='mistral')
cfg.CONF.set_override('retry_exp_max_msec', 200, group='mistral')
cfg.CONF.set_override('retry_stop_max_msec', 200, group='mistral')
cfg.CONF.set_override('api_url', 'http://0.0.0.0:9101', group='auth')
def setUp(self):
super(MistralRunnerPolicyTest, self).setUp()
# Start with a clean database for each test.
self._establish_connection_and_re_create_db()
# Register runners.
runnersregistrar.register_runners()
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False,
fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
# Register policies required for the tests.
policiesregistrar.register_policy_types(st2common)
policies_registrar = policiesregistrar.PolicyRegistrar(
use_pack_cache=False,
fail_on_failure=True
)
for pack in PACKS:
policies_registrar.register_from_pack(pack)
@classmethod
def get_runner_class(cls, runner_name):
return runners.get_runner(runner_name).__class__
def _drop_all_other_policies(self, test_policy):
policy_dbs = [policy_db for policy_db in Policy.get_all() if policy_db.ref != test_policy]
for policy_db in policy_dbs:
Policy.delete(policy_db, publish=False)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
action_executions.ActionExecutionManager, 'update',
mock.MagicMock(return_value=None))
def test_cancel_on_task_action_concurrency(self):
# Delete other policies in the test pack to avoid conflicts.
required_policy = 'mistral_tests.cancel_on_concurrency'
self._drop_all_other_policies(required_policy)
# Get threshold from the policy.
policy = Policy.get_by_ref(required_policy)
threshold = policy.parameters.get('threshold', 0)
self.assertGreater(threshold, 0)
# Launch instances of the workflow up to threshold.
for i in range(0, threshold):
liveaction = LiveActionDB(action=WF1_NAME, parameters={'friend': 'friend' + str(i)})
liveaction, execution1 = action_service.request(liveaction)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Check number of running instances
running = LiveAction.count(
action=WF1_NAME, status=action_constants.LIVEACTION_STATUS_RUNNING)
self.assertEqual(running, threshold)
# Mock the mistral runner cancel method to assert cancel is called.
mistral_runner_cls = self.get_runner_class('mistral_v2')
with mock.patch.object(mistral_runner_cls, 'cancel', mock.MagicMock(return_value=None)):
# Launch another instance of the workflow with mistral callback defined
# to indicate that this is executed under a workflow.
callback = {
'source': MISTRAL_RUNNER_NAME,
'url': 'http://127.0.0.1:8989/v2/action_executions/12345'
}
params = {'friend': 'grande animalerie'}
liveaction2 = LiveActionDB(action=WF1_NAME, parameters=params, callback=callback)
liveaction2, execution2 = action_service.request(liveaction2)
action_executions.ActionExecutionManager.update.assert_called_once_with(
'12345',
output='{"error": "Execution canceled by user."}',
state='CANCELLED'
)
liveaction2 = LiveAction.get_by_id(str(liveaction2.id))
self.assertEqual(liveaction2.status, action_constants.LIVEACTION_STATUS_CANCELED)
# Assert cancel has been called.
mistral_runner_cls.cancel.assert_called_once_with()
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mo
|
hellsgate1001/graphs
|
hack_plot/migrations/0010_auto_20150705_2020.py
|
Python
|
mit
| 1,309
| 0.002292
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hack_plot', '0009_auto_20150703_2236'),
]
operations = [
migrations.CreateModel(
name='SshHackLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('longitude', models.DecimalField(max_digits=10, decimal_places=6)),
('latitude', models.DecimalField(max_digits=10, decimal_places=6)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='sshhacklocation',
unique_together=set([('longitude', 'latitude')]),
),
migrations.RemoveField(
model_name='sshhackip',
            name='latitude',
),
migrations.RemoveField(
model_name='sshhackip',
name='longitude',
),
migrations.AddField(
model_name='sshhackip',
name='location',
field=models.ForeignKey(default=1, to='hack_plot.SshHackLocation'),
preserve_default=False,
),
]
|
Zulan/PBStats
|
CvGameCoreDLL/update_interface_docstrings.py
|
Python
|
gpl-2.0
| 5,421
| 0.00535
|
#!/usr/bin/env python
# The function declarations of Boost::Python objects are hardcoded
# as strings. This script shows changes to / updates these strings.
#
# (Example of such issue in Civ4:BTS is CyPlayer::initUnit.)
#
import os
import glob
import pdb
# pdb.set_trace()
REWRITE_FILES = True
VERBOSE = 1
DEBUG_WRITE = False # Write into /run/shm/[CvFoobar].tmp.cpp
file_cache = dict()
def get_function(line):
if "python::init" in line: # Constructor line is without name.
return None
b = line.find("&")
if b == -1: raise Exception("No begin of function name found.")
e = line.find(",", b)
if e == -1: e = line.find(")", b)
if e == -1: raise Exception("No end of function name found.")
return (line[b+1:e], b+1, e)
def get_doc(line):
# Restrict on substring after second ','
c1 = line.find(",")
c2 = line.find(",", c1+1)
if c2 == -1: c2 = line.find(")", c1+1)
b = line.rfind(", \"", c2)
if b == -1: return (", (\"\"", c2+1, c2+1)
e = line.find("\"", b+3)
if e == -1: return (", (\"\"", c2+1, c2+1)
return (line[b+2:e+1], b+2, e+1)
def get_cpp_filename(func_name):
""" Return filename WITHOUT extension. """
return func_name[:func_name.find(":")]
def get_sub_funcname(func_name):
""" Return (right most) substring [b] of [a]::[b]. """
return func_name[func_name.rfind("::")+2:]
def get_cpp_file(fname, explizit_fname=None):
if fname in file_cache:
return file_cache[fname]
if VERBOSE: print("\tLoad %s..." % (fname))
try:
with open(fname+".cpp", "r") as f:
file_cache[fname] = f.readlines()
    except IOError:
if fname == "CvInfos": # Prevent rekursion
raise Exception("CvInfos.cpp missing")
info = get_cpp_file("CvInfos")
file_cache[fname] = info
return file_cache[fname]
def clean_args(sArgs):
""" Convert boost::python::list& into list, etc. """
sArgs = sArgs.replace("&", "")
    colon_pos = sArgs.find("::")
while colon_pos > -1:
b = max([
sArgs.rfind(" ", 0, colon_pos),
sArgs.rfind("(", 0, colon_pos)])
lArgs = [c for c in sArgs]
# print("Remove %i %i %s" % (b+1, colon_pos+2, lArgs[b+1:colon_pos+2]))
lArgs[b+1:colon_pos+2] = []
sArgs = "".join(lArgs)
colon_pos = sArgs.find("::")
sArgs = sArgs.replace(" /*", " (")
sArgs = sArgs.replace("*/ ", ") ")
return sArgs
def get_new_doc(func_name):
cpp_file = get_cpp_file(get_cpp_filename(func_name))
loc_func_name = get_sub_funcname(func_name)
search_pat = "::%s" % (loc_func_name)
if VERBOSE: print("Search %s" % (search_pat))
for iLine in range(len(cpp_file)):
line = cpp_file[iLine]
if search_pat in line:
# Check if declaration is in one line
n = 1
while n < 5 and len(line.split("(")) != len(line.split(")")):
                line = line[:-1]  # Remove '\n'
line += cpp_file[iLine+n]
n += 1
args = line[line.find("("):line.find(")")+1]
# ret_arg = line[:line.find(" ")]
ret_arg = line[:line.rfind(" ", 0, line.find("::"))]
args = clean_args(args)
ret_arg = clean_args(ret_arg)
return "%s %s" % (ret_arg, args)
# "Function not found"
return None
def save_comment_string(old_doc, new_doc):
""" Transfer comment after ' - ' string in old doc string. """
com = " - "
if com in old_doc and com not in new_doc:
new_doc = new_doc + old_doc[old_doc.find(com):old_doc.rfind("\"")]
return new_doc
def update(line):
if VERBOSE: print(line)
tFunc = get_function(line)
if tFunc is None:
return line
tDoc = get_doc(line)
sOldDoc = tDoc[0]
sNewDoc = get_new_doc(tFunc[0])
if sNewDoc is None:
if VERBOSE: print("\t Function not found: %s" %(tFunc[0]))
return line
sNewDoc = save_comment_string(sOldDoc, sNewDoc)
# print("----- %s\n%s\n\"%s\"\n" % (tFunc[0], sOldDoc, sNewDoc))
# Replace ')' with ', ' if line without doc string get new one.
end = line[tDoc[1]-1]
end2 = ""
if end == ")":
end = ", "
end2 = ")"
newLine = "%s%s\"%s\"%s%s\r\n" % (line[:tDoc[1]-1], end, sNewDoc,
end2, line[tDoc[2]])
return newLine
if __name__ == "__main__":
interfaces = glob.glob("Cy*Interface*.cpp")
# interfaces = ['CyAreaInterface.cpp']
for interface in interfaces:
print("Handle " + interface)
with open(interface, "r") as f:
txt = f.readlines()
new_txt = []
for line in txt:
if ".def(" in line:
line = update(line)
new_txt.append(line)
if new_txt != txt:
# print(new_txt)
out = "".join(new_txt)
out = out.replace('\r\r', '\r') # Source unknown.
if REWRITE_FILES:
print("Detect change... and rewrite file " + interface)
with open(interface, "w") as f:
f.write(out)
if DEBUG_WRITE:
dfile = "/run/shm/" + interface.replace(".", ".tmp.")
print("Detect change... and write file " + dfile)
with open(dfile, "w") as f:
f.write(out)
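# Illustration on a hypothetical input line (not taken from the Civ4 sources):
# for a line such as
#     .def("getID", &CyPlayer::getID, "int ()")
# get_function(line) returns ("CyPlayer::getID", ...), get_doc(line) returns the
# existing docstring '"int ()"', and update(line) replaces that docstring with the
# signature that get_new_doc() recovers from CyPlayer.cpp.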
|
balloob/home-assistant
|
homeassistant/components/rainmachine/__init__.py
|
Python
|
apache-2.0
| 15,955
| 0.001818
|
"""Support for RainMachine devices."""
import asyncio
from datetime import timedelta
import logging
from regenmaschine import Client
from regenmaschine.errors import RainMachineError
import voluptuous as vol
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import verify_domain_control
from .const import (
CONF_ZONE_RUN_TIME,
DATA_CLIENT,
DATA_PROGRAMS,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DATA_ZONES,
DATA_ZONES_DETAILS,
DEFAULT_ZONE_RUN,
DOMAIN,
PROGRAM_UPDATE_TOPIC,
SENSOR_UPDATE_TOPIC,
ZONE_UPDATE_TOPIC,
)
_LOGGER = logging.getLogger(__name__)
CONF_PROGRAM_ID = "program_id"
CONF_SECONDS = "seconds"
CONF_ZONE_ID = "zone_id"
DATA_LISTENER = "listener"
DEFAULT_ATTRIBUTION = "Data provided by Green Electronics LLC"
DEFAULT_ICON = "mdi:water"
DEFAULT_SCAN_INTERVAL = timedelta(seconds=60)
DEFAULT_SSL = True
SERVICE_ALTER_PROGRAM = vol.Schema({vol.Required(CONF_PROGRAM_ID): cv.positive_int})
SERVICE_ALTER_ZONE = vol.Schema({vol.Required(CONF_ZONE_ID): cv.positive_int})
SERVICE_PAUSE_WATERING = vol.Schema({vol.Required(CONF_SECONDS): cv.positive_int})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema(
{vol.Required(CONF_PROGRAM_ID): cv.positive_int}
)
SERVICE_START_ZONE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN): cv.positive_int,
}
)
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema(
{vol.Required(CONF_PROGRAM_ID): cv.positive_int}
)
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({vol.Required(CONF_ZONE_ID): cv.positive_int})
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.119")
async def async_setup(hass, config):
"""Set up the RainMachine component."""
hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass, config_entry):
"""Set up RainMachine as config entry."""
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_IP_ADDRESS]
if CONF_ZONE_RUN_TIME in config_entry.data:
# If a zone run time exists in the config entry's data, pop it and move it to
# options:
data = {**config_entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**config_entry.options,
CONF_ZONE_RUN_TIME: data.pop(CONF_ZONE_RUN_TIME),
}
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
_verify_domain_control = verify_domain_control(hass, DOMAIN)
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(session=websession)
try:
await client.load_local(
config_entry.data[CONF_IP_ADDRESS],
config_entry.data[CONF_PASSWORD],
port=config_entry.data[CONF_PORT],
ssl=config_entry.data.get(CONF_SSL, DEFAULT_SSL),
)
except RainMachineError as err:
_LOGGER.error("An error occurred: %s", err)
raise ConfigEntryNotReady from err
else:
# regenmaschine can load multiple controllers at once, but we only grab the one
# we loaded above:
controller = next(iter(client.controllers.values()))
rainmachine = RainMachine(hass, config_entry, controller)
# Update the data object, which at this point (prior to any sensors registering
# "interest" in the API), will focus on grabbing the latest program and zone data
|
:
await rainmachine.async_update()
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine
for component in ("binary_sensor", "sensor", "switch"):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
@_verify_domain_control
async def disable_program(call):
"""Disable a program."""
await rainmachine.controller.programs.disable(call.data[CONF_PROGRAM_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def disable_zone(call):
"""Disable a zone."""
await rainmachine.controller.zones.disable(call.data[CONF_ZONE_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def enable_program(call):
"""Enable a program."""
await rainmachine.controller.programs.enable(call.data[CONF_PROGRAM_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def enable_zone(call):
"""Enable a zone."""
await rainmachine.controller.zones.enable(call.data[CONF_ZONE_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def pause_watering(call):
"""Pause watering for a set number of seconds."""
await rainmachine.controller.watering.pause_all(call.data[CONF_SECONDS])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def start_program(call):
"""Start a particular program."""
await rainmachine.controller.programs.start(call.data[CONF_PROGRAM_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def start_zone(call):
"""Start a particular zone for a certain amount of time."""
await rainmachine.controller.zones.start(
call.data[CONF_ZONE_ID], call.data[CONF_ZONE_RUN_TIME]
)
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def stop_all(call):
"""Stop all watering."""
await rainmachine.controller.watering.stop_all()
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def stop_program(call):
"""Stop a program."""
await rainmachine.controller.programs.stop(call.data[CONF_PROGRAM_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def stop_zone(call):
"""Stop a zone."""
await rainmachine.controller.zones.stop(call.data[CONF_ZONE_ID])
await rainmachine.async_update_programs_and_zones()
@_verify_domain_control
async def unpause_watering(call):
"""Unpause watering."""
await rainmachine.controller.watering.unpause_all()
await rainmachine.async_update_programs_and_zones()
for service, method, schema in [
("disable_program", disable_program, SERVICE_ALTER_PROGRAM),
("disable_zone", disable_zone, SERVICE_ALTER_ZONE),
("enable_program", enable_program, SERVICE_ALTER_PROGRAM),
("enable_zone", enable_zone, SERVICE_ALTER_ZONE),
("pause_watering", pause_watering, SERVICE_PAUSE_WATERING),
("start_program", start_program, SERVICE_START_PROGRAM_SCHEMA),
("start_zone", start_zone, SERVICE_START_ZONE_SCHEMA),
("stop_all", stop_all, {}),
("stop_program", stop_program, SERVICE_STOP_PROGRAM_SCHEMA),
("stop_zone", stop_zone, SERVICE_STOP_ZONE_SCHEMA),
("unpause_watering", unpause_watering, {}),
]:
hass.services.async_register(DOMAIN, service, method, schema=schema)
hass.data[DOMAIN][DATA_LISTENER] = config_entry.add_update_listener(
async_reload_entry
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an OpenUV config entry."""
hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
cancel_listener = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id
|
cernops/cloudbase-init
|
cloudbaseinit/tests/metadata/services/test_ec2service.py
|
Python
|
apache-2.0
| 5,279
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import posixpath
import unittest
from oslo.config import cfg
from six.moves.urllib import error
from cloudbaseinit.metadata.services import base
from cloudbaseinit.metadata.services import ec2service
CONF = cfg.CONF
class EC2ServiceTest(unittest.TestCase):
def setUp(self):
CONF.set_override('retry_count_interval', 0)
self._service = ec2service.EC2Service()
@mock.patch('cloudbaseinit.utils.network.check_metadata_ip_route')
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'.get_host_name')
def _test_load(self, mock_get_host_name, mock_check_metadata_ip_route,
side_effect):
mock_get_host_name.side_effect = [side_effect]
response = self._service.load()
mock_check_metadata_ip_route.assert_called_once_with(
CONF.ec2_metadata_base_url)
mock_get_host_name.assert_called_once()
if side_effect is Exception:
self.assertFalse(response)
else:
self.assertTrue(response)
def test_load(self):
self._test_load(side_effect=None)
def test_load_exception(self):
self._test_load(side_effect=Exception)
@mock.patch('six.moves.urllib.request.urlopen')
def _test_get_response(self, mock_urlopen, ret_value):
req = mock.MagicMock()
mock_urlopen.side_effect = [ret_value]
is_instance = isinstance(ret_value, error.HTTPError)
if is_instance and ret_value.code == 404:
self.assertRaises(base.NotExistingMetadataException,
                              self._service._get_response, req)
elif is_instance and ret_value.code != 404:
self.assertRaises(error.HTTPError,
self._service._get_response, req)
else:
response = self._service._get_response(req)
self.assertEqual(ret_value, response)
mock_urlopen.assert_called_once_with(req)
def test_get_response(self):
self._test_get_response(ret_value=None)
def test_get_response_error_404(self):
err = error.HTTPError("http://169.254.169.254/", 404,
'test error 404', {}, None)
self._test_get_response(ret_value=err)
def test_get_response_error_other(self):
err = error.HTTPError("http://169.254.169.254/", 409,
'test error 409', {}, None)
self._test_get_response(ret_value=err)
@mock.patch('six.moves.urllib.request.Request')
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_response')
def test_get_data(self, mock_get_response, mock_Request):
response = self._service._get_data('fake')
fake_path = posixpath.join(CONF.ec2_metadata_base_url, 'fake')
mock_Request.assert_called_once_with(fake_path)
mock_get_response.assert_called_once_with(mock_Request())
self.assertEqual(mock_get_response.return_value.read.return_value,
response)
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_cache_data')
def test_get_host_name(self, mock_get_cache_data):
response = self._service.get_host_name()
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/local-hostname' % self._service._metadata_version)
self.assertEqual(mock_get_cache_data.return_value, response)
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_cache_data')
def test_get_instance_id(self, mock_get_cache_data):
response = self._service.get_instance_id()
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/instance-id' % self._service._metadata_version)
self.assertEqual(mock_get_cache_data.return_value, response)
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_cache_data')
def test_get_public_keys(self, mock_get_cache_data):
mock_get_cache_data.side_effect = ['key=info', 'fake key']
response = self._service.get_public_keys()
expected = [
mock.call('%s/meta-data/public-keys' %
self._service._metadata_version),
mock.call('%(version)s/meta-data/public-keys/%('
'idx)s/openssh-key' %
{'version': self._service._metadata_version,
'idx': 'key'})]
self.assertEqual(expected, mock_get_cache_data.call_args_list)
self.assertEqual(['fake key'], response)
|
nylas/sync-engine
|
inbox/sendmail/base.py
|
Python
|
agpl-3.0
| 13,544
| 0.000074
|
import pkg_resources
from datetime import datetime
import re
from inbox.api.validation import (
get_recipients, get_attachments, get_thread, get_message)
from inbox.api.err import InputError
from inbox.contacts.process_mail import update_contacts_from_message
from inbox.models import Message, Part
from inbox.models.action_log import schedule_action
from inbox.sqlalchemy_ext.util import generate_public_id
VERSION = pkg_resources.get_distribution('inbox-sync').version
class SendMailException(Exception):
"""
Raised when sending fails.
Parameters
----------
message: string
A descriptive error message.
http_code: int
An appropriate HTTP error code for the particular type of failure.
server_error: string, optional
The error returned by the mail server.
failures: dict, optional
If sending only failed for some recipients, information on the specific
failures.
"""
def __init__(self, message, http_code, server_error=None, failures=None):
self.message = message
self.http_code = http_code
self.server_error = server_error
self.failures = failures
super(SendMailException, self).__init__(
message, http_code, server_error, failures)
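# Illustrative only (all values hypothetical, not from this codebase): a partial
# delivery failure might be reported as
#
#     raise SendMailException(
#         'Sending to at least one recipient failed', http_code=503,
#         server_error='454 4.7.0 Temporary failure, try again later',
#         failures={'bob@example.com': 'mailbox unavailable'})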
def get_sendmail_client(account):
from inbox.sendmail import module_registry
sendmail_mod = module_registry.get(account.provider)
sendmail_cls = getattr(sendmail_mod, sendmail_mod.SENDMAIL_CLS)
sendmail_client = sendmail_cls(account)
return sendmail_client
def create_draft_from_mime(account, raw_mime, db_session):
our_uid = generate_public_id() # base-36 encoded string
new_headers = ('X-INBOX-ID: {0}-0\r\n'
'Message-Id: <{0}-0@mailer.nylas.com>\r\n'
'User-Agent: NylasMailer/{1}\r\n').format(our_uid, VERSION)
new_body = new_headers + raw_mime
with db_session.no_autoflush:
msg = Message.create_from_synced(account, '', '',
datetime.utcnow(), new_body)
if msg.from_addr and len(msg.from_addr) > 1:
raise InputError("from_addr field can have at most one item")
if msg.reply_to and len(msg.reply_to) > 1:
raise InputError("reply_to field can have at most one item")
if msg.subject is not None and not \
isinstance(msg.subject, basestring):
raise InputError('"subject" should be a string')
if not isinstance(msg.body, basestring):
raise InputError('"body" should be a string')
if msg.references or msg.in_reply_to:
msg.is_reply = True
thread_cls = account.thread_cls
msg.thread = thread_cls(
subject=msg.subject,
recentdate=msg.received_date,
                namespace=account.namespace,
                subjectdate=msg.received_date)
msg.is_created = True
msg.is_sent = True
msg.is_draft = False
msg.is_read = True
db_session.add(msg)
db_session.flush()
return msg
def block_to_part(block, message, namespace):
inline_image_uri = r'cid:{}'.format(block.public_id)
is_inline = re.search(inline_image_uri, message.body) is not None
# Create a new Part object to associate to the message object.
# (You can't just set block.message, because if block is an
# attachment on an existing message, that would dissociate it from
# the existing message.)
part = Part(block=block)
part.content_id = block.public_id if is_inline else None
part.namespace_id = namespace.id
part.content_disposition = 'inline' if is_inline else 'attachment'
part.is_inboxapp_attachment = True
return part
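# Illustration (hypothetical values): if block.public_id is 'abc123' and the message
# body contains <img src="cid:abc123">, the Part built above gets content_id 'abc123'
# and content_disposition 'inline'; otherwise content_id is None and the disposition
# falls back to 'attachment'.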
def create_message_from_json(data, namespace, db_session, is_draft):
""" Construct a Message instance from `data`, a dictionary representing the
POST body of an API request. All new objects are added to the session, but
not committed."""
# Validate the input and get referenced objects (thread, attachments)
# as necessary.
to_addr = get_recipients(data.get('to'), 'to')
cc_addr = get_recipients(data.get('cc'), 'cc')
bcc_addr = get_recipients(data.get('bcc'), 'bcc')
from_addr = get_recipients(data.get('from'), 'from')
reply_to = get_recipients(data.get('reply_to'), 'reply_to')
if from_addr and len(from_addr) > 1:
raise InputError("from_addr field can have at most one item")
if reply_to and len(reply_to) > 1:
raise InputError("reply_to field can have at most one item")
subject = data.get('subject')
if subject is not None and not isinstance(subject, basestring):
raise InputError('"subject" should be a string')
body = data.get('body', '')
if not isinstance(body, basestring):
raise InputError('"body" should be a string')
blocks = get_attachments(data.get('file_ids'), namespace.id, db_session)
reply_to_thread = get_thread(data.get('thread_id'), namespace.id,
db_session)
reply_to_message = get_message(data.get('reply_to_message_id'),
namespace.id, db_session)
if reply_to_message is not None and reply_to_thread is not None:
if reply_to_message not in reply_to_thread.messages:
raise InputError('Message {} is not in thread {}'.
format(reply_to_message.public_id,
reply_to_thread.public_id))
with db_session.no_autoflush:
account = namespace.account
dt = datetime.utcnow()
uid = generate_public_id()
to_addr = to_addr or []
cc_addr = cc_addr or []
bcc_addr = bcc_addr or []
blocks = blocks or []
if subject is None:
# If this is a reply with no explicitly specified subject, set the
# subject from the prior message/thread by default.
# TODO(emfree): Do we want to allow changing the subject on a reply
# at all?
if reply_to_message is not None:
subject = reply_to_message.subject
elif reply_to_thread is not None:
subject = reply_to_thread.subject
subject = subject or ''
message = Message()
message.namespace = namespace
message.is_created = True
message.is_draft = is_draft
message.from_addr = from_addr if from_addr else \
[(account.name, account.email_address)]
# TODO(emfree): we should maybe make received_date nullable, so its
# value doesn't change in the case of a drafted-and-later-reconciled
# message.
message.received_date = dt
message.subject = subject
message.body = body
message.to_addr = to_addr
message.cc_addr = cc_addr
message.bcc_addr = bcc_addr
message.reply_to = reply_to
# TODO(emfree): this is different from the normal 'size' value of a
# message, which is the size of the entire MIME message.
message.size = len(body)
message.is_read = True
message.is_sent = False
message.public_id = uid
message.version = 0
message.regenerate_nylas_uid()
# Set the snippet
message.snippet = message.calculate_html_snippet(body)
# Associate attachments to the draft message
for block in blocks:
message.parts.append(block_to_part(block, message, namespace))
update_contacts_from_message(db_session, message, namespace)
if reply_to_message is not None:
message.is_reply = True
_set_reply_headers(message, reply_to_message)
thread = reply_to_message.thread
message.reply_to_message = reply_to_message
elif reply_to_thread is not None:
message.is_reply = True
thread = reply_to_thread
# Construct the in-reply-to and references headers from the last
# message currently in the thread.
previous_messages = [m for m in thread.messages if not m.is_draft]
if previous_messages:
|
RAPD/RAPD
|
src/utils/lock.py
|
Python
|
agpl-3.0
| 1,987
| 0.002013
|
"""
Helper for keeping processes singletons
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016-2018 Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-03-02"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import fcntl
import os
def lock_file(file_path):
"""
Method to make sure only one instance is running on this machine.
If file_path is False, no locking will occur
    If file_path is not False and is already locked, True will be returned
    If file_path is not False and can be locked, False will be returned
Keyword arguments
file_path -- potential file for maintaining lock
"""
# If file_path is a path, try to lock
if file_path:
# Create the directory for file_path if it does not exist
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
global _file_handle
_file_handle = open(file_path, "w")
try:
fcntl.lockf(_file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
return False
except IOError:
#raise Exception("%s is already locked, unable to run" % file_path)
return True
    # If file_path is False, no lock is attempted and True is returned
else:
return True
def close_lock_file():
"""Close the _file_handle handle."""
_file_handle.close()
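# Hypothetical usage sketch (the path and flow below are illustrative, not part of RAPD):
if __name__ == "__main__":
    if lock_file("/tmp/rapd_example/process.lock"):
        print("Another instance already holds the lock; exiting")
    else:
        print("Lock acquired; running as the only instance")
        close_lock_file()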
|
jocave/snapcraft
|
snapcraft/tests/test_commands_prime.py
|
Python
|
gpl-3.0
| 5,460
| 0
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import os.path
import fixtures
from snapcraft.main import main
from snapcraft import tests
class PrimeCommandTestCase(tests.TestCase):
yaml_template = """name: prime-test
version: 1.0
summary: test prime
description: if the prime is successful the state file will be updated
confinement: strict
parts:
{parts}"""
yaml_part = """ prime{:d}:
plugin: nil"""
def make_snapcraft_yaml(self, n=1):
parts = '\n'.join([self.yaml_part.format(i) for i in range(n)])
super().make_snapcraft_yaml(self.yaml_template.format(parts=parts))
parts = []
for i in range(n):
part_dir = os.path.join(self.parts_dir, 'prime{}'.format(i))
state_dir = os.path.join(part_dir, 'state')
parts.append({
'part_dir': part_dir,
'state_dir': state_dir,
})
return parts
def test_prime_invalid_part(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml()
with self.assertRaises(SystemExit) as raised:
main(['prime', 'no-prime', ])
self.assertEqual(1, raised.exception.code)
self.assertEqual(
fake_logger.output,
"The part named 'no-prime' is not defined in 'snapcraft.yaml'\n")
def test_prime_defaults(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
parts = self.make_snapcraft_yaml()
main(['prime'])
self.assertTrue(os.path.exists(self.snap_dir),
'Expected a prime directory')
self.assertTrue(
os.path.exists(
os.path.join(self.snap_dir, 'meta', 'snap.yaml')),
'Expected a snap.yaml')
self.assertTrue(os.path.exists(self.stage_dir),
'Expected a stage directory')
self.assertTrue(os.path.exists(self.parts_dir),
'Expected a parts directory')
self.assertTrue(os.path.exists(parts[0]['part_dir']),
'Expected a part directory for the build0 part')
self.verify_state('build0', parts[0]['state_dir'], 'prime')
def test_prime_one_part_only_from_3(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
parts = self.make_snapcraft_yaml(n=3)
main(['prime', 'prime1'])
self.assertFalse(
os.path.exists(
os.path.join(self.snap_dir, 'meta', 'snap.yaml')),
'There should not be a snap.yaml')
self.assertTrue(os.path.exists(self.snap_dir),
'Expected a prime directory')
self.assertTrue(os.path.exists(self.stage_dir),
'Expected a stage directory')
self.assertTrue(os.path.exists(self.parts_dir),
'Expected a parts directory')
self.assertTrue(os.path.exists(parts[1]['part_dir']),
'Expected a part directory for the prime1 part')
self.verify_state('prime1', parts[1]['state_dir'], 'prime')
for i in [0, 2]:
self.assertFalse(os.path.exists(parts[i]['part_dir']),
'Pulled wrong part')
self.assertFalse(os.path.exists(parts[i]['state_dir']),
                             'Expected for only to be a state file for build1')
def test_prime_ran_twice_is_a_noop(self):
fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(fake_logger)
parts = self.make_snapcraft_yaml()
main(['prime'])
self.assertEqual(
'Preparing to pull prime0 \n'
'Pulling prime0 \n'
'Preparing to build prime0 \n'
'Building prime0 \n'
'Staging prime0 \n'
'Priming prime0 \n',
fake_logger.output)
self.assertTrue(os.path.exists(self.stage_dir),
'Expected a stage directory')
self.assertTrue(os.path.exists(self.parts_dir),
'Expected a parts directory')
self.assertTrue(os.path.exists(parts[0]['part_dir']),
'Expected a part directory for the build0 part')
self.verify_state('build0', parts[0]['state_dir'], 'prime')
fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(fake_logger)
main(['prime'])
self.assertEqual(
'Skipping pull prime0 (already ran)\n'
'Skipping build prime0 (already ran)\n'
'Skipping stage prime0 (already ran)\n'
'Skipping prime prime0 (already ran)\n',
fake_logger.output)
|
TheCheshireFox/foxy-player
|
foxy_player/foxy_player_api/migrations/0010_auto_20170416_1306.py
|
Python
|
apache-2.0
| 419
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-16 10:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('foxy_player_api', '0009_auto_20170415_2318'),
]
operations = [
migrations.AlterUniqueTogether(
            name='playlisttracks',
unique_together=set([]),
),
]
|
rocky/python-uncompyle6
|
uncompyle6/semantics/customize26_27.py
|
Python
|
gpl-3.0
| 2,225
| 0.002247
|
# Copyright (c) 2019 2021 by Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Isolate Python 2.6 and 2.7 version-specific semantic actions here.
"""
from uncompyle6.semantics.consts import TABLE_DIRECT
def customize_for_version26_27(self, version):
########################################
# Python 2.6+
    # except <condition> as <var>
# vs. older:
# except <condition> , <var>
#
# For 2.6 we use the older syntax which
# matches how we parse this in bytecode
########################################
if version > (2, 6):
TABLE_DIRECT.update({
'except_cond2': ( '%|except %c as %c:\n', 1, 5 ),
            # When a generator is a single parameter of a function,
            # it doesn't need the surrounding parentheses.
'call_generator': ('%c%P', 0, (1, -1, ', ', 100)),
})
else:
TABLE_DIRECT.update({
'testtrue_then': ( 'not %p', (0, 22) ),
})
# FIXME: this should be a transformation
def n_call(node):
mapping = self._get_mapping(node)
key = node
for i in mapping[1:]:
key = key[i]
pass
if key.kind == 'CALL_FUNCTION_1':
# A function with one argument. If this is a generator,
# no parenthesis is needed.
args_node = node[-2]
if args_node == 'expr':
n = args_node[0]
if n == 'generator_exp':
node.kind = 'call_generator'
pass
pass
self.default(node)
self.n_call = n_call
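    # Illustration (assumed source text, not from this module) of the two
    # version-specific rules handled above:
    #
    #   Python 2.7+ target:   except ValueError as err:
    #   Python <= 2.6 target: except ValueError, err:
    #
    #   A CALL_FUNCTION_1 whose single argument is a generator expression is
    #   rendered without extra parentheses, e.g.  sum(x * x for x in items)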
|
jlongever/redfish-client-python
|
on_http_redfish_1_0/models/computer_system_1_0_0_system_type.py
|
Python
|
apache-2.0
| 2,513
| 0.002388
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ComputerSystem100SystemType(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ComputerSystem100SystemType - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
krafczyk/spack
|
var/spack/repos/builtin/packages/perl-font-ttf/package.py
|
Python
|
lgpl-2.1
| 1,568
| 0.001913
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlFontTtf(PerlPackage):
"""Perl module for TrueType Font hacking"""
homepage = "http://search.cpan.org/~bhallissy/Font-TTF-1.06/lib/Font/TTF.pm"
url = "http://search.cpan.org/CPAN/authors/id/B/BH/BHALLISSY/Font-TTF-1.06.tar.gz"
version('1.06', '241b59310ad4450e6e050d5e790f1b21')
|
kagel/foobnix
|
foobnix/helpers/dialog_entry.py
|
Python
|
gpl-3.0
| 12,111
| 0.004625
|
#-*- coding: utf-8 -*-
'''
Created on 24 авг. 2010
@author: ivan
'''
from gi.repository import Gtk
import logging
from foobnix.fc.fc import FC
from foobnix.helpers.image import ImageBase
from foobnix.util.const import SITE_LOCALE, ICON_FOOBNIX
from foobnix.util.localization import foobnix_localization
from foobnix.gui.service.path_service import get_foobnix_resourse_path_by_name
foobnix_localization()
def responseToDialog(entry, dialog, response):
dialog.response(response)
def file_selection_dialog(title, current_folder=None):
chooser = Gtk.FileSelection(title)
chooser.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(True)
paths = None
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_selections()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no files selected')
chooser.destroy()
return paths
def file_chooser_dialog(title, current_folder=None):
chooser = Gtk.FileChooserDialog(title, action=Gtk.FILE_CHOOSER_ACTION_OPEN, buttons=("folder-open", Gtk.ResponseType.OK))
chooser.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(True)
paths = None
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_filenames()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no files selected')
chooser.destroy()
return paths
def directory_chooser_dialog(title, current_folder=None):
chooser = Gtk.FileChooserDialog(title, action=Gtk.FileChooserAction.SELECT_FOLDER, buttons=("folder-open", Gtk.ResponseType.OK))
chooser.set_default_response(Gtk.ResponseType.OK)
chooser.set_select_multiple(True)
paths = None
if current_folder:
chooser.set_current_folder(current_folder)
response = chooser.run()
if response == Gtk.ResponseType.OK:
paths = chooser.get_filenames()
elif response == Gtk.ResponseType.CANCEL:
logging.info('Closed, no directory selected')
chooser.destroy()
return paths
def one_line_dialog(dialog_title, parent=None, entry_text=None, message_text1=None, message_text2=None):
dialog = Gtk.MessageDialog(
parent,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(dialog_title)
if message_text1:
dialog.set_markup(message_text1)
if message_text2:
dialog.format_secondary_markup(message_text2)
entry = Gtk.Entry()
'''set last widget in action area as default widget (button OK)'''
dialog.set_default_response(Gtk.ResponseType.OK)
'''activate default widget after Enter pressed in entry'''
entry.set_activates_default(True)
if entry_text:
entry.set_text(entry_text)
dialog.vbox.pack_start(entry, True, True, 0)
dialog.show_all()
dialog.run()
text = entry.get_text()
dialog.destroy()
return text if text else None
def two_line_dialog(dialog_title, parent=None, message_text1=None,
message_text2=None, entry_text1="", entry_text2=""):
dialog = Gtk.MessageDialog(
parent,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(dialog_title)
if message_text1:
dialog.set_markup(message_text1)
if message_text2:
dialog.format_secondary_markup(message_text2)
login_entry = Gtk.Entry()
if entry_text1:
login_entry.set_text(entry_text1)
login_entry.show()
password_entry = Gtk.Entry()
if entry_text2:
password_entry.set_text(entry_text2)
password_entry.show()
hbox = Gtk.VBox()
hbox.pack_start(login_entry, False, False, 0)
hbox.pack_start(password_entry, False, False, 0)
dialog.vbox.pack_start(hbox, True, True, 0)
dialog.show_all()
'''set last widget in action area as default widget (button OK)'''
dialog.set_default_response(Gtk.ResponseType.OK)
'''activate default widget after Enter pressed in entry'''
login_entry.set_activates_default(True)
password_entry.set_activates_default(True)
dialog.run()
login_text = login_entry.get_text()
password_text = password_entry.get_text()
dialog.destroy()
return [login_text, password_text] if (login_text and password_text) else [None,None]
def info_dialog(title, message, parent=None):
dialog = Gtk.MessageDialog(
parent,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(title)
dialog.set_markup(title)
dialog.format_secondary_markup(message)
dialog.show_all()
dialog.run()
dialog.destroy()
def info_dialog_with_link(title, version, link):
dialog = Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(title)
dialog.set_markup(title)
dialog.format_secondary_markup("<b>" + version + "</b>")
link = Gtk.LinkButton(link, link)
link.show()
dialog.vbox.pack_end(link, True, True, 0)
dialog.show_all()
dialog.run()
dialog.destroy()
def info_dialog_with_link_and_donate(version):
dialog = Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
None)
dialog.set_icon_from_file(get_foobnix_resourse_path_by_name(ICON_FOOBNIX))
dialog.set_title(_("New foobnix release avaliable"))
dialog.set_markup(_("New foobnix release avaliable"))
dialog.format_secondary_markup("<b>" + version + "</b>")
card = Gtk.LinkButton("http://foobnix.com/%s/download.html"%SITE_LOCALE, _("Download and Donate"))
    #terminal = Gtk.LinkButton("http://www.foobnix.com/donate/eng#terminal", _("Download and Donate by Webmoney or Payment Terminal"))
    # link = Gtk.LinkButton("http://www.foobnix.com/support?lang=%s"%SITE_LOCALE, _("Download"))
frame = Gtk.Frame(label="Please donate and download")
vbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
vbox.set_homogeneous(True)
vbox.pack_start(card, True, True)
#vbox.pack_start(terminal, True, True)
vbox.pack_start(link, True, True)
frame.add(vbox)
image = ImageBase("images/foobnix-slogan.jpg")
dialog.vbox.pack_start(image, True, True)
dialog.vbox.pack_start(frame, True, True)
dialog.vbox.pack_start(Gtk.Label(_("We hope you like the player. We will make it even better.")), True, True)
version_check = Gtk.CheckButton(_("Check for new foobnix release on start"))
version_check.set_active(FC().check_new_version)
dialog.vbox.pack_start(version_check, True, True)
dialog.show_all()
dialog.run()
FC().check_new_version = version_check.get_active()
FC().save()
di
|
logpai/logparser
|
logparser/LogSig/LogSig.py
|
Python
|
mit
| 12,489
| 0.003043
|
"""
Description : This file implements the LogSig algorithm for log parsing
Author : LogPAI team
License : MIT
"""
from datetime import datetime
import random
import math
import time
import operator
import re
import os
import pandas as pd
import hashlib
class Para:
def __init__(self, path, rex, savePath, groupNum, logformat):
self.path = path
self.rex = rex
self.savePath = savePath
self.groupNum = groupNum # partition into k groups
self.logformat = logformat
class LogParser:
def __init__(self, indir, outdir, groupNum, log_format, rex=[], seed=0):
self.para = Para(path=indir, rex=rex, savePath=outdir, groupNum=groupNum, logformat=log_format)
self.wordLL = []
self.loglineNum = 0
self.termpairLLT = []
self.logNumPerGroup = []
self.groupIndex = dict() # each line corresponding to which group
self.termPairLogNumLD = []
self.logIndexPerGroup = []
self.seed = seed
def loadLog(self):
""" Load datasets and use regular expression to split it and remove some columns
"""
print('Loading logs...')
headers, regex = self.generate_logformat_regex(self.para.logformat)
self.df_log = self.log_to_dataframe(os.path.join(self.para.path, self.logname), regex, headers,
self.para.logformat)
for idx, line in self.df_log.iterrows():
line = line['Content']
if self.para.rex:
for currentRex in self.para.rex:
line = re.sub(currentRex, '', line)
wordSeq = line.strip().split()
self.wordLL.append(tuple(wordSeq))
def termpairGene(self):
print('Generating term pairs...')
i = 0
for wordL in self.wordLL:
wordLT = []
for j in range(len(wordL)):
for k in range(j + 1, len(wordL), 1):
if wordL[j] != '[$]' and wordL[k] != '[$]':
termpair = (wordL[j], wordL[k])
wordLT.append(termpair)
self.termpairLLT.append(wordLT)
i += 1
        # termPairLogNumLD is used to count the occurrence of each termpair in each group
for i in range(self.para.groupNum):
newDict = dict()
self.termPairLogNumLD.append(newDict)
# initialize the item value to zero
self.logNumPerGroup.append(0)
# divide logs into initial groupNum groups randomly, the group number of each log is stored in the groupIndex
self.loglineNum = len(self.wordLL)
random.seed(self.seed)
for i in range(self.loglineNum):
ran = random.randint(0, self.para.groupNum - 1) # group number from 0 to k-1
self.groupIndex[i] = ran
self.logNumPerGroup[ran] += 1 # count the number of loglines per group
# count the frequency of each termpairs per group
i = 0
for termpairLT in self.termpairLLT:
j = 0
for key in termpairLT:
currGroupIndex = self.groupIndex[i]
if key not in self.termPairLogNumLD[currGroupIndex]:
self.termPairLogNumLD[currGroupIndex][key] = 1
else:
self.termPairLogNumLD[currGroupIndex][key] += 1
j += 1
i += 1
def LogMessParti(self):
""" Use local search, for each log, find the group that it should be moved to.
in this process, termpairs occurange should also make some changes and logNumber
of corresponding should be changed
"""
print('Log message partitioning...')
changed = True
while changed:
changed = False
i = 0
for termpairLT in self.termpairLLT:
curGroup = self.groupIndex[i]
alterGroup = potenFunc(curGroup, self.termPairLogNumLD, self.logNumPerGroup, i, termpairLT,
self.para.groupNum)
if curGroup != alterGroup:
changed = True
self.groupIndex[i] = alterGroup
# update the dictionary of each group
for key in termpairLT:
# minus 1 from the current group count on this key
self.termPairLogNumLD[curGroup][key] -= 1
if self.termPairLogNumLD[curGroup][key] == 0:
del self.termPairLogNumLD[curGroup][key]
# add 1 to the alter group
if key not in self.termPairLogNumLD[alterGroup]:
self.termPairLogNumLD[alterGroup][key] = 1
else:
self.termPairLogNumLD[alterGroup][key] += 1
self.logNumPerGroup[curGroup] -= 1
self.logNumPerGroup[alterGroup] += 1
i += 1
def signatConstr(self):
""" Calculate the occurancy of each word of each group, and for each group, save the words that
happen more than half all log number to be candidateTerms(list of dict, words:frequency),
"""
print('Log message signature construction...')
# create the folder to save the resulted templates
if not os.path.exists(self.para.savePath):
os.makedirs(self.para.savePath)
wordFreqPerGroup = []
candidateTerm = []
candidateSeq = []
self.signature = []
        # save all the log indexes of each group: logIndexPerGroup
for t in range(self.para.groupNum):
dic = dict()
newlogIndex = []
newCandidate = dict()
wordFreqPerGroup.append(dic)
self.logIndexPerGroup.append(newlogIndex)
candidateSeq.append(newCandidate)
# count the occurence of each word of each log per group
# and save into the wordFreqPerGroup, which is a list of dictionary,
# where each dictionary represents a group, key is the word, value is the occurence
lineNo = 0
for wordL in self.wordLL:
groupIndex = self.groupIndex[lineNo]
self.logIndexPerGroup[groupIndex].append(lineNo)
for key in wordL:
if key not in wordFreqPerGroup[groupIndex]:
wordFreqPerGroup[groupIndex][key] = 1
else:
wordFreqPerGroup[groupIndex][key] += 1
lineNo += 1
# calculate the halfLogNum and select those words whose occurence is larger than halfLogNum
# as constant part and save into candidateTerm
for i in range(self.para.groupNum):
halfLogNum = math.ceil(self.logNumPerGroup[i] / 2.0)
dic = dict((k, v) for k, v in wordFreqPerGroup[i].items() if v >= halfLogNum)
candidateTerm.append(dic)
# scan each logline's each word that also is a part of candidateTerm, put these words together
# as a new candidate sequence, thus, each raw log will have a corresponding candidate sequence
# and count the occurence of these candidate sequence of each group and select the most frequent
# candidate sequence as the signature, i.e. the templates
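        # Illustrative example (hypothetical log, not from the original source):
        # if a group's candidate terms are {'Connection', 'closed'}, the raw log
        # "Connection to host closed" yields the candidate sequence
        # ('Connection', 'closed'); the sequence occurring most often in the
        # group becomes that group's signature, i.e. its template.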
lineNo = 0
for wordL in self.wordLL:
curGroup = self.groupIndex[lineNo]
newCandiSeq = []
for key in wordL:
if key in candidateTerm[curGroup]:
newCandiSeq.append(key)
keySeq = tuple(newCandiSeq)
if keySeq not in candidateSeq[curGroup]:
candidateSeq[curGroup][keySeq] = 1
else:
candidateSeq[curGroup][keySeq] += 1
lineNo += 1
for i in range(self.para.groupNum):
sig = max(candidateSeq[i].items(), key=operator.itemgetter(1))[0]
|
diogocs1/comps
|
web/addons/email_template/wizard/email_template_preview.py
|
Python
|
apache-2.0
| 3,851
| 0.002597
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class email_template_preview(osv.osv_memory):
_inherit = "email.template"
_name = "email_template.preview"
_description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
Return Records of particular Email Template's Model
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('email.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
        result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
            result['res_id'] = records and records[0][0] or False  # select first record as a Default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
crazcalm/PyTN_talk_proposal
|
recipies/recipe1/fib.py
|
Python
|
mit
| 2,169
| 0.004149
|
"""
The Fibonacci Sequence is the series of numbers:
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, ...
The next number is found by adding up the two numbers before it.
The 2 is found by adding the two numbers before it (1+1)
Similarly, the 3 is found by adding the two numbers before it (1+2),
And the 5 is (2+3),
and so on!
"""
"""
Using lists to return a list of fib numbers
"""
def fib1(limit=10):
"""
Returns a list of fib numbers
"""
nth_number = limit
if limit <= 1:
answer = [0]
elif limit == 2:
answer = [0,1]
else:
fib_num = [0,1]
while len(fib_num) < nth_number:
fib1 = fib_num[len(fib_num)-2]
fib2 = fib_num[len(fib_num)-1]
fib3 = fib2 + fib1
fib_num.append(fib3)
answer = fib_num
return answer
"""
How to return a specific fib number.
"""
def fib2(nth_num=10):
"""
Returns the nth fib number
"""
# Base cases
fib1 = 0
fib2 = 1
if nth_num <= 1:
answer = fib1
elif nth_num == 2:
answer = fib2
else:
current_fib = 2
        while nth_num - current_fib > 0:
            fib1, fib2 = fib2, fib1 + fib2
current_fib = current_fib + 1
answer = fib2
return answer
"""
Solve with generators
"""
def fib3(nth_num=10):
"""
A generator that yields fib numbers
"""
# Base case
fib1 = 0
fib2 = 1
if nth_num <= 1:
yield fib1
elif nth_num == 2:
yield fib1
yield fib2
else:
yield fib1
yield fib2
current_fib = 2
while nth_num - current_fib > 0:
fib1, fib2 = fib2, fib1 + fib2
yield fib2
current_fib = current_fib + 1
def fib_list(limit=10):
answer = []
for fib_num in fib3(limit):
answer.append(fib_num)
return answer
def nth_fib_num(nth_num=10):
answer = 0
for fib_num in fib3(nth_num):
answer = fib_num
return answer
if __name__ == "__main__":
print(fib1(10))
print(fib2(10))
print(fib_list(10))
print(nth_fib_num(10))
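
A quick cross-check of the three approaches (an illustrative addition, not part of the original file; it assumes the functions defined above are in scope):

expected = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
assert fib1(10) == expected                          # list-based version
assert [fib2(n) for n in range(1, 11)] == expected   # iterative nth-number version
assert fib_list(10) == expected                      # generator-backed list
assert nth_fib_num(10) == expected[-1]               # generator-backed nth number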
|
chipaca/snapcraft
|
tests/unit/plugins/v1/python/test_sitecustomize.py
|
Python
|
gpl-3.0
| 5,261
| 0.00076
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017,2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from textwrap import dedent
from testtools.matchers import Contains, FileContains
from snapcraft.plugins.v1 import _python
from ._basesuite import PythonBaseTestCase
def _create_site_py(base_dir):
site_py = os.path.join(base_dir, "usr", "lib", "pythontest", "site.py")
os.makedirs(os.path.dirname(site_py))
open(site_py, "w").close()
def _create_user_site_packages(base_dir):
user_site_dir = os.path.join(base_dir, "lib", "pythontest", "site-packages")
os.makedirs(user_site_dir)
class SiteCustomizeTestCase(PythonBaseTestCase):
def setUp(self):
super().setUp()
self.expected_sitecustomize = dedent(
"""\
import site
import os
snap_dir = os.getenv("SNAP")
snapcraft_stage_dir = os.getenv("SNAPCRAFT_STAGE")
snapcraft_part_install = os.getenv("SNAPCRAFT_PART_INSTALL")
# Do not include snap_dir during builds as this will include
# snapcraft's in-snap site directory.
if snapcraft_stage_dir is not None and snapcraft_part_install is not None:
site_directories = [snapcraft_stage_dir, snapcraft_part_install]
else:
site_directories = [snap_dir]
for d in site_directories:
if d:
site_dir = os.path.join(d, "lib/pythontest/site-packages")
site.addsitedir(site_dir)
if snap_dir:
site.ENABLE_USER_SITE = False"""
)
def test_generate_sitecustomize_staged(self):
stage_dir = "stage_dir"
install_dir = "install_dir"
# Create the python binary in the staging area
self._create_python_binary(stage_dir)
# Create a site.py in both staging and install areas
_create_site_py(stage_dir)
_create_site_py(install_dir)
# Create a user site dir in install area
_create_user_site_packages(install_dir)
_python.generate_sitecustomize(
"test", stage_dir=stage_dir, install_dir=install_dir
)
site_path = os.path.join(
install_dir, "usr", "lib", "pythontest", "sitecustomize.py"
)
self.assertThat(site_path, FileContains(self.expected_sitecustomize))
def test_generate_sitecustomize_installed(self):
stage_dir = "stage_dir"
install_dir = "install_dir"
# Create the python binary in the installed area
self._create_python_binary(install_dir)
# Create a site.py in both staging and install areas
_create_site_py(stage_dir)
_create_site_py(install_dir)
# Create a user site dir in install area
_create_user_site_packages(install_dir)
_python.generate_sitecustomize(
"test", stage_dir=stage_dir, install_dir=install_dir
)
site_path = os.path.join(
install_dir, "usr", "lib", "pythontest", "sitecustomize.py"
)
self.assertThat(site_path, FileContains(self.expected_sitecustomize))
def test_generate_sitecustomize_missing_user_site_raises(self):
stage_dir = "stage_dir"
install_dir = "install_dir"
# Create the python binary in the installed area
self._create_python_binary(install_dir)
# Create a site.py in both staging and install areas
_create_site_py(stage_dir)
_create_site_py(install_dir)
# Do NOT create a user site dir, and attempt to generate sitecustomize.
raised = self.assertRaises(
_python.errors.MissingUserSitePackagesError,
_python.generate_sitecustomize,
"test",
stage_dir=stage_dir,
install_dir=install_dir,
)
self.assertThat(str(raised), Contains("Unable to find user site packages"))
def test_generate_sitecustomize_missing_site_py_raises(self):
stage_dir = "stage_dir"
install_dir = "install_dir"
# Create the python binary
|
in the staging area
self._create_python_binary(stage_dir)
# Create a site.py, but only in install area (not staging area)
_create_site_py(install_dir)
# Create a user site dir in install area
_create_user_site_packages(install_dir)
raised = self.assertRaises(
_python.errors.MissingSitePyError,
_python.generate_sitecustomize,
"test",
stage_dir=stage_dir,
install_dir=install_dir,
)
self.assertThat(str(raised), Contains("Unable to find site.py"))
|
thaim/ansible
|
lib/ansible/plugins/callback/syslog_json.py
|
Python
|
mit
| 3,681
| 0.004075
|
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: syslog_json
callback_type: notification
requirements:
- whitelist in configuration
short_description: sends JSON events to syslog
version_added: "1.9"
description:
- This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format
- Before 2.9 only environment variables were available for configuration
options:
server:
description: syslog server that will receive the event
env:
- name: SYSLOG_SERVER
default: localhost
ini:
- section: callback_syslog_json
key: syslog_server
port:
description: port on which the syslog server is listening
env:
- name: SYSLOG_PORT
default: 514
ini:
- section: callback_syslog_json
key: syslog_port
facility:
description: syslog facility to log as
env:
- name: SYSLOG_FACILITY
default: user
ini:
- section: callback_syslog_json
key: syslog_facility
'''
import os
import json
import logging
import logging.handlers
import socket
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
    logs ansible-playbook and ansible runs to a syslog server in json format
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'syslog_json'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self.set_options()
syslog_host = self.get_option("server")
syslog_port = int(self.get_option("port"))
syslog_facility = self.get_option("facility")
        self.logger = logging.getLogger('ansible logger')
self.logger.setLevel(logging.DEBUG)
self.handler = logging.handlers.SysLogHandler(
address=(syslog_host, syslog_port),
facility=syslog_facility
)
self.logger.addHandler(self.handler)
self.hostname = socket.gethostname()
def runner_on_failed(self, host, res, ignore_errors=False):
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def runner_on_ok(self, host, res):
self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def runner_on_skipped(self, host, item=None):
self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')
def runner_on_unreachable(self, host, res):
self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def runner_on_async_failed(self, host, res, jid):
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def playbook_on_import_for_host(self, host, imported_file):
self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)
def playbook_on_not_import_for_host(self, host, missing_file):
self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)
|
myadventure/myadventure-api
|
app/models/__init__.py
|
Python
|
apache-2.0
| 27
| 0
|
"""
Initialize models
"""
| ||
zmap/ztag
|
ztag/annotations/FtpCesarFtpd.py
|
Python
|
apache-2.0
| 1,652
| 0.003632
|
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpCesarFtpd(Annotation):
    protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
impl_re = re.compile("^220[- ]CesarFTP 0\.\d+", re.IGNORECASE)
version_re = re.compile("CesarFTP (\d+\.\d+)([a-z])?", re.IGNORECASE)
tests = {
"FtpCesarFtpd_1": {
"glo
|
bal_metadata": {
"os": OperatingSystem.WINDOWS,
},
"local_metadata": {
"product": "Cesar FTP",
"version": "0.99",
"revision": "g"
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
if self.impl_re.search(banner):
meta.global_metadata.os = OperatingSystem.WINDOWS
meta.local_metadata.product = "Cesar FTP"
version = self.version_re.search(banner).group(1)
meta.local_metadata.version = version
rev = self.version_re.search(banner).group(2)
meta.local_metadata.revision = rev
return meta
""" Tests
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"220 CesarFTP 0.99g Server Welcome !\r\n"
"""
|
kmaiti/AWSAutoScalingWithF5andCloudFormation
|
aws-autoscale-ec2-instance-modify.py
|
Python
|
gpl-3.0
| 6,954
| 0.012511
|
#!/usr/bin/env python
"""
Purpose : Extract next sequence number of auto-scaled instance and set new tag to self instance. Script will be running from new instance.
will take input from command line instead of from json file
Future Plan :
will associate instance to a role based IAM profile
Usage :
python ec2-autoscale-instance-modify.py -a <your aws access_key> -s <aws secret key> -g <auto scale group that used in cloudformation file> -r <region> -n <min_server_number> -c <customer> -t <uat/plab/prod> -p <appname> -d <domainname ie example.net>
"""
__author__ = "kama maiti"
__copyright__ = "Copyright 2016, AWS autoscaled instance tag modification project"
__credits__ = ["kamal maiti"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "kamal maiti"
__email__ = "kamal.maiti@gmail.com"
__status__ = "production/Non-production"
import re
import argparse
import boto.ec2.autoscale
from boto.ec2 import EC2Connection
import shlex, subprocess
akey = ""
skey = ""
ag = ""
rg = ""
min_num = ""
def find_server_number(str):
#Assuming first match only with consecutive three digits
match = []
match = re.findall(r'\d\d\d', str)
if match:
return match #will return a list containg server number
else:
return match #will return blank list
def main():
arg_parser = argparse.ArgumentParser(description='Read autoscale instance')
arg_parser.add_argument('-a', dest='akey',help='Provide AWS_ACCESS_KEY')
arg_parser.add_argument('-s', dest='skey',help='Provide AWS_SECRET_ACCESS_KEY')
arg_parser.add_argument('-g', dest='ag',help='Provide User provided autoscale group name')
arg_parser.add_argument('-r', dest='rg',help='Provide region name')
arg_parser.add_argument('-n', dest='min_num',help='Minimum Server name')
arg_parser.add_argument('-c', dest='customer',help='Name of the customer in short')
arg_parser.add_argument('-t', dest='servertype',help='Type of the server ie prod or uat or plab')
arg_parser.add_argument('-p', dest='purpose',help='Purpose of the Server')
arg_parser.add_argument('-d', dest='domain',help='Domain name that will be appended to server name')
args = arg_parser.parse_args()
#print(args)
access_key = args.akey
secret_key = args.skey
region = args.rg
group_name = str(args.ag)
min_server_num = int(args.min_num)
customer = str(args.customer)
servertype = str(args.servertype)
purpose = str(args.purpose)
domain = str(args.domain)
#created two objects below. One for autocale connection and another for ec2 instance
as_conn = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
ec2_conn = boto.ec2.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
try:
groups = as_conn.get_all_groups()
all_groups = [group.name for group in groups]
for g in all_groups:
if group_name in g: #searching autocaling group that we are concerned with. Note all autoscalling group name should be unique
FOUND_GROUP = g #FOUND_GROUP will save exact AG name. Note that exact AG name is not same as user provided name. It'll check if group_name is subset of g
FOUND_GROUP_WITH_DES = as_conn.get_all_groups(names=[FOUND_GROUP])[0]
instance_ids = [i.instance_id for i in FOUND_GROUP_WITH_DES.instances]
#reservations = ec2_conn.get_all_instances(instance_ids)
instances = ec2_conn.get_only_instances(instance_ids)
#instances = [i for r in reservations for i in r.instances]
lNameTag = []
#collect all tags of all instances and sort Name tags and save them in list.
for i,j in enumerate(instances):
a = instances[i].tags
lNameTag.append(a['Name'])
#process each instances and take their server number in one list
lServerNum = []
if not lNameTag: #checking if list is empty or not. If empty then this is first instance whose server num will be min_server_num
next_number = min_server_num
else:
for server in lNameTag: #iterating each value of "Name" tag
if not find_server_number(server): #if method find_server_number returns null list
next_number = min_server_num
else:
val = find_server_number(server) #got value like [u'101']. Below comand will remove [],' and u
actual_num=str(val).strip('[]').strip('u').strip('\'')
lServerNum.append(int(actual_num)) #actual_num is string, need to convert to int
if not lServerNum: #check if list of server number is blank or not
next_number = min_server_num
else:
maximum_number = max(lServerNum) #used max function to find out maximum number in the list
next_number = maximum_number + 1
#Now we need to save this next_number in a file so that we can collect it and send to other commands.
with open('/tmp/serverno','w') as fd: #created a file and save the number as string. Then read it and used later
fd.write(str(next_number))
with open('/tmp/serverno','r') as fd:
num=fd.read()
#Will modify tag of current instance. Let's build a new tag.
delm = "-" #Delimeter that will be used to join multiple string
seq = ( customer, servertype, purpose, num, domain) #created a tuple
new_tag = delm.join(seq) #joined tuple strings
with open('/tmp/nodename','w') as fd:
                    fd.write(str(new_tag))
#will extract current instance ID using curl. ie curl http://169.254.169.254/latest/meta-data/instance-id
#
cmd = 'curl http://169.254.169.254/latest/meta-data/instance-id'
                # shlex is a simple lexical analyser for splitting a large string into tokens
args = shlex.split(cmd) #args will have value like : ['curl', 'http://169.254.169.254/latest/meta-data/instance-id']
output,error = subprocess.Popen(args,stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() #out and error are saved in variable. communicate will execute comamnd
#o="i-fd96291f" #used for testing
cur_instance_reservation = ec2_conn.get_all_instances(instance_ids=output)
cur_instance = cur_instance_reservation[0].instances[0]
cur_instance.add_tag('Name', new_tag)
finally:
as_conn.close()
ec2_conn.close()
if __name__ == '__main__':
main()
|
cwyark/micropython
|
tests/cpydiff/types_bytes_subscrstep.py
|
Python
|
mit
| 145
| 0
|
"""
categories: Types,bytes
description: Bytes subscr with step != 1 not implemented
cause: Unknown
workaround: Unknown
"""
print(b'123'[0:3:2])
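
For context (an illustrative addition, not part of the original test case): CPython evaluates the expression above to b'13'; on a port without step slicing, an explicit index loop is assumed to give the same bytes:

data = b'123'
manual = bytes(data[i] for i in range(0, len(data), 2))  # b'13' under CPython semantics
print(manual)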
|
back-to/streamlink
|
tests/test_plugins.py
|
Python
|
bsd-2-clause
| 1,609
| 0.000622
|
import imp
import os.path
import pkgutil
import six
import unittest
import streamlink.plugins
from streamlink import Streamlink
class PluginTestMeta(type):
def __new__(mcs, name, bases, dict):
plugin_path = os.path.dirname(streamlink.plugins.__file__)
plugins = []
for loader, pname, ispkg in pkgutil.iter_modules([plugin_path]):
file, pathname, desc = imp.find_module(pname, [plugin_path])
module = imp.load_module(pname, file, pathname, desc)
if hasattr(module, "__plugin__"):
plugins.append((pname))
session = Streamlink()
def gentest(pname):
def load_plugin_test(self):
# Reset file variable to ensure it is still open when doing
# load_plugin else python might open the plugin source .py
# using ascii encoding instead of utf-8.
                # See also open() call here: imp._HackedGetData.get_data
file, pathname, desc = imp.find_module(pname, [plugin_path])
session.load_plugin(pname, file, pathname, desc)
# validate that can_handle_url does not fail
session.plugins[pname].can_handle_url("http://test.com")
return load_plugin_test
for pname in plugins:
dict['test_{0}_load'.format(pname)] = gentest(pname)
return type.__new__(mcs, name, bases, dict)
@six.add_metaclass(PluginTestMeta)
class TestPlugins(unittest.TestCase):
"""
Test that each plugin can be loaded and does not fail when calling can_handle_url.
"""
|
iSTB/python-schemata
|
rst_cleaner.py
|
Python
|
mit
| 1,813
| 0.006067
|
#!/usr/bin/python3
"""
Cleans-up Sphinx-only constructs (ie from README.rst),
so that *PyPi* can format it properly.
To check for remaining errors, install ``sphinx`` and run::
python setup.py --long-description | sed -file 'this_file.sed' | rst2html.py --halt=warning
"""
import re
import sys, io
def yield_sphinx_only_markup(lines):
"""
:param file_inp: a `filename` or ``sys.stdin``?
    :param file_out: a `filename` or ``sys.stdout``?
    """
substs = [
## Selected Sphinx-only Roles.
#
(r':abbr:`([^`]+)`', r'\1'),
(r':ref:`([^`]+)`', r'`\1`_'),
(r':term:`([^`]+)`', r'**\1**'),
(r':dfn:`([^`]+)`', r'**\1**'),
(r':(samp|guilabel|menuselection):`([^`]+)`', r'``\2``'),
## Sphinx-only roles:
# :foo:`bar` --> foo(``bar``)
# :a:foo:`bar` XXX afoo(``bar``)
#
#(r'(:(\w+))?:(\w+):`([^`]*)`', r'\2\3(``\4``)'),
(r':(\w+):`([^`]*)`', r'\1(``\2``)'),
## Sphinx-only Directives.
#
(r'\.\. doctest', r'code-block'),
(r'\.\. plot::', r'.. '),
(r'\.\. seealso', r'info'),
(r'\.\. glossary', r'rubric'),
(r'\.\. figure::', r'.. '),
## Other
#
(r'\|version\|', r'x.x.x'),
]
regex_subs = [ (re.compile(regex, re.IGNORECASE), sub) for (regex, sub) in substs ]
def clean_line(line):
try:
for (regex, sub) in regex_subs:
line = regex.sub(sub, line)
except Exception as ex:
print("ERROR: %s, (line(%s)"%(regex, sub))
raise ex
return line
for line in lines:
yield clean_line(line)
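
A minimal driver sketch (an illustrative addition, not part of the original script; the README.rst filename and printing to stdout are assumptions):

if __name__ == '__main__':
    # Hypothetical usage: clean README.rst and write the result to stdout.
    with io.open('README.rst', encoding='utf-8') as inp:
        for cleaned in yield_sphinx_only_markup(inp):
            sys.stdout.write(cleaned)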
|
espressopp/espressopp
|
testsuite/AdResS/TDforce/test_TDforce.py
|
Python
|
gpl-3.0
| 16,457
| 0.010634
|
#!/usr/bin/env python
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestTDforce(unittest.TestCase):
def setUp(self):
# set up system
system = espressopp.System()
box = (10, 10, 10)
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=system.skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=system.skin)
system.storage = espressopp.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
self.system = system
def test_slab(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 0),
(6, 0, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),
(7, 0, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),
(8, 0, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),
(9, 0, 0, espressopp.Real3D(8.5, 5.0, 5.0), 1.0, 1),
(10, 0, 0, espressopp.Real3D(9.5, 5.0, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
        ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=1.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# set up TD force
thdforce = espressopp.integrator.TDforce(self.system,vl)
thdforce.addForce(itype=3,filename="table_tf.tab",type=1)
integrator.addExtension(thdforce)
# x coordinates of particles before integration
before = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]
# run ten steps
integrator.run(10)
# x coordinates of particles after integration
after = [self.system.storage.getParticle(i).pos[0] for i in range(1,6)]
# run checks (only one particle is in hybrid region, should feel the thermodynamic force, and should hence move along the x direction)
self.assertEqual(before[0], after[0])
self.assertAlmostEqual(after[1], 6.596913, places=5)
self.assertEqual(before[2], after[2])
self.assertEqual(before[3], after[3])
self.assertEqual(before[4], after[4])
def test_fixed_sphere(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 0),
(6, 0, 0, espressopp.Real3D(5.0, 5.5, 5.0), 1.0, 1),
(7, 0, 0, espressopp.Real3D(5.0, 6.5, 5.0), 1.0, 1),
(8, 0, 0, espressopp.Real3D(5.0, 7.5, 5.0), 1.0, 1),
(9, 0, 0, espressopp.Real3D(5.0, 8.5, 5.0), 1.0, 1),
(10, 0, 0, espressopp.Real3D(5.0, 9.5, 5.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=1.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=True)
# initialize lambda values
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system,vl,ftpl)
integrator.addExtension(adress)
espressopp.tools.AdressDecomp(self.system, integrator)
# set up TD force
thdforce = espressopp.integrator.TDforce(self.system,vl)
thdforce.addForce(itype=3,filename="table_tf.tab",type=1)
integrator.addExtension(thdforce)
# y coordinates of particles before integration
before = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]
# run ten steps
integrator.run(10)
# y coordinates of particles after integration
after = [self.system.storage.getParticle(i).pos[1] for i in range(1,6)]
# run checks (as test before, just that particles should move in y-direction given the new setup and the spherical adaptive resolution geometry)
self.assertEqual(before[0], after[0])
self.assertAlmostEqual(after[1], 6.596913, places=5)
self.assertEqual(before[2], after[2])
self.assertEqual(before[3], after[3])
self.assertEqual(before[4], after[4])
def test_single_moving_sphere(self):
# add some particles
particle_list = [
(1, 1, 0, espressopp.Real3D(5.0, 5.0, 5.0), espressopp.Real3D(0.0, 1.0, 0.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(5.0, 6.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(5.0, 7.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 0),
(4, 1, 0, espressopp.Real3D(5.0, 8.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 0),
(5, 1, 0, espressopp.Real3D(5.0, 9.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 0),
(6, 0, 0, espressopp.Real3D(5.0, 5.0, 5.0), espressopp.Real3D(0.0, 1.0, 0.0), 1.0, 1),
(7, 0, 0, espressopp.Real3D(5.0, 6.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 1),
(8, 0, 0, espressopp.Real3D(5.0, 7.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 1),
(9, 0, 0, espressopp.Real3D(5.0, 8.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 1),
(10, 0, 0, espressopp.Real3D(5.0, 9.5, 5.0), espressopp.Real3D(0.0, 0.0, 0.0), 1.0, 1),
]
tuples = [(1,6),(2,7),(3,8),(4,9),(5,10)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'v', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=1.0, dHy=1.0, pids=[1], sphereAdr=True)
|
dimagi/commcare-hq
|
corehq/apps/receiverwrapper/tests/test_submit_errors.py
|
Python
|
bsd-3-clause
| 14,436
| 0.00194
|
import contextlib
import logging
import os
import tempfile
from django.db.utils import InternalError
from django.test import TestCase
from django.test.client import Client
from django.urls import reverse
from botocore.exceptions import ConnectionClosedError
from unittest.mock import patch
from casexml.apps.case.exceptions import IllegalCaseId
from couchforms.models import UnfinishedSubmissionStub
from couchforms.openrosa_response import ResponseNature
from couchforms.signals import successful_form_received
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import WebUser
from corehq.blobs import get_blob_db
from corehq.const import OPENROSA_VERSION_2, OPENROSA_VERSION_3
from corehq.form_processor.models import CommCareCase, XFormInstance
from corehq.form_processor.tests.utils import (
FormProcessorTestUtils,
sharded,
)
from corehq.middleware import OPENROSA_VERSION_HEADER
from corehq.util.test_utils import TestFileMixin, flag_enabled, capture_log_output
FORM_WITH_CASE_ID = 'ad38211be256653bceac8e2156475666'
def tmpfile(mode='w', *args, **kwargs):
fd, path = tempfile.mkstemp(*args, **kwargs)
return (os.fdopen(fd, mode), path)
@sharded
class SubmissionErrorTest(TestCase, TestFileMixin):
file_path = ('data',)
root = os.path.dirname(__file__)
@classmethod
def setUpClass(cls):
super(SubmissionErrorTest, cls).setUpClass()
cls.domain = create_domain("submit-errors")
cls.couch_user = WebUser.create(None, "test", "foobar", None, None)
cls.couch_user.add_domain_membership(cls.domain.name, is_admin=True)
cls.couch_user.save()
cls.client = Client()
cls.client.login(**{'username': 'test', 'password': 'foobar'})
cls.url = reverse("receiver_post", args=[cls.domain])
@classmethod
def tearDownClass(cls):
cls.couch_user.delete(cls.domain.name, deleted_by=None)
cls.domain.delete()
super(SubmissionErrorTest, cls).tearDownClass()
def setUp(self):
        FormProcessorTestUtils.delete_all_xforms(self.domain.name)
def tearDown(self):
FormProcessorTestUtils.delete_all_cases_forms_ledgers(self.domain.name)
UnfinishedSubmissionStub.objects.all().delete()
def _submit(self, formname, open_rosa_header=None):
open_rosa_header = open_rosa_header or OPENROSA_VERSION_2
file_path = self.get_path(formname, '')
with open(file_path, "rb") as f:
res = self.client.post(
self.url,
{"xml_submission_file": f},
** {OPENROSA_VERSION_HEADER: open_rosa_header}
)
return file_path, res
def testSubmitBadAttachmentType(self):
res = self.client.post(self.url, {
"xml_submission_file": "this isn't a file"
})
self.assertEqual(400, res.status_code)
#self.assertIn("xml_submission_file", res.content)
def testSubmitDuplicate(self):
file, res = self._submit('simple_form.xml')
self.assertEqual(201, res.status_code)
self.assertIn(" √ ".encode('utf-8'), res.content)
file, res = self._submit('simple_form.xml')
self.assertEqual(201, res.status_code)
_, res_openrosa3 = self._submit('simple_form.xml', open_rosa_header=OPENROSA_VERSION_3)
self.assertEqual(201, res_openrosa3.status_code)
self.assertIn("Form is a duplicate", res.content.decode('utf-8'))
# make sure we logged it
[log] = XFormInstance.objects.get_forms_by_type(self.domain.name, 'XFormDuplicate', limit=1)
self.assertIsNotNone(log)
self.assertIn("Form is a duplicate", log.problem)
with open(file, 'rb') as f:
self.assertEqual(f.read(), log.get_xml())
def _test_submission_error_post_save(self, openrosa_version):
evil_laugh = "mwa ha ha!"
with failing_signal_handler(evil_laugh):
file, res = self._submit("simple_form.xml", openrosa_version)
if openrosa_version == OPENROSA_VERSION_3:
self.assertEqual(422, res.status_code)
self.assertIn(ResponseNature.POST_PROCESSING_FAILURE.encode('utf-8'), res.content)
else:
self.assertEqual(201, res.status_code)
self.assertIn(ResponseNature.SUBMIT_SUCCESS.encode('utf-8'), res.content)
form_id = 'ad38211be256653bceac8e2156475664'
form = XFormInstance.objects.get_form(form_id, self.domain.name)
self.assertTrue(form.is_normal)
self.assertTrue(form.initial_processing_complete)
stubs = UnfinishedSubmissionStub.objects.filter(
domain=self.domain, xform_id=form_id, saved=True
).all()
self.assertEqual(1, len(stubs))
def test_submission_error_post_save_2_0(self):
self._test_submission_error_post_save(OPENROSA_VERSION_2)
def test_submission_error_post_save_3_0(self):
self._test_submission_error_post_save(OPENROSA_VERSION_3)
# make sure that a re-submission has the same response
self._test_submission_error_post_save(OPENROSA_VERSION_3)
def _test_submit_bad_data(self, bad_data):
f, path = tmpfile(mode='wb', suffix='.xml')
with f:
f.write(bad_data)
with open(path, 'rb') as f:
with capture_log_output('', logging.WARNING) as logs:
res = self.client.post(self.url, {
"xml_submission_file": f
})
self.assertEqual(422, res.status_code)
self.assertIn('Invalid XML', res.content.decode('utf-8'))
# make sure we logged it
[log] = XFormInstance.objects.get_forms_by_type(self.domain.name, 'SubmissionErrorLog', limit=1)
self.assertIsNotNone(log)
self.assertIn('Invalid XML', log.problem)
self.assertEqual(bad_data, log.get_xml())
self.assertEqual(log.form_data, {})
return logs.get_output()
def test_submit_bad_xml(self):
log_output = self._test_submit_bad_data(b'\xad\xac\xab\xd36\xe1\xab\xd6\x9dR\x9b')
self.assertRegexpMatches(log_output, r"Problem receiving submission.*")
def test_submit_bad_device_log(self):
log_output = self._test_submit_bad_data(
"malformed xml dvice log</log></log_subreport></device_report>".encode('utf8')
)
self.assertRegexpMatches(log_output, r"Badly formed device log.*")
def test_missing_xmlns(self):
file, res = self._submit('missing_xmlns.xml')
self.assertEqual(422, res.status_code)
message = "Form is missing a required field: XMLNS"
self.assertIn(message, res.content.decode('utf-8'))
# make sure we logged it
[log] = XFormInstance.objects.get_forms_by_type(self.domain.name, 'SubmissionErrorLog', limit=1)
self.assertIsNotNone(log)
self.assertIn(message, log.problem)
with open(file, 'rb') as f:
self.assertEqual(f.read(), log.get_xml())
@flag_enabled('DATA_MIGRATION')
def test_data_migration(self):
file, res = self._submit('simple_form.xml')
self.assertEqual(503, res.status_code)
message = "Service Temporarily Unavailable"
self.assertIn(message, res.content.decode('utf-8'))
def test_error_saving_normal_form(self):
sql_patch = patch(
'corehq.form_processor.backends.sql.processor.FormProcessorSQL.save_processed_models',
side_effect=InternalError
)
with sql_patch:
with self.assertRaises(InternalError):
_, res = self._submit('form_with_case.xml')
stubs = UnfinishedSubmissionStub.objects.filter(
domain=self.domain, saved=False, xform_id=FORM_WITH_CASE_ID
).all()
self.assertEqual(1, len(stubs))
form = XFormInstance.objects.get_form(FORM_WITH_CASE_ID, self.domain)
self.assertTrue(form.is_error)
self.assertTrue(form.initial_processing_complete)
def _test_case_processing_error(self, openrosa_version):
with patch('casexml.apps.case.xform._get_or_upda
|
tinyogre/zklock
|
zklocktest.py
|
Python
|
lgpl-3.0
| 1,271
| 0.010228
|
#
# To test this, first have a running local zookeeper installation
# (See http://zookeeper.apache.org/)
# Next, open a couple of shells
# Run this in the first one, watch the output. It will create a lock and hold it for 20 seconds.
# Run it again in the second one, watch that it doesn't acquire the lock until the first instance exits,
# and then holds the lock itself for 20 seconds after acquiring it.
#
# You can speed things up by killing the first instance after you
# start the second. The second should immediately acquire the lock.
#
import zklock, time
# You can specify a host to connect(). The default is localhost and
# the default ZooKeeper port.
zklock.connect()
# This creates a global lock named 'test'. Any other zklock connected
# to the same ZooKeeper instance and trying to create a lock of the
# same name will be blocked while this program holds the lock named 'test'
z = zklock.Lock('test')
try:
if z.acquire():
print "zklocktest: Lock acquired"
time.sleep(20)
z.release()
except:
z.release()
with zklock.ScopedLock("scoped_lock_test", block=False) as z:
if z.acquired:
print "Locked!"
time.sleep(20)
else:
print "Could not obtain lock!"
print "zklocktest: Exiting"
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/ApiClient.py
|
Python
|
apache-2.0
| 11,500
| 0.004609
|
#!/usr/bin/env python
"""Wordnik.com's Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates."""
import sys
import os
import re
import urllib.request, urllib.parse, urllib.error
import http.client
import json
import datetime
import mimetypes
import base64
from .models import *
from groupdocs.FileStream import FileStream
from groupdocs import version
class RequestSigner(object):
def __init__(self):
if type(self) == RequestSigner:
raise Exception("RequestSigner is an abstract class and cannot be instantiated.")
def signUrl(self, url):
raise NotImplementedError
def signContent(self, requestBody, headers):
raise NotImplementedError
class DefaultRequestSigner(RequestSigner):
def signUrl(self, url):
return url
def signContent(self, requestBody, headers):
return requestBody
class ApiClient(object):
"""Generic API client for Swagger client library builds"""
def __init__(self, requestSigner=None):
self.signer = requestSigner if requestSigner != None else DefaultRequestSigner()
self.cookie = None
self.headers = {'Groupdocs-Referer': '/'.join((version.__pkgname__, version.__version__))}
self.__debug = False
def setDebug(self, flag, logFilepath=None):
self.__debug = flag
self.__logFilepath = logFilepath
def addHeaders(self, **headers):
self.headers = headers
    def callAPI(self, apiServer, resourcePath, method, queryParams, postData,
                headerParams=None, returnType=str):
if self.__debug and self.__logFilepath:
stdOut = sys.stdout
logFile = open(self.__logFilepath, 'a')
sys.stdout = logFile
url = apiServer + resourcePath
headers = {}
if self.headers:
for param, value in self.headers.items():
headers[param] = value
if headerParams:
for param, value in headerParams.items():
headers[param] = value
isFileUpload = False
if not postData:
headers['Content-type'] = 'text/html'
elif isinstance(postData, FileStream):
isFileUpload = True
if postData.contentType:
headers['Content-type'] = 'application/octet-stream'
if postData.size:
headers['Content-Length'] = str(postData.size)
else:
headers['Content-type'] = 'application/json'
if self.cookie:
headers['Cookie'] = self.cookie
data = None
if queryParams:
# Need to remove None values, these should not be sent
sentQueryParams = {}
for param, value in queryParams.items():
if value != None:
sentQueryParams[param] = value
if sentQueryParams:
url = url + '?' + urllib.parse.urlencode(sentQueryParams)
if method in ['POST', 'PUT', 'DELETE']:
if isFileUpload:
data = postData.inputStream
elif not postData:
data = ""
elif type(postData) not in [str, int, float, bool]:
data = self.signer.signContent(json.dumps(self.sanitizeForSerialization(postData)), headers)
else:
data = self.signer.signContent(postData, headers)
if self.__debug:
http.client.HTTPConnection.debuglevel = 1
if data and not isFileUpload:
data = data.encode('utf-8')
request = MethodRequest(method=method, url=self.encodeURI(self.signer.signUrl(url)), headers=headers,
data=data)
try:
# Make the request
response = urllib.request.urlopen(request)
if 'Set-Cookie' in response.headers:
self.cookie = response.headers['Set-Cookie']
if response.code == 200 or response.code == 201 or response.code == 202:
if returnType == FileStream:
fs = FileStream.fromHttp(response)
if self.__debug: print("\n", "< Response Body:\n", ">>>stream info: fileName=%s contentType=%s size=%s" % (fs.fileName, fs.contentType, fs.size), "\n", sep="")
return fs if 'Transfer-Encoding' in response.headers or (fs.size != None and int(fs.size) > 0) else None
else:
encoding = response.headers.get_content_charset()
if not encoding: encoding = 'iso-8859-1'
string = response.read().decode(encoding)
if self.__debug: print("\n", "< Response Body:\n", string, "\n", sep="")
try:
data = json.loads(string)
except ValueError: # PUT requests don't return anything
data = None
return data
elif response.code == 404:
return None
else:
encoding = response.headers.get_content_charset()
if not encoding: encoding = 'iso-8859-1'
string = response.read().decode(encoding)
try:
msg = json.loads(string)['error_message']
except ValueError:
msg = string
raise ApiException(response.code, msg)
except urllib.error.HTTPError as e:
raise ApiException(e.code, e.msg)
finally:
if isFileUpload:
try:
postData.inputStream.close()
except Exception as e:
pass
if self.__debug:
http.client.HTTPConnection.debuglevel = 0
if self.__logFilepath:
sys.stdout = stdOut
logFile.close()
def toPathValue(self, obj):
"""Serialize a list to a CSV string, if necessary.
Args:
obj -- data object to be serialized
Returns:
string -- json serialization of object
"""
if type(obj) == list:
return ','.join(obj)
else:
return obj
def sanitizeForSerialization(self, obj):
"""Dump an object into JSON for POSTing."""
if not obj:
return None
elif type(obj) in [str, int, float, bool]:
return obj
elif type(obj) == list:
return [self.sanitizeForSerialization(subObj) for subObj in obj]
elif type(obj) == datetime.datetime:
return obj.isoformat()
else:
if type(obj) == dict:
objDict = obj
else:
objDict = obj.__dict__
return {key: self.sanitizeForSerialization(val)
for (key, val) in objDict.items()
if key != 'swaggerTypes' and val != None}
def deserialize(self, obj, objClass):
"""Derialize a JSON string into an object.
Args:
obj -- string or object to be deserialized
            objClass -- class literal for deserialized object, or string
of class name
Returns:
object -- deserialized object"""
if not obj:
return None
# Have to accept objClass as string or actual type. Type could be a
# native Python type, or one of the model classes.
if type(objClass) == str:
if 'list[' in objClass:
match = re.match('list\[(.*)\]', objClass)
subClass = match.group(1)
return [self.deserialize(subObj, subClass) for subObj in obj]
if (objClass in ['int', 'float', 'dict', 'list', 'str']):
objClass = eval(objClass)
else: # not a native type,
|
odoousers2014/LibrERP
|
task_time_control/project_task.py
|
Python
|
agpl-3.0
| 7,349
| 0.004084
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Pexego Sistemas Informáticos (http://www.pexego.es) All Rights Reserved
# $Jesús Ventosinos Mayor$
# $Javier Colmenero Fernández$
# Copyright (c) 2014 Didotech srl (info at didotech.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.tools.translate import _
from openerp.osv import orm, fields
class project_task_history(orm.Model):
_inherit = "project.task.history"
_columns = {
'state': fields.selection([
('draft', 'New'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Done'),
('working', 'Working'),
('cancelled', 'Cancelled')
], 'State', readonly=True, required=True)
}
class time_control_user_task(orm.Model):
_name = 'time.control.user.task'
_columns = {
'user': fields.many2one('res.users', 'user'),
'work_start': fields.datetime('Work start'),
'work_end': fields.datetime('Work end'),
'started_task': fields.many2one('project.task', 'Started task')
}
class project_task(orm.Model):
_inherit = "project.task"
def _get_users_working(self, cr, uid, ids, field_name, args, context=None):
if context is None:
context = {}
res = {}
user_task_obj = self.pool["time.control.user.task"]
for task in self.browse(cr, uid, ids, context):
stream = ''
user_ids = []
user_task_ids = user_task_obj.search(cr, uid, [('started_task', '=', task.id)])
if user_task_ids:
for user_task in user_task_obj.browse(cr, uid, user_task_ids, context):
if user_task.user.name:
stream += user_task.user.name + u","
user_ids.append(user_task.user.id)
res[task.id] = {'working_users': stream, 'user_is_working': uid in user_ids}
else:
res[task.id] = {'working_users': '', 'user_is_working': False}
return res
_columns = {
'other_users_ids': fields.many2many('res.users', 'project_task_user_rel', 'user_id', 'task_id', 'Other users'),
'state': fields.selection([
('draft', 'New'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Done'),
('working', 'Working'),
('cancelled', 'Cancelled')
], 'State', readonly=True, required=True),
        'working_users': fields.function(_get_users_working, method=True, string='Working users', type='char', size=255, multi=True),
        'user_is_working': fields.function(_get_users_working, method=True, string='I am working', type='boolean', multi=True)
}
def stop_task(self, cr, uid, task_id, final, user_task, context=None):
if context is None:
context = {}
self.pool['time.control.user.task'].write(cr, uid, user_task.id, {'work_end': final})
context['user_id'] = uid
context['user_task_id'] = user_task.id
#Call wizard:
wizard_id = self.pool["task.time.control.confirm.wizard"].create(cr, uid, {
'task_to_start': task_id,
'user_task': user_task.id,
'started_task': user_task.started_task.id
}, context=context)
return {
'name': _("Confirm Time"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'task.time.control.confirm.wizard',
'res_id': wizard_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context
}
def work_start_btn(self, cr, uid, task_ids, context):
start = datetime.now()
user_task_obj = self.pool["time.control.user.task"]
project_task_obj = self.pool['project.task']
user_task_ids = user_task_obj.search(cr, uid, [('user', '=', uid), ('started_task', 'in', task_ids)])
if user_task_ids:
user_task = user_task_obj.browse(cr, uid, user_task_ids)[0]
if user_task.started_task:
if user_task.started_task.id == task_ids[0]:
raise orm.except_orm(_("Warning !"), _("Task is alredy started."))
return self.stop_task(cr, uid, task_ids[0], start, user_task, context)
else:
task = project_task_obj.browse(cr, uid, task_ids)[0]
if task.state == 'draft':
self.do_open(cr, uid, task_ids, context)
project_task_obj.write(cr, uid, task_ids, {'state': 'working'})
user_task_obj.write(cr, uid, user_task_ids, {'work_start': start, 'started_task': task_ids[0]})
else:
task = self.pool.get('project.task').browse(cr, uid, task_ids)[0]
if task.state == 'draft':
self.do_open(cr, uid, task_ids, context)
user_task_obj.create(cr, uid, {
'user': uid,
'work_start': start,
'started_task': task_ids[0]
})
project_task_obj.write(cr, uid, task_ids, {'state': 'working'})
return True
def work_end_btn(self, cr, uid, task_ids, context):
end_datetime = datetime.now()
user_task_obj = self.pool["time.control.user.task"]
user_task_ids = user_task_obj.search(cr, uid, [('user', '=', uid), ('started_task', 'in', task_ids)])
if user_task_ids:
user_task = user_task_obj.browse(cr, uid, user_task_ids[0])
if user_task.started_task.id == task_ids[0]:
finished = self.stop_task(cr, uid, None, end_datetime, user_task, context)
if finished:
return finished
else:
raise orm.except_orm(_("Warning!"), _('Task is not init.'))
else:
raise orm.except_orm(_("Warning!"), _('Task started by another user.'))
else:
raise orm.except_orm(_("Warning!"), _('User has no opened tasks.'))
return True
class project_task_work(orm.Model):
_inherit = "project.task.work"
_columns = {
'work_start': fields.datetime('Work start'),
'work_end': fields.datetime('Work end')
}
|
vinni-au/vega-strike
|
data/bases/university_night.py
|
Python
|
gpl-2.0
| 169
| 0.035503
|
import Base
import sys
import industrial_lib
time_of_day='_night'
(landing_platform,bar,weap) = industrial_lib.MakeCorisc (time_of_day,'bases/bartender_university.py')
|
rizar/attention-lvcsr
|
bin/check_all_fst_weights_are_zero.py
|
Python
|
mit
| 633
| 0.004739
|
#!/usr/bin/env python
"""
Check if an FST has only zero weights.
"""
import argparse
import fst
import sys
def main(args):
L = fst.read(args.fst_file)
for state in L:
for arc in state:
if arc.weight != fst.TropicalWeight(0.0):
sys.stderr.write(
"Nonzero weight in the fst: node {} arc {}".format(state, arc))
                exit(1)
if __name__=='__main__':
parser = argparse.ArgumentParser(description="Zero the weight on all transitions in the FST")
parser.add_argument("fst_file", default='-', nargs='?')
args = parser.parse_args()
main(args)
|
lebinh/aq
|
tests/test_sqlite_util.py
|
Python
|
mit
| 2,855
| 0.001051
|
from unittest import TestCase
from aq.sqlite_util import connect, create_table, insert_all
class TestSqliteUtil(TestCase):
def test_dict_adapter(self):
with connect(':memory:') as conn:
conn.execute('CREATE TABLE foo (foo)')
conn.execute('INSERT INTO foo (foo) VALUES (?)', ({'bar': 'blah'},))
values = conn.execute('SELECT * FROM foo').fetchone()
            self.assertEqual(len(values), 1)
self.assertEqual(values[0], '{"bar": "blah"}')
def test_create_table(self):
with connect(':memory:') as conn:
create_table(conn, None, 'foo', ('col1', 'col2'))
tables = conn.execute("PRAGMA table_info(\'foo\')").fetchall()
self.assertEqual(len(tables), 2)
self.assertEqual(tables[0][1], 'col1')
self.assertEqual(tables[1][1], 'col2')
def test_insert_all(self):
class Foo(object):
def __init__(self, c1, c2):
self.c1 = c1
self.c2 = c2
columns = ('c1', 'c2')
values = (Foo(1, 2), Foo(3, 4))
with connect(':memory:') as conn:
create_table(conn, None, 'foo', columns)
insert_all(conn, None, 'foo', columns, values)
rows = conn.execute('SELECT * FROM foo').fetchall()
self.assertTrue((1, 2) in rows, '(1, 2) in rows')
self.assertTrue((3, 4) in rows, '(3, 4) in rows')
def test_json_get_field(self):
with connect(':memory:') as conn:
json_obj = '{"foo": "bar"}'
query = "select json_get('{0}', 'foo')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], 'bar')
def test_json_get_index(self):
with connect(':memory:') as conn:
json_obj = '[1, 2, 3]'
query = "select json_get('{0}', 1)".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], 2)
def test_json_get_field_nested(self):
with connect(':memory:') as conn:
json_obj = '{"foo": {"bar": "blah"}}'
query = "select json_get('{0}', 'foo')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], '{"bar": "blah"}')
query = "select json_get(json_get('{0}', 'foo'), 'bar')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], 'blah')
def test_json_get_field_of_null(self):
with connect(':memory:') as conn:
query = "select json_get(NULL, 'foo')"
self.assertEqual(conn.execute(query).fetchone()[0], None)
def test_json_get_field_of_serialized_null(self):
with connect(':memory:') as conn:
json_obj = 'null'
query = "select json_get('{0}', 'foo')".format(json_obj)
self.assertEqual(conn.execute(query).fetchone()[0], None)
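# Hedged, standard-library-only sketch (the real aq.sqlite_util may differ) of
# a json_get implementation consistent with the behaviour exercised by the
# tests above, registered on a connection via sqlite3.create_function.
def _make_reference_connection():
    import json
    import sqlite3

    def _json_get(serialized, key):
        if serialized is None:
            return None
        obj = json.loads(serialized)
        if obj is None:
            return None
        value = obj[int(key)] if isinstance(obj, list) else obj[key]
        return json.dumps(value) if isinstance(value, (dict, list)) else value

    conn = sqlite3.connect(':memory:')
    conn.create_function('json_get', 2, _json_get)
    return conn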
|
alexherns/biotite-scripts
|
build_connection_graph.py
|
Python
|
mit
| 1,780
| 0.03427
|
#!/usr/bin/env python2.7
import networkx as nx
import matplotlib.pyplot as plt
import sys, argparse, os, re
parser = argparse.ArgumentParser(description='''Visualizes connections in assembly
using networkx_viewer module.''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
#Required arguments
required = parser.add_argument_group('REQUIRED')
required.add_argument('-c', help= 'connections file', required=True, type=argparse.FileType('r'))
#Optional arguments
optional = parser.add_argument_group('OPTIONAL')
optional.add_argument('-h', action="help", help="show this help message and exit")
optional.add_argument('-o', metavar='<*.png>', type=argparse.FileType('w'))
optional.add_argument('-m', metavar='<int>', type=int, default=0)
args = parser.parse_args()
import networkx_viewer as nv
#Build the graph
G= nx.Graph()
nodes= []
edges= {}
for line in args.c:
line= line.strip().split('\t')
if 'accept' not in line or 'flanking' in line:
continue
attr= {}
#line[0]= re.search('(NODE_\d+)', line[0]).group()
#line[2]= re.search('(NODE_\d+)', line[2]).group()
if line[0]==line[2]:
nodes.append([line[0], {'self':'True', 'fill':'blue', 'direction':" ".join(line[:4]), 'count':line[4]}])
print line[0]+"\tSelf-edge"
continue
if line[0] not in nodes:
nodes.append(line[0])
if line[2] not in nodes:
nodes.append(line[2])
edge= sorted([line[0], line[2]])
lookup= "\t".join(edge)
if lookup in edges:
continue
if 'mid' in [line[1], lin
|
e[3]]:
attr= {'fill':'red'}
attr['direction']= " ".join(line[:4])
attr['count']= line[4]
if int(attr['count'])<args.m:
continue
edge.append(attr)
edges[lookup]= edge
G.add_nodes_from(nodes)
G.add_edges_from(edges.values())
#Draw the graph
|
app= nv.Viewer(G)
app.mainloop()
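# Hedged sketch (hypothetical rows, not from a real connections file): the same
# node/edge structure the loop above builds, without launching the viewer, so
# the parsing logic can be checked in isolation.
def _example_graph():
    rows = [
        ['NODE_1', 'end', 'NODE_2', 'start', '12', 'accept'],
        ['NODE_2', 'end', 'NODE_3', 'mid', '3', 'accept'],
    ]
    H = nx.Graph()
    for r in rows:
        H.add_edge(r[0], r[2], direction=' '.join(r[:4]), count=int(r[4]))
    return H  # H['NODE_2']['NODE_3']['count'] == 3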
|
stanford-futuredata/macrobase
|
tools/py_analysis/analyze_cluster.py
|
Python
|
apache-2.0
| 3,142
| 0.005729
|
import pandas as pd
import numpy as np
from sklearn import linear_model, cluster
from collections import defaultdict, Iterable
from itertools import chain, combinations
import operator
import psycopg2
conn = psycopg2.connect("dbname='postgres' user='pbailis' host='localhost'")
cur = conn.cursor()
cols = "hardware_manufacturer,hardware_model,hardware_carrier,android_fw_version,hardware_bootloader"
target = "data_count_minutes"
pred = " < 4"
limit = "LIMIT 10000"
to_select = target+","+cols
sql = """
SELECT %s FROM mapmatch_history H, sf_datasets D WHERE H.dataset_id = D.id AND %s %s %s;""" % (to_select, target, pred, limit)
print sql
cur.execute(sql)
colnames = [desc[0] for desc in cur.description]
cur_score = None
cur_rows = []
df = None
data = pd.DataFrame(cur.fetchall(), columns=colnames)
features = data.drop(target, 1)
pd.set_option('display.max_rows', len(features))
pd.set_option('expand_frame_repr', False)
dummies = pd.get_dummies(features, prefix_sep="//")
print "REGRESSION"
scores = [1./max(r[0], .0001) for r in data.itertuples()]
regr = linear_model.LinearRegression().fit(dummies, scores)
c_with_index = zip(range(0, len(regr.coef_)), regr.coef_)
c_with_index.sort(key = lambda x: x[1])
c_with_index.reverse()
MAX_PRINT = 50
for n in range(0, MAX_PRINT):
(dim, c) = c_with_index[n]
print ": ".join(dummies.columns[dim].split("//")), c
NCLUSTERS = 10  # int(np.sqrt(len(data)/2))
print "K MEANS RESULTS (%d CLUSTERS):" % NCLUSTERS
km = cluster.KMeans(n_clusters=NCLUSTERS).fit(dummies)
THRESH = .5
for centerno in range(0, len(km.cluster_centers_)):
center = km.cluster_centers_[centerno]
nmatches = len([i for i in km.labels_ if i == centerno])
target_vals = [data.iloc[i, 0] for i in range(0, len(data)) if km.labels_[i] == centerno]
print "\n"
print "N: %d, Average: %f, Std.: %f" % (nmatches, float(sum(target_vals))/len(target_vals), np.std(target_vals))
for dim in range(0, len(center)):
val = center[dim]
if val > THRESH:
print ": ".join(dummies.columns[dim].split("//")), val
print "\nDBSCAN RESULTS:"
dbscan = cluster.DBSCAN(eps=pow(2
|
, .5)).fit(dummies)
for centerno in range(0, len(dbscan.components_)):
center = dbscan.components_[centerno]
nmatches = len([i for i in dbscan.labels_ if i == centerno])
t
|
arget_vals = [data.iloc[i, 0] for i in range(0, len(data)) if dbscan.labels_[i] == centerno]
if nmatches == 0:
continue
print "\n"
print "N: %d, Average: %f, Std.: %f" % (nmatches, float(sum(target_vals))/len(target_vals), np.std(target_vals))
for dim in range(0, len(center)):
val = center[dim]
if val > THRESH:
print ": ".join(dummies.columns[dim].split("//")), val
'''
birch = cluster.Birch(threshold=1).fit(dummies)
print len(birch.subcluster_centers_)
print
for center in birch.subcluster_centers_:
for dim in range(0, len(center)):
val = center[dim]
if val > THRESH:
print dim, val, dummies.columns[dim].split("//"),
'''
|
sradevski/homeAutomate
|
scripts/lights_controller.py
|
Python
|
mit
| 665
| 0.040602
|
#!/usr/bin/python
import sys
import remote_core as core
import radio_lights
def main(argv):
config = core.load_config()
lights_config_names = {"1":"door_light", "2":"desk_light", "3": "shelf_light"}
if len(argv) == 1 and len(argv[0]) == 2:
if argv[0] == "an":
argv = ["1n", "2n", "3n"]
elif argv[0] == "af":
argv = ["1f", "2f", "3f"]
for item in argv:
if item[-1:] == 'n':
radio_lights.turn_on_single(config["lights"][lights_config_names[item[:1]]])
elif item[-1:] == 'f':
radio_lights.t
|
urn_off_single(config["lights"][lights_config_names[item[:1]]])
core.write_config(config)
if __name__ == "__main__":
main(sys.argv[1:])
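# Hedged sketch of the argument convention handled above ("1n" = light 1 on,
# "2f" = light 2 off, "an"/"af" = everything on/off), pulled out as a pure
# function so it can be exercised without the radio hardware.
def expand_commands(argv):
    if len(argv) == 1 and argv[0] in ('an', 'af'):
        return ['1' + argv[0][1], '2' + argv[0][1], '3' + argv[0][1]]
    return list(argv)

assert expand_commands(['an']) == ['1n', '2n', '3n']
assert expand_commands(['2f']) == ['2f']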
| |
ChillarAnand/junction
|
junction/profiles/tests.py
|
Python
|
mit
| 519
| 0
|
from django.test import TestCase
from .models import Profile
from django.contrib.auth.models import User
# models test
class ProfileTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='user1', password='123456')
Prof
|
ile.objects.create(city='noida', contact_no=
|
'1234567890')
def test_create_profile(self):
user = User.objects.get(username='user1')
profile_details = Profile.objects.get(user=user)
self.assertEqual(profile_details.city, 'noida')
|
sentriz/steely
|
steely/utils.py
|
Python
|
gpl-3.0
| 800
| 0
|
import os
import imp
from tinydb import TinyDB
from paths import DB_DIR
def scan_plugins_dir(plugins_dir='plugins'):
"""Scan the given dir for files matching the spec for plugin files"""
for plugin_file in os.listdir(plugins_dir):
plugin_path = os.path.join(plugins_dir, plugin_file)
if (not plugin_file.startswith('_') and
plugin_file.endswith('.py') and
os.path.isfile(plugin_path)
|
):
yield plugin_file, plugin_path
def load_plugin(filename, path):
return imp.load_source(file
|
name, path)
def list_plugins():
for plugin_file, plugin_path in scan_plugins_dir():
yield load_plugin(plugin_file, plugin_path)
def new_database(name):
full_path = os.path.join(DB_DIR, f'{name}.json')
return TinyDB(full_path)
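# Hedged alternative to imp.load_source used above: the imp module is
# deprecated on Python 3.4+, and importlib can load the same plugin files
# (assumed to be behaviourally equivalent for these simple modules).
def load_plugin_importlib(name, path):
    import importlib.util
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module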
|
Azure/azure-sdk-for-python
|
sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_kql_script_operations.py
|
Python
|
mit
| 24,579
| 0.005167
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
kql_script_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_by_name_request(
kql_script_name: str,
**kwargs: Any
) -> HttpReque
|
st:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
|
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_by_name_request_initial(
kql_script_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_rename_request_initial(
kql_script_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}/rename')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class KqlScriptOperations(object):
"""KqlScriptOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
kql_script_name: str,
kql_script: "_models.KqlScriptResource",
**kwargs: Any
) -> Optional["_models.KqlScriptResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.KqlScriptResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(kql_script, 'KqlScriptResource')
request = build_create_or_update_request_initial(
kql_script_name=kql_script_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.ru
|
devurandom/portage
|
pym/portage/__init__.py
|
Python
|
gpl-2.0
| 21,844
| 0.031038
|
# portage.py -- core Portage functionality
# Copyright 1998-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
VERSION="HEAD"
# ===========================================================================
# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
# ===========================================================================
try:
import sys
import errno
if not hasattr(errno, 'ESTALE'):
# ESTALE may not be defined on some systems, such as interix.
errno.ESTALE = -1
import re
import types
import platform
# Temporarily delete these imports, to ensure that only the
# wrapped versions are imported by portage internals.
import os
del os
import shutil
del shutil
except ImportError as e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
sys.stderr.write(" "+str(e)+"\n\n");
raise
try:
import portage.proxy.lazyimport
import portage.proxy as proxy
proxy.lazyimport.lazyimport(globals(),
'portage.cache.cache_errors:CacheError',
'portage.checksum',
'portage.checksum:perform_checksum,perform_md5,prelink_capable',
'portage.cvstree',
'portage.data',
'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
'uid,userland,userpriv_groups,wheelgid',
'portage.dbapi',
'portage.dbapi.bintree:bindbapi,binarytree',
'portage.dbapi.cpv_expand:cpv_expand',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
'portagetree,portdbapi',
'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
'portage.dbapi.virtual:fakedbapi',
'portage.dep',
'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
'match_from_list,match_to_list',
'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
'portage.eclass_cache',
'portage.exception',
'portage.getbinpkg',
'portage.locks',
'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
'portage.mail',
'portage.manifest:Manifest',
'portage.output',
'portage.output:bold,colorize',
'portage.package.ebuild.doebuild:doebuild,' + \
'doebuild_environment,spawn,spawnebuild',
'portage.package.ebuild.config:autouse,best_from_dict,' + \
'check_config_instance,config',
'portage.package.ebuild.deprecated_profile_check:' + \
'deprecated_profile_check',
'portage.package.ebuild.digestcheck:digestcheck',
'portage.package.ebuild.digestgen:digestgen',
'portage.package.ebuild.fetch:fetch',
'portage.package.ebuild.getmaskingreason:getmaskingreason',
'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.process',
'portage.process:atexit_register,run_exitfuncs',
'portage.update:dep_transform,fixdbentries,grab_updates,' + \
'parse_updates,update_config_files,update_dbentries,' + \
'update_dbentry',
'portage.util',
'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
'apply_recursive_permissions,dump_traceback,getconfig,' + \
'grabdict,grabdict_package,grabfile,grabfile_package,' + \
'map_dictlist_vals,new_protect_filename,normalize_path,' + \
'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
'writemsg_stdout,write_atomic',
'portage.util.digraph:digraph',
'portage.util.env_update:env_update',
'portage.util.ExtractKernelVersion:ExtractKernelVersion',
'portage.util.listdir:cacheddir,listdir',
'portage.util.movefile:movefile',
'portage.util.mtimedb:MtimeDB',
'portage.versions',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
'cpv_getkey@getCPFromCPV,endversion_keys,' + \
'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
'portage.xpak',
'subprocess',
'time',
)
try:
from collections import OrderedDict
except ImportError:
proxy.lazyimport.lazyimport(globals(),
'portage.cache.mappings:OrderedDict')
import portage.const
from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
except ImportError as e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
sys.stderr.write("!!! installation of portage. Pl
|
ease try a rescue portage located in the\n")
sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
sys.stderr.write("!!! a recovery of portage.\n")
sys.stderr.write(" "+str(e)+"\n\n")
raise
if sys.hexversion >= 0x3000000:
basestring = str
long = int
# We use utf_8 encoding everywhere. Previously, we used
# sys.getfilesystemencoding() for th
|
e 'merge' encoding, but that had
# various problems:
#
# 1) If the locale is ever changed then it can cause orphan files due
# to changed character set translation.
#
# 2) Ebuilds typically install files with utf_8 encoded file names,
# and then portage would be forced to rename those files to match
# sys.getfilesystemencoding(), possibly breaking things.
#
# 3) Automatic translation between encodings can lead to nonsensical
# file names when the source encoding is unknown by portage.
#
# 4) It's inconvenient for ebuilds to convert the encodings of file
# names to match the current locale, and upstreams typically encode
# file names with utf_8 encoding.
#
# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
# problems by using a constant utf_8 'merge' encoding for all locales, as
# discussed in bug #382199 and bug #381509.
_encodings = {
'content' : 'utf_8',
'fs' : 'utf_8',
'merge' : 'utf_8',
'repo.content' : 'utf_8',
'stdio' : 'utf_8',
}
if sys.hexversion >= 0x3000000:
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, str):
s = s.encode(encoding, errors)
return s
def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
if isinstance(s, bytes):
s = str(s, encoding=encoding, errors=errors)
return s
else:
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, unicode):
s = s.encode(encoding, errors)
return s
def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
if isinstance(s, bytes):
s = unicode(s, encoding=encoding, errors=errors)
return s
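# Hedged, self-contained illustration (not part of portage) of the constant
# utf_8 'merge' policy described above: a file name round-trips through
# _encodings['merge'] regardless of the current locale.
def _example_merge_roundtrip(name=u'bj\xf6rk.ogg'):
    encoded = _unicode_encode(name, encoding=_encodings['merge'])
    return _unicode_decode(encoded, encoding=_encodings['merge']) == name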
class _unicode_func_wrapper(object):
"""
Wraps a function, converts arguments from unicode to bytes,
and return values to unicode from bytes. Function calls
will raise UnicodeEncodeError if an argument fails to be
encoded with the required encoding. Return values that
are single strings are decoded with errors='replace'. Return
values that are lists of strings are decoded with errors='strict'
and elements that fail to be decoded are omitted from the returned
list.
"""
__slots__ = ('_func', '_encoding')
def __init__(self, func, encoding=_enc
|
Ryezhang/scrapy
|
scrapy/http/request/form.py
|
Python
|
bsd-3-clause
| 7,658
| 0.001306
|
"""
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url
|
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.items() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.e
|
ncoding)
if self.method == 'POST':
self.headers.setdefault(b'Content-Type', b'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formid=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, formxpath=None, formcss=None, **kwargs):
kwargs.setdefault('encoding', response.encoding)
if formcss is not None:
from parsel.csstranslator import HTMLTranslator
formxpath = HTMLTranslator().css_to_xpath(formcss)
form = _get_form(response, formname, formid, formnumber, formxpath)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = _get_form_url(form, kwargs.pop('url', None))
method = kwargs.pop('method', form.method)
return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form, url):
if url is None:
action = form.get('action')
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq, enc):
values = [(to_bytes(k, enc), to_bytes(v, enc))
for k, vs in seq
for v in (vs if is_listlike(vs) else [vs])]
return urlencode(values, doseq=1)
def _get_form(response, formname, formid, formnumber, formxpath):
"""Find the form element """
root = create_root_node(response.text, lxml.html.HTMLParser,
base_url=get_base_url(response))
forms = root.xpath('//form')
if not forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
if formid is not None:
f = root.xpath('//form[@id="%s"]' % formid)
if f:
return f[0]
# Get form element from xpath, if not found, go up
if formxpath is not None:
nodes = root.xpath(formxpath)
if nodes:
el = nodes[0]
while True:
if el.tag == 'form':
return el
el = el.getparent()
if el is None:
break
encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
raise ValueError('No <form> element found with %s' % encoded)
# If we get here, it means that either formname was None
# or invalid
if formnumber is not None:
try:
form = forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
try:
formdata_keys = dict(formdata or ()).keys()
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
if not formdata:
formdata = ()
inputs = form.xpath('descendant::textarea'
'|descendant::select'
'|descendant::input[not(@type) or @type['
' not(re:test(., "^(?:submit|image|reset)$", "i"))'
' and (../@checked or'
' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
namespaces={
"re": "http://exslt.org/regular-expressions"})
values = [(k, u'' if v is None else v)
for k, v in (_value(e) for e in inputs)
if k and k not in formdata_keys]
if not dont_click:
clickable = _get_clickable(clickdata, form)
if clickable and clickable[0] not in formdata and not clickable[0] is None:
values.append(clickable)
if isinstance(formdata, dict):
formdata = formdata.items()
values.extend((k, v) for k, v in formdata if v is not None)
return values
def _value(ele):
n = ele.name
v = ele.value
if ele.tag == 'select':
return _select_value(ele, n, v)
return n, v
def _select_value(ele, n, v):
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
        # And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
elif v is not None and multiple:
        # This is a workaround for a bug in lxml, fixed in 2.3.1
# fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
selected_options = ele.xpath('.//option[@selected]')
v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
return n, v
def _get_clickable(clickdata, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
clickables = [
el for el in form.xpath(
'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
'|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
namespaces={"re": "http://exslt.org/regular-expressions"})
]
if not clickables:
return
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables[0]
return (el.get('name'), el.get('value') or '')
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.get('name'), el.get('value') or '')
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].get('name'), el[0].get('value') or '')
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
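# Hedged usage sketch (not part of this module; field names are placeholders):
# how FormRequest.from_response is typically called on a login page response,
# letting the helpers above pick up the form's hidden fields automatically.
def _example_login_request(response):
    return FormRequest.from_response(
        response,
        formdata={'username': 'john', 'password': 'secret'},
    )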
|
andresfcardenas/marketing-platform
|
landing/migrations/0003_auto__add_formtext.py
|
Python
|
bsd-3-clause
| 3,656
| 0.008206
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FormText'
db.create_table(u'landing_formtext', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
('text', self.gf('django.db.models.fields.TextField')(max_length=200)),
))
db.send_create_signal(u'landing', ['FormText'])
def backwards(self, orm):
# Deleting model 'FormText'
db.delete_table(u'landing_formtext')
models = {
u'landing.formtext': {
'Meta': {'object_name': 'FormText'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'landing.function': {
'Meta': {'object_name': 'Function'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'landing.landingregister': {
'Meta': {'object_name': 'LandingRegister'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'landing.mainimage': {
'Description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
'Meta': {'object_name': 'MainImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'landing.product': {
'Meta': {'object_name': 'Product'},
'description': ('django.db.models.fields.TextField'
|
, [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'landing.slogan': {
'Meta': {'object_name': 'Slogan'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slogan
|
': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'landing.testimonial': {
'Meta': {'object_name': 'Testimonial'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['landing']
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/product_bidding_category_constant_service/client.py
|
Python
|
apache-2.0
| 18,954
| 0.001636
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import (
product_bidding_category_constant,
)
from google.ads.googleads.v8.services.types import (
product_bidding_category_constant_service,
)
from .transports.base import (
ProductBiddingCategoryConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ProductBiddingCategoryConstantServiceGrpcTransport
class ProductBiddingCategoryConstantServiceClientMeta(type):
"""Metaclass for the ProductBiddingCategoryConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ProductBiddingCategoryConstantServiceTransport]]
_transport_registry[
"grpc"
] = ProductBiddingCategoryConstantServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ProductBiddingCategoryConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ProductBiddingCategoryConstantServiceClient(
metaclass=ProductBiddingCategoryConstantServiceClientMeta
):
"""Service to fetch Product Bidding Categories."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.com
|
pile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
|
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ProductBiddingCategoryConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ProductBiddingCategoryConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ProductBiddingCategoryConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
ProductBiddingCategoryConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def product_bidding_category_constant_path(
country_code: str, level: str, id: str,
) -> str:
"""Return a fully-qualified product_bidding_category_constant string."""
return "productBiddingCategoryConstants/{country_code}~{level}~{id}".format(
country_code=country_code, level=level, id=id,
)
@staticmethod
def parse_product_bidding_category_constant_path(
path: str,
) -> Dict[str, str]:
"""Parse a product_bidding_category_constant path into its component segments."""
m = re.match(
r"^productBiddingCategoryConstants/(?P<country_code>.+?)~(?P<level>.+?)~(?P<id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Pa
|
DataONEorg/d1_python
|
gmn/src/d1_gmn/app/urls.py
|
Python
|
apache-2.0
| 8,003
| 0.00025
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""URL to view mapping."""
import d1_common.utils.filesystem
import d1_common.utils.ulog
import django.urls
import django.views.static
import d1_gmn.app.views.external
import d1_gmn.app.views.get_package
import d1_gmn.app.views.gmn
import d1_gmn.app.views.internal
# from django.urls import path
# from django.views.generic import TemplateView
# Return 404 and 500 as UI page when DEBUG=False
handler404 = "d1_gmn.app.views.internal.error_404"
handler500 = "d1_gmn.app.view
|
s.internal.error_500"
urlpatterns = [
# Django's URL dispatcher does not take HTTP method into account, so in the
# cases where the DataONE REST API specifies different methods as different
# methods against the same URL, the methods are dispatched to the same view
# function, which checks the method and
|
dispatches to the appropriate handler.
# Tier 1: Core API (MNCore)
# MNCore.ping() - GET /monitor/ping
django.urls.re_path(
r"^v[12]/monitor/ping/?$",
d1_gmn.app.views.external.get_monitor_ping,
kwargs={"allowed_method_list": ["GET"]},
name="get_monitor_ping",
),
# MNCore.getLogRecords() - GET /log
django.urls.re_path(
r"^v[12]/log/?$",
d1_gmn.app.views.external.get_log,
kwargs={"allowed_method_list": ["GET"]},
name="get_log",
),
# MNCore.getCapabilities() - GET /node
# Also available via Apache redirect from /
django.urls.re_path(
r"^v[12]/(?:node/?)?$",
d1_gmn.app.views.external.get_node,
kwargs={"allowed_method_list": ["GET"]},
name="get_node",
),
# Tier 1: Read API (MNRead)
# MNRead.get() - GET /object/{did}
django.urls.re_path(
r"^v[12]/object/(.+)$",
d1_gmn.app.views.external.dispatch_object,
kwargs={"allowed_method_list": ["GET", "HEAD", "PUT", "DELETE"]},
name="dispatch_object",
),
# MNRead.getSystemMetadata() - GET /meta/{did}
django.urls.re_path(
r"^v[12]/meta/(.+)$",
d1_gmn.app.views.external.get_meta,
kwargs={"allowed_method_list": ["GET"]},
name="get_meta",
),
# MNStorage.updateSystemMetadata() - PUT /meta
django.urls.re_path(
r"^v2/meta$",
d1_gmn.app.views.external.put_meta,
kwargs={"allowed_method_list": ["PUT"]},
name="put_meta",
),
# MNRead.describe() - HEAD /object/{did}
# (handled by object dispatcher)
# MNRead.getChecksum() - GET /checksum/{did}
django.urls.re_path(
r"^v[12]/checksum/(.+)$",
d1_gmn.app.views.external.get_checksum,
kwargs={"allowed_method_list": ["HEAD", "GET"]},
name="get_checksum",
),
# MNRead.listObjects() - GET /object
django.urls.re_path(
r"^v[12]/object/?$",
d1_gmn.app.views.external.dispatch_object_list,
kwargs={"allowed_method_list": ["GET", "POST"]},
name="dispatch_object_list",
),
# MNRead.synchronizationFailed() - POST /error
django.urls.re_path(
r"^v[12]/error/?$",
d1_gmn.app.views.external.post_error,
kwargs={"allowed_method_list": ["POST"]},
name="post_error",
),
# MNRead.getReplica() - GET /replica/{did}
django.urls.re_path(
r"^v[12]/replica/(.+)/?$",
d1_gmn.app.views.external.get_replica,
kwargs={"allowed_method_list": ["GET"]},
name="get_replica",
),
# Tier 2: Authorization API (MNAuthorization)
# MNAuthorization.isAuthorized() - GET /isAuthorized/{did}
django.urls.re_path(
r"^v[12]/isAuthorized/(.+)/?$",
d1_gmn.app.views.external.get_is_authorized,
kwargs={"allowed_method_list": ["GET"]},
name="get_is_authorized",
),
# MNStorage.systemMetadataChanged() - POST /refreshSystemMetadata/{did}
django.urls.re_path(
r"^v[12]/dirtySystemMetadata/?$",
d1_gmn.app.views.external.post_refresh_system_metadata,
kwargs={"allowed_method_list": ["POST"]},
name="post_refresh_system_metadata",
),
# Tier 3: Storage API (MNStorage)
# MNStorage.create() - POST /object
# (handled by object dispatcher)
# MNStorage.update() - PUT /object/{did}
# (handled by object dispatcher)
# MNStorage.generateIdentifier()
django.urls.re_path(
r"^v[12]/generate/?$",
d1_gmn.app.views.external.post_generate_identifier,
kwargs={"allowed_method_list": ["POST", "PUT"]},
name="post_generate_identifier",
),
# MNStorage.delete() - DELETE /object/{did}
# (handled by object dispatcher)
# MNStorage.archive() - PUT /archive/{did}
django.urls.re_path(
r"^v[12]/archive/(.+)/?$",
d1_gmn.app.views.external.put_archive,
kwargs={"allowed_method_list": ["delete", "PUT"]},
name="put_archive",
),
# Tier 4: Replication API (MNReplication)
# MNReplication.replicate() - POST /replicate
django.urls.re_path(
r"^v[12]/replicate/?$",
d1_gmn.app.views.external.post_replicate,
kwargs={"allowed_method_list": ["POST"]},
name="post_replicate",
),
# Package API
# MNPackage.getPackage() - GET /package
django.urls.re_path(
r"^v2/packages/(?P<package_type>.+)/(?P<pid>.+)/?$",
d1_gmn.app.views.get_package.get_package,
kwargs={"allowed_method_list": ["GET"]},
name="get_package",
),
#
# Web UI
#
# Redirect / to /home
django.urls.re_path(
r"^$",
d1_gmn.app.views.internal.root,
kwargs={"allowed_method_list": ["GET"]},
name="root",
),
django.urls.re_path(
r"^home/?$",
d1_gmn.app.views.internal.home,
kwargs={"allowed_method_list": ["GET"]},
name="home",
),
django.urls.re_path(
r"^templates/home.xsl$",
d1_gmn.app.views.internal.home_xslt,
kwargs={"allowed_method_list": ["GET"]},
name="home_xslt",
),
django.urls.re_path(
r"^templates/clipboard/(.+)/?$",
d1_gmn.app.views.internal.clipboard,
kwargs={"allowed_method_list": ["GET"]},
name="clipboard",
),
#
# GMN vendor specific extensions
#
django.urls.re_path(
r"^gmn/object/?$",
d1_gmn.app.views.gmn.get_object_list_json,
kwargs={"allowed_method_list": ["GET"]},
name="get_object_list_json",
),
django.urls.re_path(
r"^gmn/echo/session/?$",
d1_gmn.app.views.gmn.echo_session,
kwargs={"allowed_method_list": ["GET"]},
name="echo_session",
),
django.urls.re_path(
r"^gmn/echo/request/?$",
d1_gmn.app.views.gmn.echo_request,
kwargs={"allowed_method_list": ["GET"]},
name="echo_request_object",
),
]
if django.conf.settings.STATIC_SERVER:
urlpatterns.append(
django.urls.re_path(
r"^static/(?P<path>.*)$",
django.views.static.serve,
kwargs={
# 'static': d1_common.util.abs_path('.'),
"document_root": d1_common.utils.filesystem.abs_path("./static"),
"show_indexes": True,
"allowed_method_list": ["GET"],
},
)
)
|
fbsder/zephyr
|
scripts/support/runner/jlink.py
|
Python
|
apache-2.0
| 3,770
| 0
|
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for debugging with JLink.'''
from os import path
import os
from .core import ZephyrBinaryRunner, get_env_or_bail
DEFAULT_JLINK_GDB_PORT = 2331
class JLinkBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for the J-Link GDB server.'''
def __init__(self, device,
gdbserver='JLinkGDBServer', iface='swd', elf_name=None,
gdb=None, gdb_port=DEFAULT_JLINK_GDB_PORT, tui=None,
debug=False):
super(JLinkBinaryRunner, self).__init__(debug=debug)
self.device = device
self.gdbserver_cmd = [gdbserver]
self.iface = iface
self.elf_name = elf_name
self.gdb_cmd = [gdb] if gdb is not None else None
self.gdb_port = gdb_port
self.tui_arg = [tui] if tui is not None else []
def replaces_shell_script(shell_script, command):
return (command in {'debug', 'debugserver'} and
shell_script == 'j
|
link.sh')
def create_from_env(command, debug):
'''Create runner from environment.
Required:
- JLINK_DEVICE: device name
Required for 'debug':
|
- GDB: gdb to use
- O: build output directory
- KERNEL_ELF_NAME: zephyr kernel binary in ELF format
Optional for 'debug':
- TUI: if present, passed to gdb server used to flash
Optional for 'debug', 'debugserver':
- JLINK_GDBSERVER: default is JLinkGDBServer
- GDB_PORT: default is 2331
- JLINK_IF: default is swd
'''
device = get_env_or_bail('JLINK_DEVICE')
gdb = os.environ.get('GDB', None)
o = os.environ.get('O', None)
elf = os.environ.get('KERNEL_ELF_NAME', None)
elf_name = None
if o is not None:
if elf is not None:
elf_name = path.join(o, elf)
tui = os.environ.get('TUI', None)
gdbserver = os.environ.get('JLINK_GDBSERVER', 'JLinkGDBServer')
gdb_port = int(os.environ.get('GDB_PORT',
str(DEFAULT_JLINK_GDB_PORT)))
iface = os.environ.get('JLINK_IF', 'swd')
return JLinkBinaryRunner(device, gdbserver=gdbserver,
iface=iface, elf_name=elf_name,
gdb=gdb, gdb_port=gdb_port, tui=tui,
debug=debug)
def print_gdbserver_message(self):
print('JLink GDB server running on port {}'.format(self.gdb_port))
def run(self, command, **kwargs):
if command not in {'debug', 'debugserver'}:
raise ValueError('{} is not supported'.format(command))
server_cmd = (self.gdbserver_cmd +
['-port', str(self.gdb_port),
'-if', self.iface,
'-device', self.device,
'-silent',
'-singlerun'])
if command == 'debugserver':
self.print_gdbserver_message()
self.check_call(server_cmd)
else:
if self.gdb_cmd is None:
raise ValueError('Cannot debug; gdb is missing')
if self.elf_name is None:
raise ValueError('Cannot debug; elf is missing')
client_cmd = (self.gdb_cmd +
self.tui_arg +
[self.elf_name] +
['-ex', 'target remote :{}'.format(self.gdb_port),
'-ex', 'monitor halt',
'-ex', 'load',
'-ex', 'monitor reset'])
self.print_gdbserver_message()
self.run_server_and_client(server_cmd, client_cmd)
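# Hedged usage sketch (device name and paths are placeholders; actually running
# it requires the SEGGER J-Link tools and an attached board):
def _example_debugserver():
    runner = JLinkBinaryRunner('nRF52832_xxAA', gdb='arm-none-eabi-gdb',
                               elf_name='outdir/zephyr.elf')
    runner.run('debugserver')  # spawns JLinkGDBServer -port 2331 -if swd -device nRF52832_xxAA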
|
Dingo5733/djangoblog19
|
src/trydjango19/settings.py
|
Python
|
mit
| 3,453
| 0.001448
|
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.
|
template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.applic
|
ation'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
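# Hedged companion snippet (belongs in the project's urls.py, shown here only
# to illustrate how the MEDIA_URL/MEDIA_ROOT settings above are typically wired
# up during development):
#
#     from django.conf import settings
#     from django.conf.urls.static import static
#     urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)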
|
chemelnucfin/tensorflow
|
tensorflow/python/data/experimental/ops/batching.py
|
Python
|
apache-2.0
| 13,674
| 0.003145
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.dense_to_sparse_batch")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(
batch_size=2, row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.SparseTensor`. Each element of this dataset must have the same rank as
`row_shape`, and must have size less than or equal to `row_shape` in each
dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _DenseToSparseBatchDataset(dataset, batch_size, row_shape)
return _apply_fn
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch()`")
@tf_export(v1=["data.experimental.map_and_batch_with_legacy_function"])
def map_and_batch_with_legacy_function(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
NOTE: This is an escape hatch for existing uses of `map_and_batch` that do not
work with V2 functions. New uses are strongly discouraged and existing uses
  should migrate to `map_and_batch` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
      representing the number of elements to process in parallel. If not
      specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError("The `num_parallel_batches` and `num_parallel_calls` "
"arguments are mutually exclusive.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder,
use_legacy_function=True)
return _apply_fn
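# Illustrative sketch (added for context; not part of the original module):
# the fused transformation and the recommended `map` followed by `batch`
# produce the same batches. Assumes TF 2.x eager execution.
def _map_and_batch_example():
  import tensorflow as tf  # assumed importable in user code
  ds = tf.data.Dataset.range(10)
  fused = ds.apply(
      tf.data.experimental.map_and_batch(lambda x: x * 2, batch_size=4))
  plain = ds.map(lambda x: x * 2,
                 num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(4)
  for a, b in zip(fused, plain):
    assert (a.numpy() == b.numpy()).all()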
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by "
"`tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data "
"optimizations will take care of using the fused implementation.")
@tf_export("data.experimental.map_and_batch")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. However, by fusing the two transformations together, the
implementation can be more efficient. Surfacing this transformation in the API
is temporary. Once automatic input pipeline optimization is implemented,
the fusing of `map` and `batch` will happen automatically and this API will be
deprecated.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.d
StackStorm/st2 | st2tests/integration/orquesta/test_wiring_functions_task.py | Python | apache-2.0 | 3,825 | 0.000523
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from integration.orquesta import base
from st2common.constants import action as action_constants
class FunctionsWiringTest(base.TestWorkflowExecution):
def test_task_functions_in_yaql(self):
wf_name = "examples.orquesta-test-yaql-task-functions"
expected_output = {
"last_task4_result": "False",
"task9__1__parent": "task8__1",
"task9__2__parent": "task8__2",
"that_task_by_name": "task1",
"this_task_by_name": "task1",
"this_task_no_arg": "task1",
}
        expected_result = {"output": expected_output}
self._execute_workflow(
wf_name, execute_async=False, expected_result=expected_result
)
def test_task_functions_in_jinja(self):
wf_name = "examples.orquesta-test-jinja-task-functions"
expected_output = {
"last_task4_result": "False",
"task9__1__parent": "task8__1",
"task9__2__parent": "task8__2",
"that_task_by_name": "task1",
"this_task_by_name": "task1",
"this_task_no_arg": "task1",
}
expected_result = {"output": expected_output}
self._execute_workflow(
wf_name, execute_async=False, expected_result=expected_result
)
def test_task_nonexistent_in_yaql(self):
wf_name = "examples.orquesta-test-yaql-task-nonexistent"
expected_output = None
expected_errors = [
{
"type": "error",
"message": (
"YaqlEvaluationException: Unable to evaluate expression "
"'<% task(\"task0\") %>'. ExpressionEvaluationException: "
'Unable to find task execution for "task0".'
),
"task_transition_id": "continue__t0",
"task_id": "task1",
"route": 0,
}
]
expected_result = {"output": expected_output, "errors": expected_errors}
self._execute_workflow(
wf_name,
execute_async=False,
expected_status=action_constants.LIVEACTION_STATUS_FAILED,
expected_result=expected_result,
)
def test_task_nonexistent_in_jinja(self):
wf_name = "examples.orquesta-test-jinja-task-nonexistent"
expected_output = None
expected_errors = [
{
"type": "error",
"message": (
"JinjaEvaluationException: Unable to evaluate expression "
"'{{ task(\"task0\") }}'. ExpressionEvaluationException: "
'Unable to find task execution for "task0".'
),
"task_transition_id": "continue__t0",
"task_id": "task1",
"route": 0,
}
]
expected_result = {"output": expected_output, "errors": expected_errors}
self._execute_workflow(
wf_name,
execute_async=False,
expected_status=action_constants.LIVEACTION_STATUS_FAILED,
expected_result=expected_result,
)
Alwnikrotikz/pmx | scripts/mdsetup_407.py | Python | lgpl-3.0 | 22,643 | 0.019035
#!/usr/bin/env python
# pmx Copyright Notice
# ============================
#
# The pmx source code is copyrighted, but you can freely use and
# copy it as long as you don't change or remove any of the copyright
# notices.
#
# ----------------------------------------------------------------------
# pmx is Copyright (C) 2006-2013 by Daniel Seeliger
#
# All Rights Reserved
#
# Permission to use, copy, modify, distribute, and distribute modified
# versions of this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both the copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of Daniel Seeliger not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# DANIEL SEELIGER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL DANIEL SEELIGER BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ----------------------------------------------------------------------
import sys,os,shutil
import commands
from glob import glob
from pmx import *
from pmx.ndx import *
from pmx.library import _one_letter
from pmx.odict import *
from pmx.forcefield import MDP
from pmx.forcefield2 import ITPFile, Topology
from pmx.parser import *
def run_command( func, string ):
s = func.__name__+'(): '+ string
err_file = func.__name__+'_ERROR.log'
status, out = commands.getstatusoutput( string )
if status != 0:
print >>sys.stderr, 'ERROR in %s ' % func.__name__
print >>sys.stderr, 'Output written to %s' % err_file
fp = open(err_file,'w')
print >>fp, s
print >>fp, out
fp.close()
sys.exit(1)
else:
print "%-90s" % s, ': ok'
chain_ids = 'ABCDEFGHIJKLMNOPQRSTVWXYZ'
class MD:
## def __init__(self, pdb_in = None, ff = 'amber99sbmut', water = 'tip3p', conc = .15,
## box_type = 'dodecahedron', box_size = 1.2, vsite = False, princ = False, **kwargs):
def __init__(self, **kwargs):
self.ff = 'amber99sb'
self.water = 'tip3p'
self.conc = .15
self.box_type = 'triclinic'
self.box_size = 1.2
self.vsite = False
self.princ = False
self.md_in_conf = 'md_in.gro'
self.amberize_input = True
for key, val in kwargs.items():
setattr(self,key,val)
def setup(self):
if self.amberize_input:
self.amberize_pdb(self.pdb_in)
# self.generate_topology( self.pdb_in)
# self.renumber_pdb( )
self.generate_topology( self.pdb_in)
self.generate_sim_box( )
self.fill_sim_box( )
self.tpr_from_box()
self.add_ions( )
self.make_em_tpr()
self.run_em()
self.pdb_from_em() # -> min.gro
#self.generate_topology('min.gro') # -> gmx.gro
#self.make_em_tpr('gmx.gro')
#shutil.copy('gmx.gro',self.md_in_conf)
#self.md_in_conf = 'gmx.gro'
self.compact_repr( 'min.gro', self.md_in_conf)
self.clean_backups()
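    # Usage sketch (illustrative; not part of the original script). The
    # constructor accepts keyword overrides for the defaults set in __init__,
    # and setup() then drives the GROMACS pipeline above. 'protein.pdb' is a
    # placeholder input structure; the GROMACS tools called here (pdb2gmx,
    # editconf, genbox, genion, grompp, mdrun) must be on the PATH:
    #
    #   md = MD(pdb_in='protein.pdb', ff='amber99sb', water='tip3p',
    #           box_type='dodecahedron', box_size=1.2, conc=0.15)
    #   md.setup()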
def write_residue_map( self, model ):
fp = open('residue_map.txt','w')
for r in model.residues:
if r.chain_id == ' ':
chain = '_'
r.old_chain_id = '_'
else:
chain = r.chain_id
print >>fp, '%d|%s|%s -> %d|%s|%s' %( r.orig_id, r.resname, r.old_chain_id, r.id, r.resname, chain)
fp.close()
def renumber_pdb(self ):
m = Model('gmx.gro')
for i, chain in enumerate(m.chains):
if chain.id != ' ':
for r in chain.residues:
r.old_chain_id = chain.id
chain.set_chain_id( chain_ids[i] )
self.write_residue_map( m )
m.write('start.gro')
self.pdb_in = 'start.gro'
def __str__(self):
s = '< MD (%s) > ' % self.pdb_in
return s
def clean_backups(self):
files = glob('#*#')
for f in files:
os.unlink(f)
def amberize_pdb(self, pdb_in):
amb = 'make_amber.py -f %s -o amber.pdb ' % ( pdb_in )
run_command( self.amberize_pdb, amb )
self.pdb_in = 'amber.pdb'
def generate_topology(self, pdb_in):
run = 'pdb2gmx -f %s -water %s -ff %s -o gmx.gro ' % ( pdb_in, self.water, self.ff )
if self.vsite:
run+=' -vsite hydrogens'
run_command( self.generate_topology, run )
def generate_sim_box(self ):
if self.princ:
run = 'echo 4 | editconf -f gmx.gro -o tmp.gro -c -princ '
            run_command( self.generate_sim_box, run)
run = 'editconf -f tmp.gro -o ed.gro -d %g -bt %s ' % (self.box_size, self.box_type)
            run_command( self.generate_sim_box, run)
else:
run = 'editconf -f gmx.gro -o ed.gro -d %g -bt %s ' % (self.box_size, self.box_type)
run_command( self.generate_sim_box, run)
def fill_sim_box(self ):
if self.water == 'spce': water = 'spc216'
else: water = self.water
run = 'genbox -cp ed.gro -cs %s -p -o box.gro' % water
run_command( self.fill_sim_box, run)
self.check_for_multiple_molecule_entries()
def check_for_multiple_molecule_entries(self):
tp = Topology('topol.top', assign_types = False)
mol_dic = OrderedDict()
for m in tp.molecules:
if mol_dic.has_key(m[0]):
mol_dic[m[0]]+=m[1]
else:
mol_dic[m[0]] = m[1]
tp.molecules = []
for key, val in mol_dic.items():
tp.molecules.append( [key, val] )
tp.write('topol.top')
def tpr_from_box(self):
run = 'grompp -f ~/mdp/em.mdp -c box.gro'
run_command( self.tpr_from_box, run)
def get_solvent_index(self ):
run = 'echo q | make_ndx -f topol.tpr -o tmp.ndx'
run_command( self.get_solvent_index, run)
ndx = IndexFile("tmp.ndx")
return ndx.names.index('SOL')
def add_ions(self ):
idx = self.get_solvent_index()
run = 'echo %d | genion -conc %g -neutral -p -o ion.gro -nname ClJ -pname NaJ' % (idx, self.conc)
run_command( self.add_ions, run)
def make_em_tpr(self, f = 'ion.gro'):
run = 'grompp -f ~/mdp/em.mdp -c %s' % f
run_command( self.make_em_tpr, run)
def run_em(self):
run = 'mdrun -v -c em.gro'
run_command( self.run_em, run)
def pdb_from_em(self):
run = 'echo 0| trjconv -f em.gro -s topol.tpr -o min.gro'
run_command( self.pdb_from_em, run)
def compact_repr(self, pdb_in, pdb_out ):
run = 'echo 0 | trjconv -f %s -s topol.tpr -ur compact -pbc mol -o %s' % (pdb_in, pdb_out)
run_command( self.compact_repr, run)
class FreeEnergyMD(MD):
def __init__(self, mutation_file, **kwargs):
MD.__init__( self )
for key, val in kwargs.items():
setattr(self,key,val)
self.mutation_tags = []
self.read_mutation_file( mutation_file )
self.mutations = []
if self.mutation_tags:
self.mutations_from_tags()
self.runs = []
self.is_single_chain = False
def read_mutation_file(self, mut_file ):
print '\n\t\t\tReading mutation file: %s\n' % mut_file
l = open(mut_file).readlines()
l = kickOutComments(l)
count = 1
for line in l:
entr = line.strip()
if entr:
print '\t\t\t (%d) -> %s' % (count, entr)
self.mutation_tags.append( entr )
count+=1
def setup(self):
self.read_pdb()
pri
data-henrik/watson-conversation-tool | wctool.py | Python | apache-2.0 | 13,189 | 0.012207
# Copyright 2017-2018 IBM Corp. All Rights Reserved.
# See LICENSE for details.
#
# Author: Henrik Loeser
#
# Manage workspaces for IBM Watson Assistant service on IBM Cloud.
# See the README for documentation.
#
import json, argparse, importlib
from os.path import join, dirname
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
privcontext=None
assistant=None
def loadAndInit(confFile=None):
# Credentials are read from a file
with open(confFile) as confFile:
config=json.load(confFile)
configWA=config['credentials']
if 'ICF_KEY' in config:
global privcontext
icf_key=config['ICF_KEY'].split(':')
privcontext={"private": {"icfcreds": {"user": icf_key[0], "password": icf_key[1]}}}
# Initialize the Watson Assistant client
global assistant
if 'apikey' in configWA:
# Authentication via IAM
authenticator = IAMAuthenticator(configWA['apikey'])
assistant = AssistantV1(
authenticator=authenticator,
version=configWA['version'])
assistant.set_service_url(configWA['url'])
else:
        print('Error: apikey for Watson Assistant missing in credentials.')
        exit()
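# Sketch of the configuration file layout this loader expects, inferred from
# the keys read above; the values shown are placeholders, not real credentials:
#
#   {
#     "credentials": {
#       "apikey": "<IAM apikey>",
#       "version": "<service API version string>",
#       "url": "<Watson Assistant service URL>"
#     },
#     "ICF_KEY": "<user>:<password>"    (optional; used for privcontext above)
#   }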
# Define parameters that we want to catch and some basic command help
def initParser(args=None):
parser = argparse.ArgumentParser(description='Manage Watson Assistant workspaces (skills)',
prog='wctool.py',
usage='%(prog)s [-h | -l | -g | -c | -u | -delete] [options]')
parser.add_argument("-l", "-list" ,dest='listWorkspaces', action='store_true', help='list workspaces')
parser.add_argument("-c", "-create", dest='createWorkspace', action='store_true', help='create workspace')
parser.add_argument("-u", "-update", dest='updateWorkspace', action='store_true', help='update workspace')
parser.add_argument("-delete",dest='deleteWorkspace', action='store_true', help='delete workspace')
parser.add_argument("-g", "-get", dest='getWorkspace', action='store_true', help='get details for single workspace')
parser.add_argument("-logs",dest='listLogs', action='store_true', help='list logs')
parser.add_argument("-dialog",dest='dialog', action='store_true', help='have dialog')
parser.add_argument("-outputonly",dest='outputOnly', action='store_true', help='print dialog output only')
parser.add_argument("-full",dest='fullWorkspace', action='store_true', help='get the full workspace')
parser.add_argument("-id",dest='workspaceID', help='Workspace ID')
parser.add_argument("-o", "-outfile", dest='outFile', help='Workspace Output File')
parser.add_argument("-i", "-infile", dest='inFile', help='Workspace Input File')
parser.add_argument("-name",dest='wsName', help='Workspace Name')
parser.add_argument("-desc",dest='wsDescription', help='Workspace Description')
parser.add_argument("-lang",dest='wsLang', help='Workspace Language')
parser.add_argument("-actionmodule",dest='actionModule', help='Module for client action handling')
parser.add_argument("-filter",dest='filter', help='filter query')
parser.add_argument("-context",dest='context', help='context file')
parser.add_argument("-intents",dest='wsIntents', action='store_true', help='Update Intents')
parser.add_argument("-entities",dest='wsEntities', action='store_true', help='Update Entities')
parser.add_argument("-dialog_nodes",dest='wsDialogNodes', action='store_true', help='Update Dialog Nodes')
parser.add_argument("-counterexamples",dest='wsCounterexamples', action='store_true', help='Update Counterexamples')
parser.add_argument("-metadata",dest='wsMetadata', action='store_true', help='Update Metadata')
parser.add_argument("-config",dest='confFile', default='config.json', help='configuration file')
parser.add_argument("-append",dest='append', action='store_true', help='append to or replace workspace')
return parser
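# Example invocations (illustrative only; the flags are those defined in
# initParser above, while IDs and file names are placeholders -- the actual
# dispatch happens in the script's main section, not shown here):
#
#   python wctool.py -l -config config.json
#   python wctool.py -g -id <workspace-id> -full -o workspace.json
#   python wctool.py -u -id <workspace-id> -i workspace.json -intents -entities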
# List available dialogs
def listWorkspaces():
print(json.dumps(assistant.list_workspaces().get_result(), indent=2))
# Get and print a specific workspace by ID
def getPrintWorkspace(workspaceID,exportWS):
    print(json.dumps(assistant.get_workspace(workspace_id=workspaceID,export=exportWS).get_result(), indent=2))
# Get a specific workspace by ID and export to file
def getSaveWorkspace(workspaceID,outFile):
ws=assistant.get_workspace(workspace_id=workspaceID,export=True).get_result()
with open(outFile,'w') as jsonFile:
json.dump(ws, jsonFile, indent=2)
print ("Workspace saved to " + outFile)
# Update a workspace
# The workspace parts to be updated were specified as command line options
def updateWorkspace(workspaceID,
intents,
entities,
dialog_nodes,
counterexamples,
metadata,
newName=None,
newDescription=None,
newLang=None,
inFile=None,
append=False):
payload = {'intents': None,
'entities': None,
'dialog_nodes': None,
'counterexamples': None,
'metadata': None,
'append': False}
# Only read from file if specified
if (inFile is not None):
with open(inFile) as jsonFile:
ws=json.load(jsonFile)
# Read the sections to be updated
if intents:
payload['intents'] = ws['intents']
if entities:
payload['entities'] = ws['entities']
if dialog_nodes:
payload['dialog_nodes'] = ws['dialog_nodes']
if counterexamples:
payload['counterexamples'] = ws['counterexamples']
if metadata:
payload['metadata'] = ws['metadata']
# Now update the workspace
ws=assistant.update_workspace(workspace_id=workspaceID,
name=newName,
description=newDescription,
language=newLang,
intents=payload['intents'],
entities=payload['entities'],
dialog_nodes=payload['dialog_nodes'],
counterexamples=payload['counterexamples'],
metadata=payload['metadata'],
append=append).get_result()
print ("Workspace updated - new workspace")
print(json.dumps(ws, indent=2))
# Create a new workspace
def createWorkspace(newName, newDescription, newLang, inFile):
with open(inFile) as jsonFile:
ws=json.load(jsonFile)
newWorkspace=assistant.create_workspace(name=newName,
description=newDescription,
language=newLang,
intents=ws["intents"],
entities=ws["entities"],
dialog_nodes=ws["dialog_nodes"],
counterexamples=ws["counterexamples"],
metadata=ws['metadata']).get_result()
print(json.dumps(newWorkspace, indent=2))
# Delete a workspaceID
def deleteWorkspace(workspaceID):
assistant.delete_workspace(workspaceID)
print ("Workspace deleted")
# List logs for a specific workspace by ID
# For now just dump them, do not filter, do not store
def listLogs(workspaceID, filter):
print(json.dumps(assistant.list_logs(workspace_id=workspaceID,filter=filter).get_result(), indent=2))
# Start a dialog and converse with Watson
def converse(workspaceID, outputOnly=None, contextFile=None):
contextFile="session_context.json"
print ("Starting a conversation, stop by Ctrl+C or saying 'bye'")
print ("======================================================")
# Start with an empty context object
context={}
first=True
## Load conversation
slitvinov/lammps-sph-multiphase | python/examples/vizplotgui_gl.py | Python | gpl-2.0 | 4,373 | 0.02424
#!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# vizplotgui_gl.py
# Purpose: viz running LAMMPS simulation via GL tool with plot and GUI
# Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz shapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
g.show(ntimestep)
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
from dump import dump
from gl import gl
d = dump("tmp.dump",0)
g = gl(d)
d.next()
d.unscale()
g.zoom(1)
g.shift(0,0)
g.rotate(0,270)
g.q(10)
g.box(1)
g.show(ntimestep)
# display GUI with run/stop buttons and slider for temperature
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
    if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
    elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
pombredanne/coloradoes.py | src/coloradoes/coloradoes.py | Python | bsd-2-clause | 3,539 | 0.001413
import struct
import time
from .types import t_string, t_list, t_set, t_zset, t_hash
from .errors import *
class Coloradoes(object):
STRUCT_KEY = '!ic'
STRUCT_KEY_VALUE = '!icd'
STRUCT_ID = '!i'
def __init__(self, storage=None):
if storage is None:
            raise ValueError('A storage is required')
super(Coloradoes, self).__init__()
self.storage = storage
self.database = 0
def set_database(self, database):
self.database = database
def rename(self, source, target):
value = self.storage.get(source)
if value:
self.storage.set(target, value)
self.storage.delete(source)
def increment_by(self, key, increment):
        id = int(self.storage.get(key) or 0) + increment
self.storage.set(key, str(id))
return id
def get(self, key):
return self.storage.get(key)
def set(self, key, value):
return self.storage.set(key, value)
def exists(self, key):
return self.storage.exists(key)
def delete(self, key):
return self.storage.delete(key)
def get_id(self):
return self.increment_by(struct.pack(self.STRUCT_ID, self.database)
+ 'id', 1)
def set_key(self, key, type, expire=None):
self.command_del(key)
id = self.get_id()
k = struct.pack(self.STRUCT_KEY, self.database, 'K') + key
self.storage.set(k, struct.pack(self.STRUCT_KEY_VALUE, id, type,
expire or 0))
return id
def delete_key(self, key, id=None, type=None):
if id is None or type is None:
id, type = self.get_key(key, delete_expire=False)[:2]
self.storage.delete(struct.pack(self.STRUCT_KEY, self.database, 'K') +
key)
# Do any type cleanup here
# TODO: call type-specific clean up method
self.storage.delete(struct.pack('!ici', self.database, type, id))
def get_key(self, key, delete_expire=True):
data = self.storage.get(struct.pack(self.STRUCT_KEY, self.database,
'K') + key)
id, type, expire = None, None, None
if data is not None:
id, type, expire = struct.unpack(self.STRUCT_KEY_VALUE, data)
if expire == 0:
expire = None
if delete_expire is True and (expire is not None and
expire < time.time()):
id, type, expire = None, None, None
self.delete_key(key=key, id=id, type=type)
return id, type, expire
def command_select(self, database):
d = int(database)
if d >= 0 and d < 17:
self.set_database(d)
return True
else:
raise ValueError(INVALID_DB_INDEX)
def command_type(self, key):
type = self.get_key(key)[1]
if type is None:
return 'none'
if type == 'S':
return 'string'
if type == 'L':
return 'list'
        raise Exception('Unknown type "%s"' % type)
def __getattr__(self, attrName):
if attrName not in self.__dict__:
if attrName.startswith('command_'):
for t in (t_string, t_list, t_set, t_zset, t_hash):
if hasattr(t, attrName):
def func(*args, **kwargs):
return getattr(t, attrName)(self, *args, **kwargs)
return func
raise AttributeError()
return self.__dict__[attrName]
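# Minimal usage sketch (illustrative; not part of the original module). Any
# object exposing get/set/exists/delete works as a storage backend, which is
# all the Coloradoes class calls on it above.
class _DictStorage(object):
    def __init__(self):
        self._data = {}
    def get(self, key):
        return self._data.get(key)
    def set(self, key, value):
        self._data[key] = value
    def exists(self, key):
        return key in self._data
    def delete(self, key):
        if key in self._data:
            del self._data[key]
# Example:
#   c = Coloradoes(storage=_DictStorage())
#   c.command_select(0)
#   c.set('greeting', 'hello')
#   c.get('greeting')   # -> 'hello'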
asteroidhouse/equibel | tests/completion_tests.py | Python | mit | 2,605 | 0.002303
from sympy.logic.boolalg import *
import equibel as eb
def test_global_completion_cardinality():
G = eb.star_graph(3)
G.add_formula(1, 'p')
G.add_formula(2, 'p')
G.add_formula(3, '~p')
    R_semantic = eb.global_completion(G, method=eb.SEMANTIC, opt_type=eb.CARDINALITY, simplify=True)
assert(R_semantic.formula_conj(0) == eb.parse_formula('p'))
R_syntactic = eb.global_completion(G, method=eb.SYNTACTIC, opt_type=eb.CARDINALITY, simplify=True)
assert(R_syntactic == R_semantic)
def test_global_completion_two_nodes():
G = eb.path_graph(2)
    G.add_formula(0, 'p')
atoms = G.atoms()
R_semantic = eb.global_completion(G, method=eb.SEMANTIC, simplify=True)
print(R_semantic.formulas())
assert(R_semantic.formulas() == {0: atoms, 1: atoms})
R_syntactic = eb.global_completion(G, method=eb.SYNTACTIC, simplify=True)
assert(R_semantic == R_syntactic)
def test_global_completion_chain_1():
p = eb.parse_formula('p')
G = eb.path_graph(5)
G.add_formula(0, p)
G.add_formula(4, ~p)
R_semantic = eb.global_completion(G, method=eb.SEMANTIC, simplify=True)
print(R_semantic.formulas())
assert(R_semantic.formulas() == {0: set([p]), 1: set([]), 2: set([]), 3: set([]), 4: set([~p])})
R_syntactic = eb.global_completion(G, method=eb.SYNTACTIC, simplify=True)
assert(R_semantic == R_syntactic)
def test_global_completion_chain_2():
p = eb.parse_formula('p')
q = eb.parse_formula('q')
G = eb.path_graph(4)
G.add_formula(0, p & q)
G.add_formula(3, ~p)
R_semantic = eb.global_completion(G, simplify=True)
assert(R_semantic.formulas() == {0: set([p & q]), 1: set([q]), 2: set([q]), 3: set([~p & q])})
R_syntactic = eb.global_completion(G, method=eb.SYNTACTIC, simplify=True)
assert(R_semantic == R_syntactic)
def test_global_completion_big_chain():
p = eb.parse_formula('p')
q = eb.parse_formula('q')
G = eb.path_graph(10)
G.add_formula(0, 'p & q')
G.add_formula(9, '~p & ~q')
R_semantic = eb.global_completion(G, simplify=True)
for node in range(1,9):
assert(R_semantic.formulas(node) == set([]))
assert(R_semantic.formulas(0) == set([p & q]))
assert(R_semantic.formulas(9) == set([~p & ~q]))
R_syntactic = eb.global_completion(G, method=eb.SYNTACTIC, simplify=True)
assert(R_semantic == R_syntactic)
if __name__ == '__main__':
test_global_completion_cardinality()
test_global_completion_two_nodes()
test_global_completion_chain_1()
test_global_completion_chain_2()
test_global_completion_big_chain()
fieldOfView/OctoPrintPlugin | OctoPrintOutputController.py | Python | agpl-3.0 | 1,276 | 0.003135
# Copyright (c) 2020 Aldo Hoeben / fieldOfView
# OctoPrintPlugin is released under the terms of the AGPLv3 or higher.
from cura.PrinterOutput.GenericOutputController import GenericOutputController
try:
# Cura 4.1 and newer
from cura.PrinterOutput.PrinterOutputDevice import PrinterOutputDevice, ConnectionState
from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel
except ImportError:
# Cura 3.5 - Cura 4.0
from cura.PrinterOutputDevice import PrinterOutputDevice, ConnectionState
from cura.PrinterOutput.PrinterOutputModel import PrinterOutputModel
class OctoPrintOutputController(GenericOutputController):
def __init__(self, output_device: "PrinterOutputDevice") -> None:
super().__init__(output_device)
def moveHead(self, printer: "PrinterOutputModel", x, y, z, speed) -> None:
axis_information = self._output_device.getAxisInformation()
if axis_information["x"].inverted:
x = -x
if axis_information["y"].inverted:
y = -y
if axis_information["z"].inverted:
z = -z
self._output_device.sendCommand("G91")
self._output_device.sendCommand("G0 X%s Y%s Z%s F%s" % (x, y, z, speed))
self._output_device.sendCommand("G90")
toolhub/toolhub.co | accounts/urls.py | Python | bsd-3-clause | 1,267 | 0
from django.conf.urls import url, patterns, include
from accounts import views
user_tool_patterns = patterns(
"",
url(r"^lending/$", views.LendingManager.as_view(), name="lending"),
url(r"^manager/$", views.ToolManager.as_view(), name="manager"),
)
# namespaced under account:
urlpatterns = patterns(
"",
url(r"^$", views.SettingsView.as_view(), name="settings"),
url(r"^login/$", views.LoginView.as_view(), name="login"),
url(r"^logout/$", views.LogoutView.as_view(), name="logout"),
url(r"^register/$", views.SignupView.as_view(), name="signup"),
url(r"^user/(?P<username>[-_\w]+)/$",
views.UserDetailView.as_view(), name="user_detail"),
url(r"^confirm_email/(?P<key>\w+)/$", vie
|
ws.ConfirmEmailView.as_view(),
name="confirm_email"),
url(r"^password/$", views.ChangePasswordView.as_view(),
name="password"),
url(r"^password/reset/$", views.PasswordResetView.as_view(),
name="password_reset"),
url(r"^password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$",
        views.PasswordResetTokenView.as_view(),
name="password_reset_token"),
url(r"^delete/$", views.DeleteView.as_view(), name="delete"),
url(r"^tool/", include(user_tool_patterns, namespace="tool")),
)
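# Usage sketch (illustrative; not part of the original module), assuming this
# URLconf is included under the "account" namespace as noted above. The
# old-style patterns()/url() calls imply a pre-1.10 Django, where reverse()
# lives in django.core.urlresolvers; the resulting paths depend on how the
# project mounts this URLconf:
#
#   from django.core.urlresolvers import reverse
#   reverse("account:login")
#   reverse("account:user_detail", kwargs={"username": "alice"})
#   reverse("account:tool:lending")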