repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
holmes-app/holmes-api | tests/unit/handlers/test_domains_violations_prefs.py | Python | mit | 10,248 | 0.000878 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from preggy import expect
from tornado.testing import gen_test
from tornado.httpclient import HTTPError
from ujson import loads, dumps
from tests.unit.base import ApiTestCase
from tests.fixtures import (
DomainFactory, DomainsViolationsPrefsFactory, KeyFactory, UserFactory
)
from holmes.models import (
DomainsViolationsPrefs, Key, KeysCategory, Domain, User
)
class TestDomainsViolationsPrefsHandler(ApiTestCase):
def tearDown(self):
self.db.rollback()
self.db.query(DomainsViolationsPrefs).delete()
self.db.query(Domain).delete()
self.db.query(Key).delete()
self.db.query(KeysCategory).delete()
self.db.query(User).delete()
self.db.commit()
self.server.application.redis.flushdb()
super(ApiTestCase, self).tearDown()
@gen_test
def test_can_get_prefs_for_invalid_domain(self):
try:
yield self.authenticated_fetch('/domains/blah.com/violations-prefs/')
except HTTPError, e:
expect(e).not_to_be_null()
expect(e.code).to_equal(404)
expect(e.response.reason).to_equal('Domain blah.com not found')
@gen_test
def test_cant_get_prefs_as_anonymous_user(self):
try:
yield self.anonymous_fetch('/domains/blah.com/violations-prefs/')
except HTTPError, e:
expect(e).not_to_be_null()
expect(e.code).to_equal(401)
expect(e.response.reason).to_equal('Unauthorized')
@gen_test
def test_can_get_prefs(self):
domain = DomainFactory.create(name='globo.com')
key1 = KeyFactory.create(name='some.random.1')
key2 = KeyFactory.create(name='some.random.2')
DomainsViolationsPrefsFactory.create(domain=domain, key=key1, value=100)
DomainsViolationsPrefsFactory.create(domain=domain, key=key2, value=2)
self.server.application.violation_definitions = {
'some.random.1': {
'category': 'SEO',
'default_value': 100,
'default_value_description': 'My some.random.1',
'unit': 'number'
},
'some.random.2': {
'category': 'HTTP',
'default_value': 2,
'default_value_description': 'My some.random.2',
'unit': 'number'
},
}
response = yield self.authenticated_fetch(
'/domains/%s/violations-prefs/' % domain.name
)
expect(response.code).to_equal(200)
prefs = loads(response.body)
expect(prefs).to_length(2)
expect(prefs[0]).to_length(6)
expect(prefs[1]).to_length(6)
expect(prefs).to_be_ | like([
{
'category': 'SEO',
'default_value': 100,
'title': 'My some.random.1',
'value': 100,
'key': 'some.random.1',
'unit': | 'number'
},{
'category': 'HTTP',
'default_value': 2,
'title': 'My some.random.2',
'value': 2,
'key': 'some.random.2',
'unit': 'number'
}
])
@gen_test
def test_can_get_prefs_with_invalid_violation_definition(self):
domain = DomainFactory.create(name='globo.com')
key = KeyFactory.create(name='some.random.1')
DomainsViolationsPrefsFactory.create(domain=domain, key=key)
self.server.application.violation_definitions = {}
response = yield self.authenticated_fetch(
'/domains/%s/violations-prefs/' % domain.name
)
expect(response.code).to_equal(200)
expect(loads(response.body)).to_length(0)
@gen_test
def test_can_save_prefs_as_superuser(self):
self.db.query(User).delete()
user = UserFactory(email='superuser@user.com', is_superuser=True)
domain = DomainFactory.create(name='globo.com')
key = KeyFactory.create(name='some.random')
DomainsViolationsPrefsFactory.create(domain=domain, key=key, value=100)
loaded_prefs = DomainsViolationsPrefs.get_domains_violations_prefs_by_domain(self.db, domain.name)
expect(loaded_prefs).to_length(1)
expect(loaded_prefs[0]).to_be_like({
'value': 100,
'key': 'some.random'
})
yield self.authenticated_fetch(
'/domains/%s/violations-prefs/' % domain.name,
user_email=user.email,
method='POST',
body=dumps([
{'key': 'some.random', 'value': 10},
])
)
loaded_prefs = DomainsViolationsPrefs.get_domains_violations_prefs_by_domain(self.db, domain.name)
expect(loaded_prefs).to_length(1)
expect(loaded_prefs[0]).to_be_like({
'value': 10,
'key': 'some.random'
})
@gen_test
def test_cant_save_prefs_as_normal_user(self):
self.db.query(User).delete()
user = UserFactory(email='normalser@user.com', is_superuser=False)
domain = DomainFactory.create(name='globo.com')
key = KeyFactory.create(name='some.random')
DomainsViolationsPrefsFactory.create(domain=domain, key=key, value=100)
loaded_prefs = DomainsViolationsPrefs.get_domains_violations_prefs_by_domain(self.db, domain.name)
expect(loaded_prefs).to_length(1)
expect(loaded_prefs[0]).to_be_like({
'value': 100,
'key': 'some.random'
})
try:
yield self.authenticated_fetch(
'/domains/%s/violations-prefs/' % domain.name,
user_email=user.email,
method='POST',
body=dumps([
{'key': 'some.random', 'value': 10},
])
)
except HTTPError, e:
expect(e).not_to_be_null()
expect(e.code).to_equal(401)
expect(e.response.reason).to_be_like('Unauthorized')
else:
assert False, 'Should not have got this far'
loaded_prefs = DomainsViolationsPrefs.get_domains_violations_prefs_by_domain(self.db, domain.name)
expect(loaded_prefs).to_length(1)
expect(loaded_prefs[0]).to_be_like({
'value': 100,
'key': 'some.random'
})
@gen_test
def test_cant_save_prefs_as_anonymous_user(self):
domain = DomainFactory.create(name='globo.com')
key = KeyFactory.create(name='some.random')
DomainsViolationsPrefsFactory.create(domain=domain, key=key, value=100)
loaded_prefs = DomainsViolationsPrefs.get_domains_violations_prefs_by_domain(self.db, domain.name)
expect(loaded_prefs).to_length(1)
expect(loaded_prefs[0]).to_be_like({
'value': 100,
'key': 'some.random'
})
try:
yield self.anonymous_fetch(
'/domains/%s/violations-prefs/' % domain.name,
method='POST',
body=dumps([
{'key': 'some.random', 'value': 10},
])
)
except HTTPError, e:
expect(e).not_to_be_null()
expect(e.code).to_equal(401)
expect(e.response.reason).to_be_like('Unauthorized')
else:
assert False, 'Should not have got this far'
loaded_prefs = DomainsViolationsPrefs.get_domains_violations_prefs_by_domain(self.db, domain.name)
expect(loaded_prefs).to_length(1)
expect(loaded_prefs[0]).to_be_like({
'value': 100,
'key': 'some.random'
})
@gen_test
def test_can_save_prefs_for_invalid_domain_as_superuser(self):
self.db.query(User).delete()
user = UserFactory(email='superuser@user.com', is_superuser=True)
try:
yield self.authenticated_fetch(
'/domains/blah.com/violations-prefs/',
method='POST',
user_email=user.email,
body=dumps([
{'key': 'some.random', 'value': 10},
]) |
WikipediaLibrary/TWLight | TWLight/users/management/commands/user_update_eligibility.py | Python | mit | 5,266 | 0.003228 | from datetime import timedelta
import logging
from django.utils.timezone import now
from django.core.management.base import BaseCommand
from TWLight.users.models import Editor
from TWLight.users.helpers.editor_data import (
editor_global_userinfo,
editor_valid,
editor_enough_edits,
editor_not_blocked,
editor_bundle_eligible,
editor_account_old_enough,
)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Updates editor info and Bundle eligibility for currently-eligible Editors."
def add_arguments(self, parser):
"""
Adds command arguments.
"""
parser.add_argument(
"--datetime",
action="store",
help="ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.",
)
parser.add_argument(
"--global_userinfo",
action="store",
help="Specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--timedelta_days",
action="store",
help="Number of days used to define 'recent' edits. Defaults to 30. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--wp_username",
action="store",
help="Specify a single editor to update. Other arguments and filters still apply.",
)
def handle(self, *args, **options):
"""
Updates editor info and Bundle eligibility for currently-eligible Editors.
Parameters
----------
args
options
Returns
-------
None
"""
# Default behavior is to use current datetime for timestamps to check all editors.
now_or_datetime = now()
datetime_override = None
timedelta_days = 0
wp_username = None
editors = Editor.objects.all()
# This may be overridden so that values may be treated as if they were valid for an arbitrary datetime.
# This is also passed to the model method.
if options["datetime"]:
datetime_override = now_or_datetime.fromisoformat(options["datetime"])
now_or_datetime = datetime_override
# These are used to limit the set of editors updated by the command.
# Nothing is passed to the model method.
if options["timedelta_days"]:
timedelta_days = int(options["timedelta_days"])
# Get editors that haven't been updated in the specified time range, with an option to limit on wp_u | sername.
if timedelta_days:
editors = editors.exclude(
editorlogs__timestamp__gt=now_or_datetime
- timedelta(days=timedelta_days),
)
# Optional wp_username filter.
if options["wp | _username"]:
editors = editors.filter(wp_username=str(options["wp_username"]))
# Iterator reduces memory footprint for large querysets
for editor in editors.iterator():
# T296853: avoid stale editor data while looping through big sets.
editor.refresh_from_db()
# `global_userinfo` data may be overridden.
if options["global_userinfo"]:
global_userinfo = options["global_userinfo"]
editor.check_sub(global_userinfo["id"])
# Default behavior is to fetch live `global_userinfo`
else:
global_userinfo = editor_global_userinfo(editor.wp_sub)
if global_userinfo:
editor.update_editcount(global_userinfo["editcount"], datetime_override)
# Determine editor validity.
editor.wp_enough_edits = editor_enough_edits(editor.wp_editcount)
editor.wp_not_blocked = editor_not_blocked(global_userinfo["merged"])
# We will only check if the account is old enough if the value is False
# Accounts that are already old enough will never cease to be old enough
if not editor.wp_account_old_enough:
editor.wp_account_old_enough = editor_account_old_enough(
editor.wp_registered
)
editor.wp_valid = editor_valid(
editor.wp_enough_edits,
editor.wp_account_old_enough,
# editor.wp_not_blocked can only be rechecked on login, so we're going with the existing value.
editor.wp_not_blocked,
editor.ignore_wp_blocks,
)
# Determine Bundle eligibility.
editor.wp_bundle_eligible = editor_bundle_eligible(editor)
# Save editor.
editor.save()
# Prune EditorLogs, with daily_prune_range set to only check the previous day to improve performance.
editor.prune_editcount(
current_datetime=datetime_override, daily_prune_range=2
)
# Update bundle authorizations.
editor.update_bundle_authorization()
|
blaze/dask | dask/blockwise.py | Python | bsd-3-clause | 56,476 | 0.001169 | from __future__ import annotations
import itertools
import os
from collections.abc import Hashable, Iterable, Mapping, Sequence
from itertools import product
from typing import Any
import tlz as toolz
from .base import clone_key, get_name_from_key, tokenize
from .compatibility import prod
from .core import flatten, keys_in_tasks, reverse_dict
from .delayed import unpack_collections
from .highlevelgraph import HighLevelGraph, Layer
from .optimization import SubgraphCallable, fuse
from .utils import (
_deprecated,
apply,
ensure_dict,
homogeneous_deepmap,
stringify,
stringify_collection_keys,
)
class BlockwiseDep:
"""Blockwise-IO argument
This is the base class for indexable Blockwise-IO arguments.
When constructing a ``Blockwise`` Layer, one or more of the
collection tuples passed in with ``indices`` may contain a
``BlockwiseDep`` instance (in place of a "real" collection name).
This allows a new collection to be created (via IO) within a
``Blockwise`` layer.
All ``BlockwiseDep`` instances must define a ``numblocks``
attribute to speficy the number of blocks/partitions the
object can support along each dimension. The object should
also define a ``produces_tasks`` attribute to specify if
any nested tasks will be passed to the Blockwise function.
See Also
--------
dask.blockwise.Blockwise
dask.blockwise.BlockwiseDepDict
"""
numblocks: tuple[int, ...]
produces_tasks: bool
def __getitem__(self, idx: tuple[int, ...]) -> Any:
"""Return Blockwise-function arguments for a specific index"""
raise NotImplementedError(
"Must define `__getitem__` for `BlockwiseDep` subclass."
)
def get(self, idx: tuple[int, ...], default) -> Any:
"""BlockwiseDep ``__getitem__`` Wrapper"""
try:
return self.__getitem__(idx)
except KeyError:
return default
def __dask_distributed_pack__(
self, required_indices: list[tuple[int, ...]] | None = None
):
"""Client-side serialization for ``BlockwiseDep`` objects.
Should return a ``state`` dictionary, with msgpack-serializable
values, that can be used to initialize a new ``BlockwiseDep`` object
on a scheduler process.
"""
raise NotImplementedError(
"Must define `__dask_distributed_pack__` for `BlockwiseDep` subclass."
)
@classmethod
def __dask_distributed_unpack__(cls, state):
"""Scheduler-side deserialization for ``BlockwiseDep`` objects.
Should use an input ``state`` dictionary to initialize a new
``BlockwiseDep`` object.
"""
raise NotImplementedError(
"Must define `__dask_distributed_unpack__` for `BlockwiseDep` subclass."
)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.numblocks}>"
class BlockwiseDepDict(BlockwiseDep):
"""Dictionary-based Blockwise-IO argument
This is a dictionary-backed instance of ``BlockwiseDep``.
The purpose of this class is to simplify the construction
of IO-based Blockwise Layers with block/partition-dependent
function arguments that are difficult to calculate at
graph-materialization time.
Examples
--------
Specify an IO-based function for the Blockwise Layer. Note
that the function will be passed a single input object when
the task is executed (e.g. a single ``tuple`` or ``dict``):
>>> import pandas as pd
>>> func = lambda x: pd.read_csv(**x)
Use ``BlockwiseDepDict`` to define the input argument to
``func`` for each block/partition:
>>> dep = BlockwiseDepDict(
... mapping={
... (0,) : {
... "filepath_or_buffer": "data.csv",
... "skiprows": 1,
... "nrows": 2,
... "names": ["a", "b"],
... },
... (1,) : {
... "filepath_or_buffer": "data.csv",
... "skiprows": 3,
... "nrows": 2,
... "names": ["a", "b"],
... },
... }
... )
Construct a Blockwise Layer with ``dep`` speficied
in the ``indices`` list:
>>> layer = Blockwise(
... output="collection-name",
... output_indices="i",
... dsk={"collection-name": (func, '_0')},
... indices=[(dep, "i")],
... numblocks={},
... )
See Also
--------
dask.blockwise.Blockwise
dask.blockwise.BlockwiseDep
"""
def __init__(
self,
mapping: dict,
numblocks: tuple[int, ...] | None = None,
produces_tasks: bool = False,
):
self.mapping = mapping
self.produces_tasks = produces_tasks
# By default, assume 1D shape
self.numblocks = numblocks or (len(mapping),)
def __getitem__(self, idx: tuple[int, ...]) -> Any:
return self.mapping[idx]
def __dask_distributed_pack__(
self, required_indices: list[tuple[int, ...]] | None = None
):
from distributed.protocol import to_serialize
if required_indices is None:
required_indices = self.mapping.keys()
return {
"mapping": {k: to_serialize(self.mapping[k]) for k in required_indices},
"numblocks": self.numblocks,
"produces_tasks": self.produces_tasks,
}
@classmethod
def __dask_distributed_unpack__(cls, state):
return cls(**state)
class BlockIndex(BlockwiseDep):
"""Index BlockwiseDep argument
The purpose of this class is to provide each
block of a ``Blockwise``-based operation with
the current block index.
"""
produces_tasks: bool = False
def __init__(self, numblocks: tuple[int, ...]):
# NOTE: Unused - Just needs to be set to
# follow the `BlockwiseDep` interface
self.numblocks = numblocks
def __getitem__(self, idx: tuple[int, ...]) -> tuple[int, ...]:
return idx
def __dask_distributed_pack__(self, **kwargs):
return {"numblocks": self.numblocks}
@classmethod
def __dask_distributed_unpack__(cls, state):
return cls(**state)
def subs(task, substitution):
"""Create a new task with the values substituted
This is like dask.core.subs, but takes a dict of many substitutions to
perform simultaneously. It is not as concerned with micro performance.
"""
if isinstance(task, dict):
return {k: subs(v, substitution) for k, v in task.items()}
if type(task) in (tuple, list, set):
return type(task)([subs(x, substitution) for x in task])
try:
return substitution[task]
except (KeyError, TypeError):
return task
def index_subs(ind, substitution):
"""A simple subs function that works both on tuples and strings"""
if ind is None:
return ind
else:
return tuple(substitution.get(c, c) for c in ind)
_BLOCKWISE_DEFAULT_PREFIX = "__dask_blockwise__"
def blockwise_token(i, prefix=_BLOCKWISE_DEFAULT_PREFIX):
return prefix + "%d" % i
def blockwise(
func,
output,
output_indices,
*arrind_pairs,
numblocks=None,
concatenate=None,
new_axes=None,
dependencies=(),
**kwargs,
):
"""Create a Blockwise symbolic mutable mapping
This is like the ``make_blockwise_graph`` function, but rather than construct a
dict, it returns a symbolic B | lockwise object.
``*arrind_pairs`` is similar to those in `make_blockwise_graph`, but in addition to
allowing for collections it can accept BlockwiseDep instances, which allows for lazy
evaluation of arguments to ``func`` which might be different for different
chunks/paritions.
See Also
--------
make_blockwise_graph
Blockwise
"""
new_axes = new_axes or {}
arrind_pairs = list(arrind_pairs)
# Transform indices to canon | ical elements
# We use terms like _0, and _1 rather than provided index elements
unique_indices = {
i for ii in arrind_pairs[1::2] if ii is not None for i in ii
} | set(output_indices)
sub = { |
Inspq/ansible | lib/ansible/modules/windows/win_robocopy.py | Python | gpl-3.0 | 4,912 | 0.002036 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Corwin Brown <blakfeld@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_robocopy
version_added: "2.2"
short_description: Synchronizes the contents of two directories using Robocopy.
description:
- Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available on most modern Windows | Systems.
options:
src:
description:
- Source file/directory to sync.
required: true
dest:
description:
- Destination file/directory to sync (Will receive contents of src).
required: true
recurse:
description:
- Includes all subdirectories (Toggles the `/e` flag to RoboCopy). If "flags" is set, this will be ignored.
choices:
- true
- false
default: false
required: false
purge:
description:
- Deletes any files/directories fou | nd in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is set, this will be ignored.
choices:
- true
- false
default: false
required: false
flags:
description:
- Directly supply Robocopy flags. If set, purge and recurse will be ignored.
default: None
required: false
author: Corwin Brown (@blakfeld)
notes:
- This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine, not from the master to the remote machine.
- This module does not currently support all Robocopy flags.
- Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12
'''
EXAMPLES = r'''
- name: Sync the contents of one directory to another
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
- name: Sync the contents of one directory to another, including subdirectories
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
recurse: True
- name: Sync the contents of one directory to another, and remove any files/directories found in destination that do not exist in the source
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
purge: True
- name: Sync content in recursive mode, removing any files/directories found in destination that do not exist in the source
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
recurse: True
purge: True
- name: Sync Two Directories in recursive and purging mode, specifying additional special flags
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
flags: /E /PURGE /XD SOME_DIR /XF SOME_FILE /MT:32
'''
RETURN = r'''
src:
description: The Source file/directory of the sync.
returned: always
type: string
sample: c:\Some\Path
dest:
description: The Destination file/directory of the sync.
returned: always
type: string
sample: c:\Some\Path
recurse:
description: Whether or not the recurse flag was toggled.
returned: always
type: bool
sample: False
purge:
description: Whether or not the purge flag was toggled.
returned: always
type: bool
sample: False
flags:
description: Any flags passed in by the user.
returned: always
type: string
sample: "/e /purge"
rc:
description: The return code retuned by robocopy.
returned: success
type: int
sample: 1
output:
description: The output of running the robocopy command.
returned: success
type: string
sample: "-------------------------------------------------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n-------------------------------------------------------------------------------\n"
msg:
description: Output intrepreted into a concise message.
returned: always
type: string
sample: No files copied!
changed:
description: Whether or not any changes were made.
returned: always
type: bool
sample: False
'''
|
mehtadev17/mapusaurus | mapusaurus/respondents/managers.py | Python | cc0-1.0 | 254 | 0.003937 | from d | jango.db import models
class AgencyManager(models.Manager):
def get_all_by_code(self):
agencies = self.all()
agency_map = {}
for agency in agencies:
agency_map[agency.pk] = agency
return agency_ | map
|
xuru/pyvisdk | pyvisdk/mo/virtual_disk_manager.py | Python | mit | 16,908 | 0.011356 |
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class VirtualDiskManager(BaseEntity):
'''This managed object type provides a way to manage and manipulate virtual disks
on datastores. The source and the destination names are in the form of a URL or
a datastore path.A URL has the formwhere* is or . * spe | cifies the hostname or
IP address of the VirtualCenter or ESX server and optionally the port. * is the
inventory path to the Datacenter containing the Datastore. * i | s the name of the
Datastore. * is a slash-delimited path from the root of the datastore.A
datastore path has the formwhere* is the datastore name. * is a slash-delimited
path from the root of the datastore.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.VirtualDiskManager):
super(VirtualDiskManager, self).__init__(core, name=name, ref=ref, type=type)
def CopyVirtualDisk_Task(self, sourceName, destName, sourceDatacenter=None, destDatacenter=None, destSpec=None, force=None):
'''Copy a virtual disk, performing conversions as specified in the spec.Copy a
virtual disk, performing conversions as specified in the spec.Copy a virtual
disk, performing conversions as specified in the spec.Copy a virtual disk,
performing conversions as specified in the spec.
:param sourceName: The name of the source, either a datastore path or a URL referring to the virtual disk to be copied.
:param sourceDatacenter: Ifis a datastore path, the datacenter for that datastore path. Not needed when invoked directly on ESX. If not specified on a call to VirtualCenter,must be a URL.
:param destName: The name of the destination, either a datastore path or a URL referring to the virtual disk to be created.
:param destDatacenter: Ifis a datastore path, the datacenter for that datastore path. Not needed when invoked directly on ESX. If not specified on a call to VirtualCenter, it is assumed that the destination path belongs to the source datacenter.
:param destSpec: The specification of the virtual disk to be created. If not specified, a preallocated format and busLogic adapter type is assumed.
:param force: The force flag is currently ignored. The FileAlreadyExists fault is thrown if the destination file already exists.
'''
return self.delegate("CopyVirtualDisk_Task")(sourceName, sourceDatacenter, destName, destDatacenter, destSpec, force)
def CreateVirtualDisk_Task(self, name, spec, datacenter=None):
'''Create a virtual disk.Create a virtual disk.Create a virtual disk.
:param name: The name of the disk, either a datastore path or a URL referring to the virtual disk to be created.
:param datacenter: Ifis a datastore path, the datacenter for that datastore path. Not needed when invoked directly on ESX. If not specified on a call to VirtualCenter,must be a URL.
:param spec: The specification of the virtual disk to be created.
'''
return self.delegate("CreateVirtualDisk_Task")(name, datacenter, spec)
def DefragmentVirtualDisk_Task(self, name, datacenter=None):
'''Defragment a sparse virtual disk. This is defragmentation of the virtual disk
file(s) in the host operating system, not defragmentation of the guest
operating system filesystem inside the virtual disk.Defragment a sparse virtual
disk. This is defragmentation of the virtual disk file(s) in the host operating
system, not defragmentation of the guest operating system filesystem inside the
virtual disk.Defragment a sparse virtual disk. This is defragmentation of the
virtual disk file(s) in the host operating system, not defragmentation of the
guest operating system filesystem inside the virtual disk.
:param name: The name of the disk, either a datastore path or a URL referring to the virtual disk that should be defragmented.
:param datacenter: Ifis a datastore path, the datacenter for that datastore path. Not needed when invoked directly on ESX. If not specified on a call to VirtualCenter,must be a URL.
'''
return self.delegate("DefragmentVirtualDisk_Task")(name, datacenter)
def DeleteVirtualDisk_Task(self, name, datacenter=None):
'''Delete a virtual disk. All files relating to the disk will be deleted.Delete a
virtual disk. All files relating to the disk will be deleted.Delete a virtual
disk. All files relating to the disk will be deleted.
:param name: The name of the disk, either a datastore path or a URL referring to the virtual disk to be deleted.
:param datacenter: Ifis a datastore path, the datacenter for that datastore path. Not needed when invoked directly on ESX. If not specified on a call to VirtualCenter,must be a URL.
'''
return self.delegate("DeleteVirtualDisk_Task")(name, datacenter)
def EagerZeroVirtualDisk_Task(self, name, datacenter=None):
'''Explicitly zero out unaccessed parts zeroedthick disk. Effectively a no-op if
the disk is already eagerZeroedThick. Unlike zeroFillVirtualDisk, which wipes
the entire disk, this operation only affects previously unaccessed parts of the
disk.Explicitly zero out unaccessed parts zeroedthick disk. Effectively a no-op
if the disk is already eagerZeroedThick. Unlike zeroFillVirtualDisk, which
wipes the entire disk, this operation only affects previously unaccessed parts
of the disk.Explicitly zero out unaccessed parts zeroedthick disk. Effectively
a no-op if the disk is already eagerZeroedThick. Unlike zeroFillVirtualDisk,
which wipes the entire disk, this operation only affects previously unaccessed
parts of the disk.
:param name: The name of the disk, either a datastore path or a URL referring to the virtual disk that should be inflated.
:param datacenter: Ifis a datastore path, the datacenter for that datastore path. Not needed when invoked directly on ESX. If not specified on a call to VirtualCenter,must be a URL.
'''
return self.delegate("EagerZeroVirtualDisk_Task")(name, datacenter)
def ExtendVirtualDisk_Task(self, name, newCapacityKb, datacenter=None, eagerZero=None):
'''Expand the capacity of a virtual disk to the new capacity. If the eagerZero
flag is not specified, - the extended disk region of a zerothick disk will be
zeroedthick - the extended disk region of a eagerzerothick disk will be
eagerzeroedthick - a thin-provisioned disk will always be extended as a thin-
provisioned disk. If the eagerZero flag TRUE, the extended region of the disk
will always be eagerly zeroed. If the eagerZero flag FALSE, the extended region
of a zeroedthick or eagerzeroedthick the disk will not be eagerly zeroed. This
condition has no effect on a thin source disk.Expand the capacity of a virtual
disk to the new capacity. If the eagerZero flag is not specified, - the
extended disk region of a zerothick disk will be zeroedthick - the extended
disk region of a eagerzerothick disk will be eagerzeroedthick - a thin-
provisioned disk will always be extended as a thin-provisioned disk. If the
eagerZero flag TRUE, the extended region of the disk will always be eagerly
zeroed. If the eagerZero flag FALSE, the extended region of a zeroedthick or
eagerzeroedthick the disk will not be eagerly zeroed. This condition has no
effect on a thin source disk.Expand the capacity of a virtual disk to the new
capacity. If the eagerZero flag is not spe |
mwhudson/qemu | scripts/vmstate-static-checker.py | Python | gpl-2.0 | 12,817 | 0.001482 | #!/usr/bin/python
#
# Compares vmstate information stored in JSON format, obtained from
# the -dump-vmstate QEMU command.
#
# Copyright 2014 | Amit Shah <amit.shah@redhat.com>
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILIT | Y or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
# Count the number of errors found
taint = 0
def bump_taint():
global taint
# Ensure we don't wrap around or reset to 0 -- the shell only has
# an 8-bit return value.
if taint < 255:
taint = taint + 1
def check_fields_match(name, s_field, d_field):
    """Return True if src field `s_field` and dest field `d_field` are
    considered the same field of section/description `name`.

    Equal names always match; otherwise both names must appear in the
    per-section whitelist of known renames below.
    """
    if s_field == d_field:
        return True
    # Some fields changed names between qemu versions.  This list
    # is used to whitelist such changes in each section / description.
    changed_names = {
        'e1000': ['dev', 'parent_obj'],
        'ehci': ['dev', 'pcidev'],
        'I440FX': ['dev', 'parent_obj'],
        'ich9_ahci': ['card', 'parent_obj'],
        'ioh-3240-express-root-port': ['port.br.dev',
                                       'parent_obj.parent_obj.parent_obj',
                                       'port.br.dev.exp.aer_log',
                        'parent_obj.parent_obj.parent_obj.exp.aer_log'],
        'mch': ['d', 'parent_obj'],
        'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
        'pcnet': ['pci_dev', 'parent_obj'],
        'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
        'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
                     'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]'],
        'rtl8139': ['dev', 'parent_obj'],
        'qxl': ['num_surfaces', 'ssd.num_surfaces'],
        'usb-host': ['dev', 'parent_obj'],
        'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
        'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
        'xhci': ['pci_dev', 'parent_obj'],
        'xio3130-express-downstream-port': ['port.br.dev',
                                            'parent_obj.parent_obj.parent_obj',
                                            'port.br.dev.exp.aer_log',
                        'parent_obj.parent_obj.parent_obj.exp.aer_log'],
        'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
                                          'br.dev.exp.aer_log',
                                          'parent_obj.parent_obj.exp.aer_log'],
    }

    # PEP 8 / E713: use "not in" rather than "not x in".
    if name not in changed_names:
        return False
    if s_field in changed_names[name] and d_field in changed_names[name]:
        return True
    return False
def get_changed_sec_name(sec):
    """Map a section name to its known alias (either direction).

    Section names can change -- see commit 292b1634 for an example.
    Returns "" when no alias is known.
    """
    changes = {
        "ICH9 LPC": "ICH9-LPC",
    }
    for old_name, new_name in changes.items():
        if sec == old_name:
            return new_name
        if sec == new_name:
            return old_name
    return ""
def exists_in_substruct(fields, item):
    # Some QEMU versions moved a few fields inside a substruct.  This
    # kept the on-wire format the same.  This function checks if
    # something got shifted inside a substruct.  For example, the
    # change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f
    if "Description" not in fields:
        return False
    description = fields["Description"]
    if "Fields" not in description:
        return False
    members = description["Fields"]
    if members == []:
        return False
    # The shifted field, if any, is the first member of the substruct.
    return check_fields_match(description["name"],
                              members[0]["field"], item)
def check_fields(src_fields, dest_fields, desc, sec):
    # This function checks for all the fields in a section. If some
    # fields got embedded into a substruct, this function will also
    # attempt to check inside the substruct.
    #
    # The two field lists are walked in lock-step; when one side has a
    # substruct the other lacks, we descend into it (pushing the outer
    # iterator on a stack) so the flat on-wire layouts stay comparable.
    d_iter = iter(dest_fields)
    s_iter = iter(src_fields)
    # Using these lists as stacks to store previous value of s_iter
    # and d_iter, so that when time comes to exit out of a substruct,
    # we can go back one level up and continue from where we left off.
    s_iter_list = []
    d_iter_list = []
    advance_src = True
    advance_dest = True
    while True:
        if advance_src:
            try:
                s_item = s_iter.next()
            except StopIteration:
                # Source exhausted: pop back out of a substruct, or stop.
                if s_iter_list == []:
                    break
                s_iter = s_iter_list.pop()
                continue
        else:
            # We want to avoid advancing just once -- when entering a
            # dest substruct, or when exiting one.
            advance_src = True
        if advance_dest:
            try:
                d_item = d_iter.next()
            except StopIteration:
                if d_iter_list == []:
                    # We were not in a substruct
                    print "Section \"" + sec + "\",",
                    print "Description " + "\"" + desc + "\":",
                    print "expected field \"" + s_item["field"] + "\",",
                    print "while dest has no further fields"
                    bump_taint()
                    break
                d_iter = d_iter_list.pop()
                advance_src = False
                continue
        else:
            advance_dest = True
        if not check_fields_match(desc, s_item["field"], d_item["field"]):
            # Some fields were put in substructs, keeping the
            # on-wire format the same, but breaking static tools
            # like this one.
            # First, check if dest has a new substruct.
            if exists_in_substruct(d_item, s_item["field"]):
                # listiterators don't have a prev() function, so we
                # have to store our current location, descend into the
                # substruct, and ensure we come out as if nothing
                # happened when the substruct is over.
                #
                # Essentially we're opening the substructs that got
                # added which didn't change the wire format.
                d_iter_list.append(d_iter)
                substruct_fields = d_item["Description"]["Fields"]
                d_iter = iter(substruct_fields)
                advance_src = False
                continue
            # Next, check if src has substruct that dest removed
            # (can happen in backward migration: 2.0 -> 1.5)
            if exists_in_substruct(s_item, d_item["field"]):
                s_iter_list.append(s_iter)
                substruct_fields = s_item["Description"]["Fields"]
                s_iter = iter(substruct_fields)
                advance_dest = False
                continue
            # Genuine mismatch: report it and give up on this section.
            print "Section \"" + sec + "\",",
            print "Description \"" + desc + "\":",
            print "expected field \"" + s_item["field"] + "\",",
            print "got \"" + d_item["field"] + "\"; skipping rest"
            bump_taint()
            break
        check_version(s_item, d_item, sec, desc)
        if not "Description" in s_item:
            # Check size of this field only if it's not a VMSTRUCT entry
            check_size(s_item, d_item, sec, desc, s_item["field"])
        check_description_in_list(s_item, d_item, sec, desc)
def check_subsections(src_sub, dest_sub, desc, sec):
for s_item in src_sub:
found = False
for d_item in dest_sub:
if s_item["name"] != d_item["name"]:
continue
found = True
check_descriptions(s_item, d_item, sec)
if not found:
print "Section \"" + sec + "\", Description \"" + desc + "\":",
print "Sub |
nvoron23/scipy | scipy/sparse/tests/test_base.py | Python | bsd-3-clause | 151,696 | 0.011286 | #
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices. Each class in the "Matrix class
based tests" section become subclasses of the classes in the "Generic
tests" section. This is done by the functions in the "Tailored base
class for generic tests" section.
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_base.py
"""
import warnings
import operator
import contextlib
import numpy as np
from scipy._lib.six import xrange, zip as izip
from numpy import (arange, zeros, array, dot, matrix, asmatrix, asarray,
vstack, ndarray, transpose, diag, kron, inf, conjugate,
int8, ComplexWarning, power)
import random
from numpy.testing import (assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_almost_equal, assert_,
dec, run_module_suite, assert_allclose)
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix,
coo_matrix, lil_matrix, dia_matrix, bsr_matrix,
eye, isspmatrix, SparseEfficiencyWarning, issparse)
from scipy.sparse.sputils import supported_dtypes, isscalarlike, get_index_dtype
from scipy.sparse.linalg import splu, expm, inv
from scipy._lib._version import NumpyVersion
from scipy._lib.decorator import decorator
import nose
# Check for __numpy_ufunc__
class _UFuncCheck(object):
def __array__(self):
return np.array([1])
def __numpy_ufunc__(self, *a, **kwargs):
global HAS_NUMPY_UFUNC
HAS_NUMPY_UFUNC = True
HAS_NUMPY_UFUNC = False
np.add(_UFuncCheck(), np.array([1]))
warnings.simplefilter('ignore', SparseEfficiencyWarning)
warnings.simplefilter('ignore', ComplexWarning)
def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None,
                            downcast_maxval=None, assert_32bit=False):
    """
    Monkeypatch the maxval threshold at which scipy.sparse switches to
    64-bit index arrays, or make it (pseudo-)random.
    """
    # Default threshold kept tiny so the 64-bit path is easy to trigger.
    if maxval_limit is None:
        maxval_limit = 10
    # Build the replacement get_index_dtype; exactly one branch applies.
    if assert_32bit:
        # Verify that 32-bit indices are always chosen.
        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
            tp = get_index_dtype(arrays, maxval, check_contents)
            assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max)
            assert_(tp == np.int32 or tp == np.intc)
            return tp
    elif fixed_dtype is not None:
        # Force a caller-specified index dtype unconditionally.
        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
            return fixed_dtype
    elif random:
        # Deterministic pseudo-random choice (fixed seed).
        counter = np.random.RandomState(seed=1234)
        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
            return (np.int32, np.int64)[counter.randint(2)]
    else:
        # Mimic the real logic, but with the (small) maxval_limit threshold.
        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
            dtype = np.int32
            if maxval is not None:
                if maxval > maxval_limit:
                    dtype = np.int64
            for arr in arrays:
                arr = np.asarray(arr)
                if arr.dtype > np.int32:
                    if check_contents:
                        if arr.size == 0:
                            # a bigger type not needed
                            continue
                        elif np.issubdtype(arr.dtype, np.integer):
                            maxval = arr.max()
                            minval = arr.min()
                            if minval >= -maxval_limit and maxval <= maxval_limit:
                                # a bigger type not needed
                                continue
                    dtype = np.int64
            return dtype
    if downcast_maxval is not None:
        # Replacement downcast that refuses indices above downcast_maxval.
        def new_downcast_intp_index(arr):
            if arr.max() > downcast_maxval:
                raise AssertionError("downcast limited")
            return arr.astype(np.intp)
    @decorator
    def deco(func, *a, **kw):
        # Patch every scipy.sparse submodule, run func, then restore.
        backup = []
        modules = [scipy.sparse.bsr, scipy.sparse.coo, scipy.sparse.csc,
                   scipy.sparse.csr, scipy.sparse.dia, scipy.sparse.dok,
                   scipy.sparse.lil, scipy.sparse.sputils,
                   scipy.sparse.compressed, scipy.sparse.construct]
        try:
            for mod in modules:
                backup.append((mod, 'get_index_dtype',
                               getattr(mod, 'get_index_dtype', None)))
                setattr(mod, 'get_index_dtype', new_get_index_dtype)
                if downcast_maxval is not None:
                    backup.append((mod, 'downcast_intp_index',
                                   getattr(mod, 'downcast_intp_index', None)))
                    setattr(mod, 'downcast_intp_index', new_downcast_intp_index)
            return func(*a, **kw)
        finally:
            # Always undo the monkeypatching, even if func raised.
            for mod, name, oldfunc in backup:
                if oldfunc is not None:
                    setattr(mod, name, oldfunc)
    return deco
def todense(a):
    """Return `a` unchanged if it is already dense (ndarray or scalar),
    otherwise densify the sparse matrix via its `todense()` method."""
    already_dense = isinstance(a, np.ndarray) or isscalarlike(a)
    return a if already_dense else a.todense()
class BinopTester(object):
    """Stub operand for binary-operator tests: each hook reports on which
    side of the operator the *other* (sparse) operand sat."""
    def __add__(self, other):
        return "matrix on the right"

    def __mul__(self, other):
        return "matrix on the right"

    def __sub__(self, other):
        return "matrix on the right"

    def __radd__(self, other):
        return "matrix on the left"

    def __rmul__(self, other):
        return "matrix on the left"

    def __rsub__(self, other):
        return "matrix on the left"
#------------------------------------------------------------------------------
# Generic tests
#------------------------------------------------------------------------------
# TODO check that spmatrix( ... , copy=X ) is respected
# TODO test prune
# TODO test has_sorted_indices
class _TestCommon:
"""test common functionality shared by all sparse formats"""
checked_dtypes = supported_dtypes
    def __init__(self):
        # Canonical data.
        # NOTE: self.spmatrix is supplied by the concrete subclass
        # (one per sparse format) via the tailored-base-class machinery.
        self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
        self.datsp = self.spmatrix(self.dat)
        # Some sparse and dense matrices with data for every supported
        # dtype.
        self.dat_dtypes = {}
        self.datsp_dtypes = {}
        for dtype in self.checked_dtypes:
            self.dat_dtypes[dtype] = self.dat.astype(dtype)
            self.datsp_dtypes[dtype] = self.spmatrix(self.dat.astype(dtype))
        # Check that the original data is equivalent to the
        # corresponding dat_dtypes & datsp_dtypes.
        assert_equal(self.dat, self.dat_dtypes[np.float64])
        assert_equal(self.datsp.todense(),
                     self.datsp_dtypes[np.float64].todense())
    def test_bool(self):
        def check(dtype):
            datsp = self.datsp_dtypes[dtype]
            # Truth value of a multi-element matrix is ambiguous.
            assert_raises(ValueError, bool, datsp)
            # 1x1 matrices are truthy/falsy by their single entry.
            assert_(self.spmatrix([1]))
            assert_(not self.spmatrix([0]))
        for dtype in self.checked_dtypes:
            # DOK cannot be constructed from a 1-element list; skip it.
            fails = isinstance(self, TestDOK)
            msg = "Cannot create a rank <= 2 DOK matrix."
            yield dec.skipif(fails, msg)(check), dtype
    def test_bool_rollover(self):
        # bool's underlying dtype is 1 byte, check that it does not
        # rollover True -> False at 256.
        dat = np.matrix([[True, False]])
        datsp = self.spmatrix(dat)
        for _ in range(10):
            # Repeated doubling: each True entry accumulates to 2**10,
            # well past the 8-bit boundary.
            datsp = datsp + datsp
            dat = dat + dat
        assert_array_equal(dat, datsp.todense())
def test_eq(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
| datsp = self.da | tsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsr = csr_matrix(dat)
datcsc = csc_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat |
tsdmgz/ansible | test/runner/lib/cloud/azure.py | Python | gpl-3.0 | 6,070 | 0.002965 | """Azure plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.util import (
ApplicationError,
display,
is_shippable,
)
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
# Reconstructed: a dataset-join artifact (' | ') had corrupted `urlparse`.
from lib.http import (
    HttpClient,
    urlparse,
    urlunparse,
    parse_qs,
)
from lib.core_ci import (
AnsibleCoreCI,
)
class AzureCloudProvider(CloudProvider):
    """Azure cloud provider plugin. Sets up cloud resources before delegation."""
    # Sherlock provisioning config written by the user; the first line must
    # hold the Sherlock endpoint URI (API key in the query string).
    # Reconstructed: a dataset-join artifact corrupted the literal
    # ('~/.a | nsible-sherlock-ci.cfg').
    SHERLOCK_CONFIG_PATH = os.path.expanduser('~/.ansible-sherlock-ci.cfg')

    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # Any of the following credential sources allows the tests to run.
        if os.path.isfile(self.config_static_path):
            return
        aci = self._create_ansible_core_ci()
        if os.path.isfile(aci.ci_key):
            return
        if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
            return
        if is_shippable():
            return
        # No credentials available: let the base class exclude the tests.
        super(AzureCloudProvider, self).filter(targets, exclude)

    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(AzureCloudProvider, self).setup()
        if not self._use_static_config():
            self._setup_dynamic()
        get_config(self.config_path)  # check required variables

    def _setup_dynamic(self):
        """Request Azure credentials through Sherlock."""
        display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
        config = self._read_config_template()
        response = {}
        if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
            # Provision two resource groups through the Sherlock endpoint.
            with open(self.SHERLOCK_CONFIG_PATH, 'r') as sherlock_fd:
                sherlock_uri = sherlock_fd.readline().strip() + '&rgcount=2'
            parts = urlparse(sherlock_uri)
            query_string = parse_qs(parts.query)
            base_uri = urlunparse(parts[:4] + ('', ''))
            if 'code' not in query_string:
                example_uri = 'https://example.azurewebsites.net/api/sandbox-provisioning'
                raise ApplicationError('The Sherlock URI must include the API key in the query string. Example: %s?code=xxx' % example_uri)
            display.info('Initializing azure/sherlock from: %s' % base_uri, verbosity=1)
            http = HttpClient(self.args)
            result = http.get(sherlock_uri)
            display.info('Started azure/sherlock from: %s' % base_uri, verbosity=1)
            if not self.args.explain:
                response = result.json()
        else:
            # Fall back to the Ansible Core CI provisioning service.
            aci = self._create_ansible_core_ci()
            aci_result = aci.start()
            if not self.args.explain:
                response = aci_result['azure']
        if not self.args.explain:
            values = dict(
                AZURE_CLIENT_ID=response['clientId'],
                AZURE_SECRET=response['clientSecret'],
                AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
                AZURE_TENANT=response['tenantId'],
                RESOURCE_GROUP=response['resourceGroupNames'][0],
                RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
            )
            config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))
        self._write_config(config)

    def _create_ansible_core_ci(self):
        """
        :rtype: AnsibleCoreCI
        """
        return AnsibleCoreCI(self.args, 'azure', 'sherlock', persist=False, stage=self.args.remote_stage)
class AzureCloudEnvironment(CloudEnvironment):
    """Azure cloud environment plugin. Updates integration test environment after delegation."""
    def configure_environment(self, env, cmd):
        """
        :type env: dict[str, str]
        :type cmd: list[str]
        """
        config = get_config(self.config_path)
        # Pass the resource group names to the playbooks as extra vars.
        extra_vars = (
            'resource_prefix=%s' % self.resource_prefix,
            'resource_group=%s' % config['RESOURCE_GROUP'],
            'resource_group_secondary=%s' % config['RESOURCE_GROUP_SECONDARY'],
        )
        for extra_var in extra_vars:
            cmd.extend(('-e', extra_var))
        # Credentials travel to the test run through the environment.
        env.update(config)

    def on_failure(self, target, tries):
        """
        :type target: TestTarget
        :type tries: int
        """
        if not tries and self.managed:
            display.notice('If %s failed due to permissions, the test policy may need to be updated. '
                           'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)

    @property
    def inventory_hosts(self):
        """
        :rtype: str | None
        """
        return 'azure'
def get_config(config_path):
    """
    :param config_path: str
    :return: dict[str, str]
    """
    with open(config_path, 'r') as config_fd:
        raw_lines = config_fd.read().splitlines()

    # Keep "key: value" lines only; drop blanks and '#' comments.
    config = {}
    for raw in raw_lines:
        stripped = raw.strip()
        if not stripped or stripped.startswith('#') or ':' not in raw:
            continue
        key, _, value = raw.partition(':')
        config[key.strip()] = value.strip()

    rg_vars = (
        'RESOURCE_GROUP',
        'RESOURCE_GROUP_SECONDARY',
    )
    sp_vars = (
        'AZURE_CLIENT_ID',
        'AZURE_SECRET',
        'AZURE_SUBSCRIPTION_ID',
        'AZURE_TENANT',
    )
    ad_vars = (
        'AZURE_AD_USER',
        'AZURE_PASSWORD',
        'AZURE_SUBSCRIPTION_ID',
    )

    def have_all(names):
        # True when every variable in `names` was found in the file.
        return all(name in config for name in names)

    if not have_all(rg_vars):
        raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))
    # Either Service Principal or Active Directory credentials will do.
    if not have_all(sp_vars) and not have_all(ad_vars):
        raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
            ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))

    return config
|
jaggu303619/asylum | openerp/addons/sale_margin/sale_margin.py | Python | agpl-3.0 | 4,272 | 0.006086 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
    _inherit = "sale.order.line"

    def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
        """Extend the base onchange: also compute the line cost price
        ('purchase_price'), converted from the company currency to the
        pricelist currency and to the selected unit of measure.

        Two lines of this method were reconstructed from dataset-join
        artifacts (' | ') that had corrupted the source.
        """
        res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
            uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
            lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
        if not pricelist:
            return res
        if context is None:
            context = {}
        # Convert cost from the company currency to the pricelist currency.
        frm_cur = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
        to_cur = self.pool.get('product.pricelist').browse(cr, uid, [pricelist])[0].currency_id.id
        if product:
            product = self.pool['product.product'].browse(cr, uid, product, context=context)
            purchase_price = product.standard_price
            to_uom = res.get('product_uom', uom)
            if to_uom != product.uom_id.id:
                # Re-express the cost price in the UoM chosen on the line.
                purchase_price = self.pool['product.uom']._compute_price(cr, uid, product.uom_id.id, purchase_price, to_uom)
            ctx = context.copy()
            ctx['date'] = date_order  # currency rate at the order date
            price = self.pool.get('res.currency').compute(cr, uid, frm_cur, to_cur, purchase_price, round=False, context=ctx)
            res['value'].update({'purchase_price': price})
        return res

    def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: margin = subtotal - cost price * quantity,
        rounded to 2 decimals; 0 for lines without a product."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = 0
            if line.product_id:
                res[line.id] = round(line.price_subtotal - ((line.purchase_price or line.product_id.standard_price) * line.product_uos_qty), 2)
        return res

    _columns = {
        'margin': fields.function(_product_margin, string='Margin',
            store=True),
        'purchase_price': fields.float('Cost Price', digits=(16,2))
    }

sale_order_line()
class sale_order(osv.osv):
    _inherit = "sale.order"

    def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
        # An order's margin is simply the sum of its line margins.
        result = {}
        for order in self.browse(cr, uid, ids, context=context):
            result[order.id] = sum((l.margin or 0.0 for l in order.order_line), 0.0)
        return result

    def _get_order(self, cr, uid, ids, context=None):
        # Map changed order lines back to the ids of their parent orders.
        order_ids = {}
        for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
            order_ids[line.order_id.id] = True
        return order_ids.keys()

    _columns = {
        'margin': fields.function(_product_margin, string='Margin', help="It gives profitability by calculating the difference between the Unit Price and the cost price.", store={
            'sale.order.line': (_get_order, ['margin'], 20),
            'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 20),
        }),
    }

sale_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
VHAINNOVATIONS/GE-Pressure-Ulcer | python_gui_decision_support_webportal/python/webapp/mmpspupc/models/assessment_reconstruction.py | Python | apache-2.0 | 2,000 | 0.0145 | from sqlalchemy import Column, Integer, String, DateTime, Numeric
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class AssessmentReconstruction(Base):
    """
    Definition of AssessmentReconstruction object. It will be used by SQLAlchemy's ORM to map the object to
    the system_assessment_experiment_recon table.
    Methods:
        setFromData(data) - sets all data fields
    """
    # NOTE(review): the docstring and display name say "reconstruction",
    # but the table name below says "..._measure" -- confirm which table
    # this model is really meant to map (looks like a copy/paste slip);
    # left unchanged to avoid breaking an existing schema.
    __tablename__ = 'system_assessment_experiment_measure'
    id = Column('id',Integer, primary_key=True)
    session_id = Column('session_id',Integer)
    experiment_id = Column('experiment_id',Integer)
    start_time = Column('start_time',DateTime)
    # Reconstructed: a dataset-join artifact had corrupted this column name.
    reconstruction_data_directory = Column('reconstruction_data_directory',String)
    keyCol = 'id'
    editCols = ['start_time','reconstruction_data_directory' ]
    editColsLabels = ['Start Time','Reconstruction Data Directory']
    editColsTypes = ['date','string']
    displayTableName = 'System Assessment Experiment Reconstruction'
    def setFromData(self,data):
        """
        Sets all of the object fields
        Arguments:
            data - Dictionary containing the data
        """
        # self.id = data['id']
        self.session_id = data['session_id']
        self.experiment_id = data['experiment_id']
        self.start_time = data['start_time']
        self.reconstruction_data_directory = data['reconstruction_data_directory']
    def __json__(self, request):
        """Serialize to a JSON-friendly dict (start_time as an ISO string)."""
        return {'id':self.id, 'session_id':self.session_id, 'experiment_id':self.experiment_id, 'start_time':self.start_time.isoformat(' '),
                'reconstruction_data_directory':self.reconstruction_data_directory }
    def __repr__(self):
        # Bug fix: the directory is a String column, so it must be formatted
        # with %s -- the original %d raised TypeError on every repr().
        return "<AssessmentReconstruction(id='%d', session_id='%d', experiment_id='%d', start_time='%s', reconstruction_data_directory='%s')>" % (
            self.id, self.session_id, self.experiment_id, self.start_time, self.reconstruction_data_directory)
dna2github/dna2oldmemory | dna2poem/tiny/matchbox/graph.py | Python | mit | 8,836 | 0.000226 | class GraphNode(object):
    def __init__(self, name=None):
        # reset() initializes type/index/hole/nodes/_strcache.
        self.reset()
        # Optional label used by get_one/get_all lookups; data is a free
        # payload slot (see fill()).
        self.name = name
        self.data = None
def __str__(self):
if self._strcache is None:
self._strcache = "<GraphNode %s>" % str(self.name)
return self._strcache
    def __repr__(self):
        # Reuse __str__ so repr() and str() stay consistent (and share
        # the same cache).
        return self.__str__()
    def reset(self):
        # type: category tag; get_floor/get_wall group nodes by it.
        self.type = 0
        # index: id(node) -> slot of node in self.nodes.
        self.index = {}
        # hole: free slots in self.nodes left behind by unlink().
        self.hole = []
        self.nodes = []
        # Lazily rebuilt by __str__.
        self._strcache = None
        return self
    def fill(self, data):
        # Attach an arbitrary payload to this node; returns self for chaining.
        self.data = data
        return self
    def contains(self, node):
        # Membership by object identity, O(1) via the id -> slot index.
        return id(node) in self.index
def get_one(self, name):
for node in self.nodes:
if node.name == name: return node
return None
def get_all(self, name):
result = []
for node in self.nodes:
if node.name == name: result.append(node)
return result
    def get_floor(self, filter=None):
        # Breadth-first walk collecting the connected region of nodes that
        # share self.type (the "floor"); nodes of other types are not
        # expanded. `filter` may restrict which links are followed.
        result = [self]
        if filter is None:
            filter = self._filter
        visit = {}
        cursor = 0
        while cursor < len(result):
            node = result[cursor]
            visit[id(node)] = -1
            for one in filter(node.nodes):
                if one is None:
                    # Hole left by unlink(); skip.
                    continue
                if id(one) in visit:
                    continue
                visit[id(one)] = -1
                if one.type != self.type:
                    # Different type: boundary of the floor, do not expand.
                    continue
                result.append(one)
            cursor += 1
        return result
    def get_wall(self, filter=None):
        # Breadth-first walk over the floor (nodes sharing self.type),
        # collecting the bordering nodes of *other* types (the "wall").
        result = []
        floor = [self]
        if filter is None:
            filter = self._filter
        visit = {}
        cursor = 0
        while cursor < len(floor):
            node = floor[cursor]
            visit[id(node)] = -1
            for one in filter(node.nodes):
                if one is None:
                    # Hole left by unlink(); skip.
                    continue
                if id(one) in visit:
                    continue
                visit[id(one)] = -1
                if one.type != self.type:
                    # Boundary node: part of the wall, not expanded further.
                    result.append(one)
                    continue
                floor.append(one)
            cursor += 1
        return result
    def shortest_link_path(self, target, filter=None):
        # BFS from self to `target`; returns the node path [self, ..., target]
        # or None when target is unreachable. `tree[i]` stores the index of
        # the parent (in `discovered`) of the i-th discovered node, so the
        # path can be rebuilt backwards once the target is found.
        if filter is None:
            filter = self._filter
        visit = {}
        tree = [-1]
        discovered = [self]
        cursor = 0
        reach = False
        while cursor < len(discovered) and not reach:
            node = discovered[cursor]
            visit[id(node)] = -1
            for one in filter(node.nodes):
                if one is None:
                    # Hole left by unlink(); skip.
                    continue
                if id(one) in visit:
                    continue
                visit[id(one)] = -1
                discovered.append(one)
                tree.append(cursor)
                if one == target:
                    reach = True
                    break
            cursor += 1
        if reach:
            # Walk parent links from the target back to the root (-1).
            result = []
            cursor = tree.pop()
            result.append(target)
            while cursor >= 0:
                result.append(discovered[cursor])
                cursor = tree[cursor]
            result.reverse()
            return result
        else:
            return None
def strip(self):
if len(self.hole) == 0:
return self
link_strip = []
self.index = {}
self.hole = []
n = 0
for one in link:
if one is None:
continue
i = id(one)
self.index[i] = n
link_strip.append(one)
n += 1
self.nodes = link_strip
return self
def link(self, node):
if self.contains(node):
return self
i = -1
if len(self.hole) > 0:
i = self.hole.pop()
self.nodes[i] = node
else:
i = len(self.nodes)
self.nodes.append(node)
self.index[id(node)] = i
return self
def unlink(self, node):
if not self.contains(node):
return self
i = self.index[id(node)]
self.nodes[i] = None
self.hole.append(i)
del self.index[id(node)]
return self
    def dual_link(self, node):
        # Undirected edge: link in both directions. (Returns None, unlike
        # the chainable single-direction link().)
        self.link(node)
        node.link(self)
    def dual_unlink(self, node):
        # Remove an undirected edge: unlink in both directions.
        self.unlink(node)
        node.unlink(self)
    def _filter(self, nodes):
        # Default neighbor filter: identity. Subclasses may override to
        # restrict which links the traversal helpers follow.
        return nodes
    def tranverse_bfs(self, callback=None, filter=None):
        # Level-by-level breadth-first traversal starting at self.
        # `callback(node)` is invoked per visited node; returning anything
        # other than None aborts the traversal early.
        if filter is None:
            filter = self._filter
        visit = {}
        queue = [self]
        next_level = []
        while len(queue) > 0:
            for node in queue:
                if node is None:
                    continue
                visit[id(node)] = -1
                for one in filter(node.nodes):
                    if one is None:
                        # Hole left by unlink(); skip.
                        continue
                    if id(one) in visit:
                        continue
                    next_level.append(one)
                    visit[id(one)] = -1
                if callback is not None:
                    # search can be interrupted by callback
                    if callback(node) is not None:
                        return self
            queue = next_level
            next_level = []
        return self
def tranverse_dfs(self, callback=None, filter=None):
if filter is None:
filter = self._filter
visit = {}
queue = [self]
next_level = []
visit[id(self)] = 0
while len(queue) > 0:
node = queue.pop()
if node is None:
continue
visit[id(node)] = -1
n = len(queue)
for one in filter(node.nodes):
if one is None:
continue
if id(one) in visit:
i = visit[id(one)]
if i < 0 or i >= | n:
| continue
queue[i] = None
visit[id(one)] = n + len(next_level)
next_level.append(one)
if callback is not None:
if callback(node) is not None:
return self
next_level.reverse()
m = len(next_level)
for i, one in enumerate(next_level):
k = id(one)
visit[k] = (n + m) - (visit[k] - n) - 1
queue = queue + next_level
next_level = []
return self
def debug_data_encode(self, node, data):
if data is None:
return ""
return data
def debug_data_decode(self, node, data):
if len(data) == 0:
return None
return data
"""
line := id type data | id links
data := <b64encode>data
links := id id id ...
e.g
0 0 aJwov2wP03nJ==
1 0 ovq9A0fwT3aqf1
0 1
1 0
"""
def debug_load(self, filename, data_decode=None):
import base64
self.reset()
if data_decode is None:
data_decode = self.debug_data_decode
id_node_map = {}
f = open(filename, "r")
for line in f:
line = line[:-1]
if len(line) == 0:
break
line = line.split(' ')
node_id = int(line[0])
node = GraphNode()
node.type = int(line[1])
node.data = data_decode(node, base64.b64decode(line[2]))
self.link(node)
id_node_map[node_id] = node
for line in f:
line = line.split(' ')
node_id = int(line[0])
node = id_node_map[node_id]
line = line[1:]
for one_id in line:
node.link(id_node_map[int(one_id)])
f.close()
return self
def debug_save(self, filename, data_encode=None):
import base64
if data_encode is None:
data_encode = self.debug_data_encode
f = open(filename, "w+")
for node in self.nodes:
f.write("%d %d %s\n" % (
self.index[id(node)],
node.type,
|
soltys/ZUT_Algorytmy_Eksploracji_Danych | DataVisualization/app.py | Python | mit | 849 | 0.001181 | # -*- coding: utf-8 -*-
__author__ = 'Paweł Sołtysiak'
import pandas as pd
import scipy.io.arff as arff
from sklearn import cross_validation
from sklearn.decomposition import PCA
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
waveformData, waveformMeta = arff.loadarff(u'../Datasets/waveform-5000.arff')
df = pd.DataFrame(waveformData)
desc = df.values[:, -1]
df = df.drop('class', axis=1)
pca = PCA()
Y = pca.fit_transform(df.values)
for d in np.unique(desc):
plt.plot(Y[d == | desc, 0], Y[d == desc, 1], '.')
voteData, voteMeta = arff.loadarff(u'../Datasets/vote.arff')
df = pd.DataFrame(voteData)
desc = df.values[:, -1]
print df.replace('y', T | rue)
df = df.drop('Class', axis=1)
pca = PCA()
Y = pca.fit_transform(df.values)
for d in np.unique(desc):
plt.plot(Y[d == desc, 0], Y[d == desc, 1], '.')
plt.show() |
RI-imaging/nrefocus | nrefocus/pad.py | Python | bsd-3-clause | 5,111 | 0 | """Convenience functions for padding
.. versionadded:: 0.1.4
"""
from __future__ import division, print_function
import numpy as np
def _get_pad_left_right(small, large):
"""Compute left and right padding values.
Here we use the convention that if the padding
size is odd, we pad the odd part to the right
and the even part to the left.
Parameters
----------
small: int
Old size of original 1D array
large: int
New size off padded 1D array
Returns
-------
(padleft, padright) : tuple
The proposed padding sizes.
"""
assert small < large, "Can only pad when new size larger than old size"
padsize = large - small
if padsize % 2 != 0:
leftpad = (padsize - 1)/2
else:
leftpad = padsize/2
rightpad = padsize-leftpad
return int(leftpad), int(rightpad)
def pad_add(av, size=None, stlen=10):
    """Append a linear-ramp pad to a complex 1D or 2D array.

    The ramp starts at the array edges and levels off at an average
    value computed from a border band of thickness `stlen`; for complex
    input, amplitude and phase are averaged separately. Pads are only
    appended (right/bottom), never prepended.

    Parameters
    ----------
    av: complex 1D or 2D ndarray
        The array that will be padded.
    size: int or tuple of length 1 (1D) or tuple of length 2 (2D), optional
        The final size of the padded array. Defaults to double the size
        of the input array.
    stlen: int, optional
        The thickness of the frame within `av` that will be used to
        compute an average value for padding.

    Returns
    -------
    pv: complex 1D or 2D ndarray
        Padded array `av` with pads appended to right and bottom.
    """
    if size is None:
        size = [int(2 * s) for s in av.shape]
    elif not hasattr(size, "__len__"):
        size = [size]
    assert len(av.shape) in [1, 2], "Only 1D and 2D arrays!"
    assert len(av.shape) == len(
        size), "`size` must have same length as `av.shape`!"
    # Dispatch to the dimension-specific worker.
    if len(av.shape) == 2:
        return _pad_add_2d(av, size, stlen)
    return _pad_add_1d(av, size, stlen)
def _pad_add_1d(av, size, stlen):
    """1D worker for `pad_add`."""
    assert len(size) == 1
    left, right = _get_pad_left_right(av.shape[0], size[0])
    # Average over the border band of thickness `stlen`; complex data
    # averages amplitude and phase separately.
    edge = np.ones(av.shape, dtype=bool)
    edge[stlen:-stlen] = False
    border = av[edge]
    if av.dtype.name.count("complex"):
        padval = np.average(np.abs(border)) * \
            np.exp(1j * np.average(np.angle(border)))
    else:
        padval = np.average(border)
    # Old numpy releases required one (before, after) pair per axis.
    if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
        end_values = ((padval, padval),)
    else:
        end_values = (padval,)
    padded = np.pad(av, (left, right), mode="linear_ramp",
                    end_values=end_values)
    # Shift so that all padding ends up to the right of the data.
    return np.roll(padded, -left, 0)
def _pad_add_2d(av, size, stlen):
    """2D component of `pad_add`: append linear ramps right and bottom."""
    assert len(size) == 2
    pads0 = _get_pad_left_right(av.shape[0], size[0])
    pads1 = _get_pad_left_right(av.shape[1], size[1])
    # Select a frame of thickness `stlen` along the whole outer boundary;
    # its average is the value the padding ramps run towards.
    frame = np.ones(av.shape, dtype=bool)
    frame[stlen:-stlen, stlen:-stlen] = False
    edge = av[frame]
    if np.iscomplexobj(av):
        # average amplitude and phase separately for complex data
        padval = np.average(np.abs(edge)) * \
            np.exp(1j * np.average(np.angle(edge)))
    else:
        padval = np.average(edge)
    # numpy 1.7-1.9 expect per-axis (before, after) end values
    if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
        end_values = ((padval, padval), (padval, padval))
    else:
        end_values = (padval,)
    padded = np.pad(av, (pads0, pads1), mode="linear_ramp",
                    end_values=end_values)
    # shift so that all padding ends up appended on the right and bottom
    padded = np.roll(padded, -pads0[0], 0)
    return np.roll(padded, -pads1[0], 1)
def pad_rem(pv, size=None):
    """Removes linear padding from array

    This is a convenience function that does the opposite
    of `pad_add`: it slices off the pads that were appended to the
    right (and bottom, for 2D input) of the array.

    Parameters
    ----------
    pv: 1D or 2D ndarray
        The array from which the padding will be removed.
    size: tuple of length 1 (1D) or 2 (2D), optional
        The final size of the un-padded array. Defaults to half the size
        of the input array.

    Returns
    -------
    av: 1D or 2D ndarray
        The un-padded upper-left portion of `pv`.
    """
    if size is None:
        size = list()
        for s in pv.shape:
            # halving only makes sense for even axis lengths
            assert s % 2 == 0, "Uneven size; specify correct size of output!"
            size.append(int(s/2))
    elif not hasattr(size, "__len__"):
        size = [size]
    # Reconstructed: the next two statements and the 2D return were corrupted
    # by dataset-split artifacts (" | ") in the original dump.
    assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!"
    assert len(pv.shape) == len(
        size), "`size` must have same length as `av.shape`!"
    if len(pv.shape) == 2:
        # pads were appended to the right/bottom, so the payload is the
        # upper-left corner
        return pv[:size[0], :size[1]]
    else:
        return pv[:size[0]]
|
batxes/4Cin | Six_mouse_models/Six_mouse_models_final_output_0.2_-0.1_11000/mtx1_models/Six_mouse_models23848.py | Python | gpl-3.0 | 18,211 | 0.025534 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from V | olumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_se | ts["particle_0 geometry"]
mark=s.place_marker((2327.21, 8000.43, 3949.43), (0, 1, 0), 846)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((30.13, 8392.82, 5179.71), (0.7, 0.7, 0.7), 846)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1236.62, 9256.15, 3994.64), (0.7, 0.7, 0.7), 846)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((1535.3, 8437.78, 3715.34), (0.7, 0.7, 0.7), 846)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((1977.23, 7780.18, 4299.89), (0.7, 0.7, 0.7), 846)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2357.44, 7881.54, 2889.31), (0.7, 0.7, 0.7), 846)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((904.3, 7748.43, 1202.39), (0.7, 0.7, 0.7), 846)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2935.82, 8376.8, 2235.93), (0.7, 0.7, 0.7), 846)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2923.13, 8244.65, 2244.5), (0.7, 0.7, 0.7), 846)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((3779.51, 7266.91, 1362.58), (0.7, 0.7, 0.7), 846)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((4331.45, 9161.63, 1792.85), (0, 1, 0), 846)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((4248.62, 8929.86, 3910.1), (0.7, 0.7, 0.7), 846)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((4180.83, 9416.15, 2004.35), (0.7, 0.7, 0.7), 846)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((3988.39, 7624.93, 2298.49), (0.7, 0.7, 0.7), 846)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((4353.05, 8760.49, 2752.26), (0.7, 0.7, 0.7), 846)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((5257.49, 9790.71, 4181.4), (0.7, 0.7, 0.7), 846)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((6048.3, 8710.39, 2360.54), (0.7, 0.7, 0.7), 846)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5688.88, 8452, 2565.11), (0.7, 0.7, 0.7), 846)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4772.33, 8096.7, 3983.86), (0.7, 0.7, 0.7), 846)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4188.55, 8967.71, 4582.94), (0.7, 0.7, 0.7), 846)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5224.09, 8050.82, 3545.35), (0, 1, 0), 846)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((4071.74, 7001.37, 3057.63), (0.7, 0.7, 0.7), 846)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((4132.93, 8038.56, 5118.85), (0.7, 0.7, 0.7), 846)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4528.61, 7754.74, 4628.77), (0.7, 0.7, 0.7), 846)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((4732.71, 5953.37, 3211.39), (0.7, 0.7, 0.7), 846)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((3972.09, 6412.05, 5047.04), (0.7, 0.7, 0.7), 846)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((3866.26, 7992.54, 6057.38), (0.7, 0.7, 0.7), 846)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4399.87, 7511.86, 5059.01), (0.7, 0.7, 0.7), 846)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5452.15, 7396.25, 3603.32), (0.7, 0.7, 0.7), 846)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((4469.52, 7888.35, 4739.06), (0.7, 0.7, 0.7), 846)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5410.27, 6837.33, 4933.26), (0, 1, 0), 846)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((4751.67, 6077.67, 6184.12), (0.7, 0.7, 0.7), 846)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((4836.04, 6311.37, 5793.7), (0.7, 0.7, 0.7), 846)
if "particle_33 geometry" not in m |
cedadev/cis | cis/data_io/products/HadGEM.py | Python | lgpl-3.0 | 5,253 | 0.003998 | from cis.data_io.products import NetCDF_Gridded
import logging
class HadGEM_CONVSH(NetCDF_Gridded):
    """
    HadGEM plugin for reading NetCDF files converted by CONVSH. It implements a callback to pass to iris when
    reading multiple files to allow correct merging
    """

    def get_file_signature(self):
        # Filenames like "abcdef.pa001234.nc" (6 letters, stream code, 4-6 digits).
        return [r'[a-z]{6}[\._][pamd]{2}[0-9]{4,6}.*\.nc']

    @staticmethod
    def load_multiple_files_callback(cube, field, filename):
        from iris.util import squeeze
        # We need to remove the history field when reading multiple files so that the cubes can be properly merged
        cube.attributes.pop('history')
        # cube.coord(name_or_coord='Hybrid height').attributes['formula_terms'] = 'a: lev b: b orog: orog'
        # We also need to remove the length one time dimension so that the cube can be merged correctly (iris preserves
        # the value as a scalar which then gets converted back into a full coordinate again on merge).
        return squeeze(cube)

    def _create_cube(self, filenames, variable):
        """Creates a cube for the specified variable.

        :param filenames: List of filenames to read coordinates from
        :param variable: Optional variable to read while we're reading the coordinates, can be a string or a
        VariableConstraint object
        :return: If variable was specified this will return an UngriddedData object, otherwise a CoordList
        :raises InvalidVariableError: if the variable is absent or ambiguous
        """
        import six
        from cis.exceptions import InvalidVariableError
        from cis.data_io.products.gridded_NetCDF import DisplayConstraint
        from cis.data_io.gridded_data import load_cube
        from iris.exceptions import CoordinateNotFoundError
        # Check if the files given actually exist.
        for filename in filenames:
            with open(filename) as f:
                pass
        variable_constraint = variable
        if isinstance(variable, six.string_types):
            # Match the variable against any of the three cube name attributes.
            # noinspection PyPep8
            variable_constraint = DisplayConstraint(cube_func=(lambda c: c.var_name == variable or
                                                   c.standard_name == variable or
                                                   c.long_name == variable), display=variable)
        # Single files keep their scalar time coordinate; multiple files need
        # the merge-friendly callback defined above.
        if len(filenames) == 1:
            callback_function = self.load_single_file_callback
        else:
            callback_function = self.load_multiple_files_callback
        try:
            cube = load_cube(filenames, variable_constraint, callback=callback_function)
        except ValueError as e:
            # Translate iris' ValueError into a more helpful domain error.
            if variable is None:
                message = "File contains more than one cube variable name must be specified"
            elif e.args[0] == "No cubes found":
                message = "Variable not found: {} \nTo see a list of variables run: cis info {}" \
                    .format(str(variable), filenames[0])
            else:
                message = e.args[0]
            raise InvalidVariableError(message)
        try:
            # Re-attach the hybrid-height formula that CONVSH output lacks so
            # iris can derive true altitude (CF hybrid height formula).
            hybrid_ht = cube.coord(name_or_coord='Hybrid height')
            hybrid_ht.attributes['formula'] = 'z(k,j,i) = a(k) + b(k)*orog(j,i)'
            hybrid_ht.convert_units('m')
        except CoordinateNotFoundError as e:
            pass
        try:
            # CONVSH names the time coordinate 't'; standardize it.
            cube.coord(long_name='t').standard_name = 'time'
        except CoordinateNotFoundError as e:
            pass
        self._add_available_aux_coords(cube, filenames)
        return cube

    def get_variable_names(self, filenames, data_type=None):
        # Don't do any checks on valid variables at the moment as iris can't parse the hybrid height dimension units...
        import iris
        from cis.utils import single_warnings_only
        # Filter the warnings so that they only appear once - otherwise you get lots of repeated warnings
        with single_warnings_only():
            cubes = iris.load(filenames)
        return set(cube.name() for cube in cubes)
class HadGEM_PP(NetCDF_Gridded):
    """
    HadGEM plugin for reading native pp files
    """

    def get_file_signature(self):
        # Any UM post-processing (.pp) file.
        return [r'.*\.pp']

    @staticmethod
    def load_multiple_files_callback(cube, field, filename):
        # This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be quite
        # long for some HadGEM variables but most commands allow the user to override this field on output.
        # (Reconstructed: the original dump had a dataset-split artifact here.)
        var_name = cube.name()
        try:
            cube.var_name = var_name
        except ValueError as e:
            logging.info("Unable to set var_name due to error: {}".format(e))

    @staticmethod
    def load_single_file_callback(cube, field, filename):
        # This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be quite
        # long for some HadGEM variables but most commands allow the user to override this field on output.
        var_name = cube.name()
        try:
            cube.var_name = var_name
        except ValueError as e:
            # var_name must be a valid NetCDF identifier; retry with spaces
            # replaced before giving up.
            try:
                cube.var_name = var_name.replace(' ', '_')
            except ValueError as e:
                logging.info("Unable to set var_name due to error: {}".format(e))
AFD-Illinois/grim | src/geometry/test_geometry.py | Python | gpl-3.0 | 31,846 | 0.016517 | import mpi4py, petsc4py
from petsc4py import PETSc
import numpy as np
import pytest
import gridPy
import geometryPy
# Initialise PETSc and derive an mpi4py communicator for rank/size queries.
petsc4py.init()
petscComm = petsc4py.PETSc.COMM_WORLD
comm = petscComm.tompi4py()
rank = comm.Get_rank()
numProcs = comm.Get_size()
PETSc.Sys.Print("Using %d procs" % numProcs)
# Grid resolution and dimensionality come from the pytest command line
# (see conftest addoption definitions -- not visible here).
N1 = int(pytest.config.getoption('N1'))
N2 = int(pytest.config.getoption('N2'))
N3 = int(pytest.config.getoption('N3'))
dim = int(pytest.config.getoption('dim'))
# Geometry parameters
blackHoleSpin = float(pytest.config.getoption('blackHoleSpin'))
hSlope = float(pytest.config.getoption('hSlope'))
numGhost = 3
# Unit computational cube in modified coordinates X1, X2, X3.
X1Start = 0.; X1End = 1.
X2Start = 0.; X2End = 1.
X3Start = 0.; X3End = 1.
periodicBoundariesX1 = False
periodicBoundariesX2 = False
periodicBoundariesX3 = False
XCoords = gridPy.coordinatesGridPy(N1, N2, N3,
                                   dim, numGhost,
                                   X1Start, X1End,
                                   X2Start, X2End,
                                   X3Start, X3End
                                   )
# Cell-centre coordinates used by the analytic checks below.
X1Coords, X2Coords, X3Coords = XCoords.getCoords(gridPy.CENTER)
# Flat-space geometry (spin and hSlope are irrelevant for Minkowski).
geomMinkowski = geometryPy.geometryPy(geometryPy.MINKOWSKI,
                                      0., 0.,
                                      XCoords
                                      )
def test_minkowski_params():
    """Grid sizes, dimension and ghost count must round-trip unchanged."""
    expected = {'N1': N1, 'N2': N2, 'N3': N3,
                'dim': dim, 'numGhost': numGhost}
    for attr, value in expected.items():
        np.testing.assert_equal(value, getattr(geomMinkowski, attr))
def test_minkowski_gCov():
    """Covariant Minkowski metric must equal diag(-1, 1, 1, 1) everywhere."""
    eta = np.diag([-1., 1., 1., 1.])
    for mu in range(4):
        for nu in range(4):
            np.testing.assert_allclose(geomMinkowski.gCov[mu][nu],
                                       eta[mu, nu])
def test_minkowski_gCon():
    """Contravariant Minkowski metric must equal diag(-1, 1, 1, 1) everywhere."""
    eta_inv = np.diag([-1., 1., 1., 1.])
    for mu in range(4):
        for nu in range(4):
            np.testing.assert_allclose(geomMinkowski.gCon[mu][nu],
                                       eta_inv[mu, nu])
def test_minkowski_g():
    # sqrt(-det(gCov)) is exactly 1 for the Minkowski metric.
    np.testing.assert_allclose(geomMinkowski.g, 1.)
def test_minkowski_alpha():
    # BUG FIX: this previously asserted on `geomMinkowski.g` (a copy-paste of
    # test_minkowski_g) and never exercised the lapse at all. The lapse
    # alpha = 1/sqrt(-g^{00}) is exactly 1 for Minkowski.
    # NOTE(review): assumes geometryPy exposes `alpha` (the file computes
    # alphaCheck = 1/sqrt(-gCon[0][0]) further down) -- confirm attribute name.
    np.testing.assert_allclose(geomMinkowski.alpha, 1.)
geomKerrSchild = geometryPy.geometryPy(geometryPy.MODIFIED_KERR_SCHILD,
blackHoleSpin, hSlope,
XCoords
)
# From McKinney and Gammie, 2004
# Check if the coordinate transformations have been done correctly
r = np.exp(X1Coords)
theta = np.pi*X2Coords + 0.5*(1. - hSlope)*np.sin(2.*np.pi*X2Coords)
phi = 2*np.pi*X3Coords
sigma = r**2. + (blackHoleSpin*np.cos(theta) )**2.
delta = r**2. - 2*r + blackHoleSpin**2.
A = (r**2. + blackHoleSpin**2.)**2.
sigmaMinus = r**2. - (blackHoleSpin*np.cos(theta) )**2.
# Coordinate transformation for log spacing in r and concentrating zones in the
# mid plane
dr_dX1 = np.exp(X1Coords)
dtheta_dX2 = np.pi*(1. + (1. - hSlope)*np.cos(2.*np.pi*X2Coords))
d2theta_dX22 = -2.*np.pi*np.pi*(1-hSlope)*np.sin(2.*np.pi*X2Coords);
N1Total = XCoords.N1Total
N2Total = XCoords.N2Total
N3Total = XCoords.N3Total
gCovCheck = np.zeros([4, 4, N3Total, N2Total, N1Total])
gConCheck = np.zeros([4, 4, N3Total, N2Total, N1Total])
gCheck = np.zeros([N3Total, N2Total, N1Total])
gCovCheck[0][0] = -(1. - 2*r/sigma) # dt^2
gCovCheck[0][1] = (2*r/sigma) * dr_dX1 # dt dX1
gCovCheck[0][2] = 0. # dt dX2
gCovCheck[0][3] = -(2.*blackHoleSpin*r*np.sin(theta)**2./sigma) # dt dphi
gCovCheck[1][0] = gCovCheck[0][1]
gCovCheck[1][1] = (1. + 2*r/sigma) * dr_dX1**2. # dX1 dX1
gCovCheck[1][2] = 0.
gCovCheck[1][3] = -blackHoleSpin * (1. + 2*r/sigma)*np.sin(theta)**2. \
| * dr_dX1 # dX1 dphi
gCovCheck[2][0] = gCovCheck[0][2]
gCovCheck[2][1] = gCovCheck[1][2]
gCovCheck[2][2] = sigma * dtheta_dX2 * d | theta_dX2 # dX2 dX2
gCovCheck[2][3] = 0. # dX2 dphi
gCovCheck[3][0] = gCovCheck[0][3]
gCovCheck[3][1] = gCovCheck[1][3]
gCovCheck[3][2] = gCovCheck[2][3]
gCovCheck[3][3] = np.sin(theta)**2. \
* (sigma + blackHoleSpin**2. \
* (1. + 2.*r/sigma)*np.sin(theta)**2. \
) # dphi dphi
gCovPerZone = np.zeros([4, 4])
for k in xrange(N3Total):
for j in xrange(N2Total):
for i in xrange(N1Total):
gCovPerZone[0, 0] = gCovCheck[0][0][k, j, i]
gCovPerZone[0, 1] = gCovCheck[0][1][k, j, i]
gCovPerZone[0, 2] = gCovCheck[0][2][k, j, i]
gCovPerZone[0, 3] = gCovCheck[0][3][k, j, i]
gCovPerZone[1, 0] = gCovCheck[1][0][k, j, i]
gCovPerZone[1, 1] = gCovCheck[1][1][k, j, i]
gCovPerZone[1, 2] = gCovCheck[1][2][k, j, i]
gCovPerZone[1, 3] = gCovCheck[1][3][k, j, i]
gCovPerZone[2, 0] = gCovCheck[2][0][k, j, i]
gCovPerZone[2, 1] = gCovCheck[2][1][k, j, i]
gCovPerZone[2, 2] = gCovCheck[2][2][k, j, i]
gCovPerZone[2, 3] = gCovCheck[2][3][k, j, i]
gCovPerZone[3, 0] = gCovCheck[3][0][k, j, i]
gCovPerZone[3, 1] = gCovCheck[3][1][k, j, i]
gCovPerZone[3, 2] = gCovCheck[3][2][k, j, i]
gCovPerZone[3, 3] = gCovCheck[3][3][k, j, i]
gConPerZone = np.linalg.inv(gCovPerZone)
gCheck[k, j, i] = np.sqrt(-np.linalg.det(gCovPerZone))
gConCheck[0][0][k, j, i] = gConPerZone[0, 0]
gConCheck[0][1][k, j, i] = gConPerZone[0, 1]
gConCheck[0][2][k, j, i] = gConPerZone[0, 2]
gConCheck[0][3][k, j, i] = gConPerZone[0, 3]
gConCheck[1][0][k, j, i] = gConPerZone[1, 0]
gConCheck[1][1][k, j, i] = gConPerZone[1, 1]
gConCheck[1][2][k, j, i] = gConPerZone[1, 2]
gConCheck[1][3][k, j, i] = gConPerZone[1, 3]
gConCheck[2][0][k, j, i] = gConPerZone[2, 0]
gConCheck[2][1][k, j, i] = gConPerZone[2, 1]
gConCheck[2][2][k, j, i] = gConPerZone[2, 2]
gConCheck[2][3][k, j, i] = gConPerZone[2, 3]
gConCheck[3][0][k, j, i] = gConPerZone[3, 0]
gConCheck[3][1][k, j, i] = gConPerZone[3, 1]
gConCheck[3][2][k, j, i] = gConPerZone[3, 2]
gConCheck[3][3][k, j, i] = gConPerZone[3, 3]
alphaCheck = 1./np.sqrt(-gConCheck[0][0])
geomKerrSchild.computeConnectionCoeffs()
gammaUpDownDownCheck = np.zeros([4, 4, 4, N3Total, N2Total, N1Total])
gammaUpDownDownCheck[0][0][0] = 2.*r*sigmaMinus / sigma**3.
gammaUpDownDownCheck[0][0][1] = r * (2*r + sigma) * sigmaMinus / sigma**3.
gammaUpDownDownCheck[0][0][2] = -blackHoleSpin**2. * r * np.sin(2.*theta) \
* dtheta_dX2 / sigma**2.
gammaUpDownDownCheck[0][0][3] = -2. * blackHoleSpin * r * np.sin(theta)**2. \
|
buck06191/BayesCMD | batch/abcsbh/__init__.py | Python | gpl-2.0 | 72 | 0 | # ABCS | BH -- a drastically hacked-down version of the ABC-SY | SBIO package
|
zhangzhonglai/heron | integration-test/src/python/integration_test/core/constants.py | Python | apache-2.0 | 1,130 | 0 | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''constants.py: constants for integration test for pyheron'''
# Identifiers used to tag tuples emitted/consumed by the integration harness.
INTEGRATION_TEST_MOCK_MESSAGE_ID = "__integration_test_mock_message_id"
INTEGRATION_TEST_TERMINAL = "__integration_test_mock_terminal"
INTEGRATION_TEST_CONTROL_STREAM_ID = "__integration_test_control_stream_id"
# internal config key
MAX_EXECUTIONS = 10
HTTP_POST_URL_KEY = "http.post.url"
# user defined config key
# (Reconstructed: the comment above and USER_MAX_EXECUTIONS below were
# corrupted by dataset-split artifacts in the original dump.)
USER_SPOUT_CLASSPATH = "user.spout.classpath"
USER_BOLT_CLASSPATH = "user.bolt.classpath"
# user defined max executions
USER_MAX_EXECUTIONS = "user.max.exec"
|
OpenPhilology/nidaba | nidaba/plugins/leptonica.py | Python | gpl-2.0 | 8,246 | 0.000121 | # -*- coding: utf-8 -*-
"""
nidaba.plugins.leptonica
~~~~~~~~~~~~~~~~~~~~~~~~
Plugin accessing `leptonica <http://leptonica.com>`_ functions.
This plugin requires a liblept shared object in the current library search
path. On Debian-based systems it can be installed using apt-get
.. code-block:: console
# apt-get install libleptonica-dev
Leptonica's APIs are rather unstable and may differ significantly between
versions. If this plugin fails with weird error messages or workers are just
dying without discernable cause please submit a bug report including your
leptonica version.
"""
from __future__ import unicode_literals, print_function, absolute_import
import ctypes
from nidaba import storage
from nidaba.celery import app
from nidaba.tasks.helper import NidabaTask
from nidaba.nidabaexceptions import (NidabaInvalidParameterException,
NidabaLeptonicaException,
NidabaPluginException)
leptlib = 'liblept.so'
def setup(*args, **kwargs):
try:
ctypes.cdll.LoadLibrary(leptlib)
except Exception as e:
raise NidabaPluginException(e.message)
@app.task(base=NidabaTask, name=u'nidaba.binarize.sauvola',
          arg_values={'whsize': 'int', 'factor': (0.0, 1.0)})
def sauvola(doc, method=u'sauvola', whsize=10, factor=0.35):
    """
    Binarizes an input document utilizing Sauvola thresholding as described in
    [0]. Expects 8bpp grayscale images as input.

    [0] Sauvola, Jaakko, and Matti Pietikäinen. "Adaptive document image
    binarization." Pattern recognition 33.2 (2000): 225-236.

    Args:
        doc (unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files
        whsize (int): The window width and height that local statistics are
                      calculated on are twice the value of whsize. The minimal
                      value is 2.
        factor (float): The threshold reduction factor due to variance. 0 =<
                        factor < 1.

    Returns:
        (unicode, unicode): Storage tuple of the output file

    Raises:
        NidabaInvalidParameterException: Input parameters are outside the valid
                                         range.
    """
    # (Docstring reconstructed: "twice"/"minimal" were corrupted by
    # dataset-split artifacts in the original dump.)
    input_path = storage.get_abs_path(*doc)
    # Encode the parameters into the output name so runs with different
    # settings do not clobber each other.
    output_path = storage.insert_suffix(input_path, method, unicode(whsize),
                                        unicode(factor))
    lept_sauvola(input_path, output_path, whsize, factor)
    return storage.get_storage_path(output_path)
def lept_sauvola(image_path, output_path, whsize=10, factor=0.35):
    """
    Binarizes an input document utilizing Sauvola thresholding as described in
    [0]. Expects 8bpp grayscale images as input.

    [0] Sauvola, Jaakko, and Matti Pietikäinen. "Adaptive document image
    binarization." Pattern recognition 33.2 (2000): 225-236.

    Args:
        image_path (unicode): Input image path
        output_path (unicode): Output image path
        whsize (int): The window width and height that local statistics are
                      calculated on are twice the value of whsize. The minimal
                      value is 2.
        factor (float): The threshold reduction factor due to variance. 0 =<
                        factor < 1.

    Raises:
        NidabaInvalidParameterException: Input parameters are outside the valid
                                         range.
        NidabaLeptonicaException: Leptonica could not be loaded or one of its
                                  calls failed.
    """
    if whsize < 2 or factor >= 1.0 or factor < 0:
        raise NidabaInvalidParameterException('Parameters ({}, {}) outside of valid range'.format(whsize, factor))
    try:
        lept = ctypes.cdll.LoadLibrary(leptlib)
    except OSError as e:
        raise NidabaLeptonicaException('Loading leptonica failed: ' +
                                       e.message)
    pix = ctypes.c_void_p(lept.pixRead(image_path.encode('utf-8')))
    opix = ctypes.c_void_p()
    if lept.pixGetDepth(pix) != 8:
        lept.pixDestroy(ctypes.byref(pix))
        raise NidabaLeptonicaException('Input image is not grayscale')
    if lept.pixSauvolaBinarize(pix, whsize, ctypes.c_float(factor), 0, None,
                               None, None, ctypes.byref(opix)):
        lept.pixDestroy(ctypes.byref(pix))
        raise NidabaLeptonicaException('Binarization failed for unknown '
                                       'reason.')
    if lept.pixWriteImpliedFormat(output_path.encode('utf-8'), opix, 100, 0):
        # BUG FIX: the original destroyed `pix` twice here and never released
        # `opix`, leaking the output PIX on write failure. Release each PIX
        # exactly once (mirrors the pattern used in lept_dewarp below).
        lept.pixDestroy(ctypes.byref(opix))
        lept.pixDestroy(ctypes.byref(pix))
        raise NidabaLeptonicaException('Writing binarized PIX failed')
    lept.pixDestroy(ctypes.byref(opix))
    lept.pixDestroy(ctypes.byref(pix))
@app.task(base=NidabaTask, name=u'nidaba.img.dewarp')
def dewarp(doc, method=u'dewarp'):
    """
    Removes perspective distortion (as commonly exhibited by overhead scans)
    from an 1bpp input image.

    Args:
        doc (unicode, unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files.

    Returns:
        (unicode, unicode): Storage tuple of the output file
    """
    # Resolve the storage tuple to a filesystem path, derive the output
    # path from it, and delegate the actual work to leptonica.
    src = storage.get_abs_path(*doc)
    dst = storage.insert_suffix(src, method)
    lept_dewarp(src, dst)
    return storage.get_storage_path(dst)
def lept_dewarp(image_path, output_path):
    """
    Removes perspective distortion from an 1bpp input image.

    Args:
        image_path (unicode): Path to the input image
        output_path (unicode): Path to the output image

    Raises:
        NidabaLeptonicaException if one of leptonica's functions failed.
    """
    # The shared library is loaded per call; failures surface as a
    # domain-specific exception instead of a raw OSError.
    try:
        lept = ctypes.cdll.LoadLibrary(leptlib)
    except OSError as e:
        raise NidabaLeptonicaException('Loading leptonica failed: ' +
                                       e.message)
    pix = ctypes.c_void_p(lept.pixRead(image_path.encode('utf-8')))
    # opix receives the dewarped output PIX via the byref out-parameter.
    opix = ctypes.c_void_p()
    # NOTE(review): argument meanings (thresholds/flags) follow leptonica's
    # dewarpSinglePage API -- confirm against the installed liblept version,
    # as the module docstring warns the API is unstable across releases.
    ret = lept.dewarpSinglePage(pix, 0, 1, 1, ctypes.byref(opix), None, 0)
    if ret == 1 or ret is None:
        # leptonica signals failure with a non-zero return.
        lept.pixDestroy(ctypes.byref(pix))
        lept.pixDestroy(ctypes.byref(opix))
        raise NidabaLeptonicaException('Dewarping failed for unknown reason.')
    if lept.pixWriteImpliedFormat(output_path.encode('utf-8'), opix, 100, 0):
        lept.pixDestroy(ctypes.byref(pix))
        lept.pixDestroy(ctypes.byref(opix))
        raise NidabaLeptonicaException('Writing dewarped PIX failed')
    # Success path: release both PIX objects (pixDestroy NULLs the pointer).
    lept.pixDestroy(ctypes.byref(pix))
    lept.pixDestroy(ctypes.byref(opix))
@app.task(base=NidabaTask, name=u'nidaba.img.deskew')
def deskew(doc, method=u'deskew'):
    """
    Removes skew (rotational distortion) from an 1bpp input image.

    Args:
        doc (unicode, unicode): The input document tuple.
        method (unicode): The suffix string appended to all output files.

    Returns:
        (unicode, unicode): Storage tuple of the output file
    """
    # Resolve the storage tuple to a filesystem path, derive the output
    # path from it, and delegate the actual work to leptonica.
    src = storage.get_abs_path(*doc)
    dst = storage.insert_suffix(src, method)
    lept_deskew(src, dst)
    return storage.get_storage_path(dst)
def lept_deskew(image_path, output_path):
"""
Removes skew (rotational distortion from an 1bpp input image.
Args:
image_path (unicode): Input image
output_path (unicode): Path to the output document
Raises:
NidabaLeptonicaException if one of leptonica's functions failed.
"""
try:
lept = ctypes.cdll.LoadLibrary(leptlib)
except OSError as e:
raise NidabaLeptonicaException('Loading leptonica failed: ' +
e.message)
pix = ctypes.c_void_p(lept.pixRead(image_path.encode('utf-8')))
opix = ctypes.c_void_p(lept.pixFindSkewAndDeskew(pix, 4, None, None))
if opix is None:
lept.pixDestroy(ctypes.byref(pix))
raise NidabaLeptonicaException('Deskewing failed for unknown reason.')
if lept.pixWriteImpliedFormat(output_path.encode('utf-8'), opix, 100, 0):
lept.pixDestroy(ctypes.byref(pix))
lept.pixDestroy(ctypes.byref(opix))
raise NidabaLeptonicaException('Writing deskewed PIX failed')
lept.pixDes |
kbrebanov/ansible | lib/ansible/modules/network/avi/avi_cloudproperties.py | Python | gpl-3.0 | 3,643 | 0.000823 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudproperties
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CloudProperties Avi RESTful Object
description:
- This module is used to configure CloudProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cc_props:
description:
- Cloudconnector properties.
cc_vtypes:
description:
- Cloud types supported by cloudconnector.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
hyp_props:
description:
- Hypervisor properties.
info:
description:
- Properties specific to a cloud type.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CloudProperties object
avi_cloudproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cloudproperties
"""
RETURN = '''
obj:
description: CloudProperties (api/cloudproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: create/update/delete the Avi CloudProperties object.

    Builds the module argument spec, merges the common Avi connection
    arguments, and delegates the REST work to avi_ansible_api().
    """
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        cc_props=dict(type='dict',),
        cc_vtypes=dict(type='list',),
        hyp_props=dict(type='list',),
        info=dict(type='list',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail only after AnsibleModule exists so fail_json can report cleanly.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # No sensitive fields to redact for this object, hence the empty set.
    return avi_ansible_api(module, 'cloudproperties',
                           set())


if __name__ == '__main__':
    main()
|
bm2-lab/cage | src/core/sg/interface_sg.py | Python | mit | 2,004 | 0.003493 | import os
from src.core import prep
from sgprocessor import *
def ProcessSg(p, opts):
    """Map sgRNA reads to the reference genome and optionally annotate them.

    Args:
        p: option parser; used only to abort with an error message when the
            required $BEDDB environment variable is missing.
        opts: parsed command-line options (uses opts.sg, opts.anno,
            opts.tdir, opts.ref, opts.thrd).
    """
    if opts.anno == True:
        # Annotation requires the BED database location; fail fast if unset.
        if 'BEDDB' not in os.environ:
            p.error('$BEDDB Not Exist. See README')
    str_path_sgfq = opts.sg
    str_nm = os.path.basename(os.path.splitext(opts.sg)[0])
    str_proj = 'aux'
    str_path_proj = os.path.join(opts.tdir, str_proj)
    if not os.path.exists(str_path_proj):
        os.makedirs(str_path_proj)
    # Intermediate files live under tdir/aux; the final .sg under tdir.
    str_path_sgpsam = os.path.join(str_path_proj, str_nm + '.sgpsam')
    str_path_sgsam = os.path.join(str_path_proj, str_nm + '.sgsam')
    str_path_sg = os.path.join(opts.tdir, str_nm + '.sg')
    print('Mapping sgRNA seq to ref genome with Bwa...')
    prep.CallBWA(str_path_sgfq, '', opts.ref, str_path_sgpsam, False, opts.thrd)
    prep.FilterSam(str_path_sgpsam, str_path_sgsam, False)
    print('Done')
    print('Processing sgsam...')
    OrganizeSgsam(str_path_sgsam, str_path_sg)
    print('Done')
    if opts.anno == True:
        str_path_sgbed = os.path.join(str_path_proj, str_nm + '.sgbed')
        str_path_sgmap = os.path.join(str_path_proj, str_nm + '.sgmap')
        str_path_sga = os.path.join(opts.tdir, str_nm + '.sga')
        print('Annotating sgRNA...')
        int_status = AnnotateSg(str_path_sgsam, opts.ref, str_path_sgbed, str_path_sgmap)
        # Status codes: 1-3 = fully annotated with progressively more
        # databases; 4-5 = same coverage as 2-3 but some entries are None.
        if int_status == 1:
            print('Annotated with RefSeq')
        elif int_status == 2:
            print('Annotated with RefSeq and UCSC Gene')
        elif int_status == 3:
            print('Annotated with RefSeq, UCSC Gene and GENCODE')
        elif int_status == 4:
            print('Annotated with RefSeq and UCSC Gene')
            print('Warning: Some are marked with None')
        elif int_status == 5:
            print('Annotated with RefSeq, UCSC Gene and GENCODE')
            print('Warning: Some are marked with None')
        print('Done')
        print('Merging sg and sgmap...')
        MergeSg(str_path_sg, str_path_sgmap, str_path_sga)
        print('Done')
|
montenegroariel/sigos | apps/historias_clinicas/views.py | Python | gpl-3.0 | 4,039 | 0.00322 | from django.http import HttpResponseRedirect, JsonResponse
from django.views.generic import CreateView, UpdateView
from django.contrib.messages.views import SuccessMessageMixin
from .models import HistoriaClinica, Patologia
from .forms import HistoriaClinicaForms
from apps.afiliados.models import Titular, Adherente
from apps.personas.models import Persona
class HistoriaClinicaCreate(SuccessMessageMixin, CreateView):
    """Create view for a patient's clinical history (HistoriaClinica).

    The owning Persona's pk arrives in the URL kwargs; after a successful
    save the user is sent through the ``redireccion`` dispatcher view.
    """
    model = HistoriaClinica
    form_class = HistoriaClinicaForms
    template_name = 'historias_clinicas/historia_clinica_form.html'
    success_url = '/historia/clinica/alta/'
    success_message = 'La historia clínica se guardo con exito'
    def form_valid(self, form):
        """Attach the Persona from the URL and redirect via the dispatcher."""
        form.instance.persona = Persona.objects.get(pk=self.kwargs['pk'])
        self.success_url = '/historia/clinica/redireccion/%s' % str(Persona.objects.get(pk=self.kwargs['pk']).id)
        return super(HistoriaClinicaCreate, self).form_valid(form)
    def get_context_data(self, **kwargs):
        """Expose the Persona being edited to the template."""
        context = super(HistoriaClinicaCreate, self).get_context_data(**kwargs)
        context['persona'] = Persona.objects.get(pk=self.kwargs['pk'])
        return context
def redireccion(request, id):
    """Dispatch to the clinical-history form for an affiliate.

    ``id`` may identify either a Titular or an Adherente.  Redirects to
    the edit form when the person already has a HistoriaClinica, or to
    the creation form otherwise.
    """
    def _destino(persona_qs):
        # Edit the existing history if there is one, otherwise create.
        historia_clinica = HistoriaClinica.objects.filter(persona=persona_qs)
        if historia_clinica.exists():
            return HttpResponseRedirect('/historia/clinica/modi/' + str(historia_clinica[0].id))
        return HttpResponseRedirect('/historia/clinica/alta/' + str(persona_qs[0].id))

    titular = Titular.objects.filter(id=id)
    if titular.exists():
        return _destino(Persona.objects.filter(titular=titular))
    adherente = Adherente.objects.filter(id=id)
    if adherente.exists():
        return _destino(Persona.objects.filter(adherente=adherente))
    # NOTE(review): the original fell through returning None (HTTP 500 in
    # Django) when the id matched neither model; behaviour preserved.
    return None
class HistoriaClinicaUpdate(SuccessMessageMixin, UpdateView):
    """Update view for an existing HistoriaClinica.

    The history's pk arrives in the URL kwargs; after saving, the user is
    routed back through the ``redireccion`` dispatcher for its Persona.
    """
    model = HistoriaClinica
    form_class = HistoriaClinicaForms
    template_name = 'historias_clinicas/historia_clinica_form.html'
    success_url = '/historia/clinica/alta/'
    success_message = 'La historia clínica se guardo con exito'

    def form_valid(self, form):
        """Re-attach the owning Persona and redirect via the dispatcher."""
        # Fetch the owning persona id once instead of re-querying per use.
        persona_id = HistoriaClinica.objects.get(pk=self.kwargs['pk']).persona.id
        form.instance.persona = Persona.objects.get(pk=persona_id)
        self.success_url = '/historia/clinica/redireccion/%s' % str(persona_id)
        return super(HistoriaClinicaUpdate, self).form_valid(form)

    def get_context_data(self, **kwargs):
        """Expose the Persona that owns this history to the template."""
        context = super(HistoriaClinicaUpdate, self).get_context_data(**kwargs)
        context['persona'] = Persona.objects.get(pk=HistoriaClinica.objects.get(pk=self.kwargs['pk']).persona.id)
        return context
# Ajax ######
def patologia_create_ajax(request):
    """AJAX endpoint: create a Patologia and return its id and name as JSON.

    Only answers POST requests made via XMLHttpRequest; any other request
    implicitly returns None (behaviour kept from the original).
    """
    if request.method == 'POST' and request.is_ajax():
        patologia = Patologia(
            nombre_enfermedad=request.POST.get('id_nombre_enfermedad_patologia_ajax'),
            fecha_deteccion=request.POST.get('id_fecha_deteccion_patologia_ajax'))
        patologia.save()
        # save() populates the primary key, so the original re-query and
        # nested loops over .values() are unnecessary (and could leave
        # nombre_enfermedad unbound on an empty queryset).
        return JsonResponse({'id': patologia.id,
                             'nombre_enfermedad': patologia.nombre_enfermedad})
|
luogu-dev/cyaron | cyaron/log.py | Python | lgpl-3.0 | 3,103 | 0.006445 | from __future__ import print_function
from functools import partial
import sys
from threading import Lock
try:
import colorful
except ImportError:
class colorful:
def __getattr__(self, attr):
return lambda st: st
colorful = colorful()
from .utils import make_unicode
# Keep a handle on the real print(); this module later rebinds the name
# ``print`` to a logging wrapper.
__print = print


def _print(*args, **kwargs):
    """print() shim that also honours ``flush`` under Python 2."""
    should_flush = kwargs.pop('flush', False)
    __print(*args, **kwargs)
    if should_flush:
        kwargs.get('file', sys.stdout).flush()
def _join_dict(a, b):
    """Return a new dict combining *a* and *b*; *b* wins on key conflicts.

    Neither input dict is modified.
    """
    c = a.copy()
    c.update(b)
    return c
# Registry of log functions keyed by level name, guarded by a lock so
# concurrent log calls do not interleave.
_log_funcs = {}
_log_lock = Lock()


def log(funcname, *args, **kwargs):
    """Dispatch to the log function registered under ``funcname``.

    An unregistered name is a silent no-op (returns None).  Returns
    whatever the registered function returns.
    """
    # ``with`` releases the lock even if the handler raises; the original
    # bare acquire()/release() pair would leave the lock held forever.
    with _log_lock:
        return _log_funcs.get(funcname, lambda *args, **kwargs: None)(*args, **kwargs)
"""5 log levels
1. debug: debug info
2. info: common info
3. print: print output
4. warn: warnings
5. error: errors
"""
debug = partial(log, 'debug')
info = partial(log, 'info')
print = partial(log, 'print')
warn = partial(log, 'warn')
error = partial(log, 'error')
def register_logfunc(funcname, func):
    """Register ``func`` as the handler for level ``funcname``.

    Passing ``func=None`` unregisters any existing handler; unregistering
    a name that was never registered is a silent no-op.
    """
    if func is None:
        _log_funcs.pop(funcname, None)
    else:
        _log_funcs[funcname] = func
_nb_print = lambda *args, **kwargs: _print(*args, **_join_dict(kwargs, {'flush': True}))
_nb_print_e = lambda *args, **kwargs: _print(*args, **_join_dict(kwargs, {'file': sys.stderr, 'flush': True}))
_cl_print = lambda color, *args, **kwargs: _nb_print(*[color(make_unicode(item)) for item in args], **kwargs) if sys.stdout.isatty() else _nb_print(*args, **kwargs)
_cl_print_e = lambda color, *args, **kwargs: _nb_print_e(*[color(make_unicode(item)) for item in args], **kwargs) if sys.stderr.isatty() else _nb_print_e(*args, **kwargs)
_default_debug = partial(_cl_print, colorful.cyan)
_default_info = partial(_cl_print, colorful.blue)
_default_print = _nb_print
_default_warn = partial(_cl_print_e, colorful.yellow)
_default_error = partial(_cl_print_e, colorful.red)
def set_quiet():
    """Set log mode to "quiet": only explicit output and errors are emitted."""
    register_logfunc('debug', None)
    register_logfunc('info', None)
    register_logfunc('print', _default_print)
    register_logfunc('warn', None)
    register_logfunc('error', _default_error)
def set_normal():
    """Set log mode to "normal": all levels except debug are emitted."""
    register_logfunc('debug', None)
    register_logfunc('info', _default_info)
    register_logfunc('print', _default_print)
    register_logfunc('warn', _default_warn)
    register_logfunc('error', _default_error)
def set_verbose():
    """Set log mode to "verbose": all five log levels are emitted."""
    register_logfunc('debug', _default_debug)
    register_logfunc('info', _default_info)
    register_logfunc('print', _default_print)
    register_logfunc('warn', _default_warn)
    register_logfunc('error', _default_error)
set_normal()
|
oskar456/youtube-dl | youtube_dl/extractor/karrierevideos.py | Python | unlicense | 3,379 | 0.002073 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
fix_xml_ampersands,
float_or_none,
xpath_with_ns,
xpath_text,
)
class KarriereVideosIE(InfoExtractor):
    """Extractor for karrierevideos.at career videos (RTMP streams)."""
    _VALID_URL = r'https?://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin',
        'info_dict': {
            'id': '32c91',
            'ext': 'flv',
            'title': 'AltenpflegerIn',
            'description': 'md5:dbadd1259fde2159a9b28667cb664ae2',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # broken ampersands
        'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun',
        'info_dict': {
            'id': '5sniu',
            'ext': 'flv',
            'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"',
            'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Prefer the <meta> title; fall back to the visible page heading.
        title = (self._html_search_meta('title', webpage, default=None) or
                 self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
        # The real id is the playlist config name, not the URL slug.
        video_id = self._search_regex(
            r'/config/video/(.+?)\.xml', webpage, 'video id')
        # Server returns malformed headers
        # Force Accept-Encoding: * to prevent gzipped results
        playlist = self._download_xml(
            'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id,
            video_id, transform_source=fix_xml_ampersands,
            headers={'Accept-Encoding': '*'})

        NS_MAP = {
            'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'
        }

        def ns(path):
            return xpath_with_ns(path, NS_MAP)

        item = playlist.find('./tracklist/item')
        video_file = xpath_text(
            item, ns('./jwplayer:file'), 'video url', fatal=True)
        streamer = xpath_text(
            item, ns('./jwplayer:streamer'), 'streamer', fatal=True)
        uploader = xpath_text(
            item, ns('./jwplayer:author'), 'uploader')
        duration = float_or_none(
            xpath_text(item, ns('./jwplayer:duration'), 'duration'))
        description = self._html_search_regex(
            r'(?s)<div class="leadtext">(.+?)</div>',
            webpage, 'description')
        thumbnail = self._html_search_meta(
            'thumbnail', webpage, 'thumbnail')
        if thumbnail:
            thumbnail = compat_urlparse.urljoin(url, thumbnail)
        return {
            'id': video_id,
            'url': streamer.replace('rtmpt', 'rtmp'),
            'play_path': 'mp4:%s' % video_file,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
        }
|
wydler/python-piko | piko/models.py | Python | gpl-2.0 | 6,161 | 0.000325 | import json
import socket
import struct
import hexdump
from piko.conf import settings
class Base(object):
    """Marker base class shared by all PIKO model objects."""
    pass
class Packet(Base):
    """ PIKO packet class.

    Builds a request frame for a given command ``code`` and device
    ``address`` and, after Socket.send() fills ``response``, unpacks the
    reply with a struct pattern.  Written for Python 2 byte-strings.
    """
    request = ''
    response = ''
    # Name-mangled one-shot guards so pack()/unpack() cannot run twice.
    __is_packed = False
    __is_unpacked = False
    def __init__(self, code, address, pack=settings.AUTOPACK):
        # Frame layout: 0x62, addr, 0x03, addr, 0x00, code.
        self.request = '\x62%s\x03%s\x00%s' % (chr(address), chr(address), chr(code))
        if pack:
            self.pack()
    def pack(self):
        """Pack request packet: append the checksum byte and a trailing NUL."""
        if self.__is_packed:
            raise Exception('Packet is already packed.')
        self.request += '%s\x00' % (chr(self._generate_checksum()))
        self.__is_packed = True
    def is_packed(self):
        # True once pack() has appended the checksum/terminator.
        return self.__is_packed
    def unpack(self, pattern):
        """Unpack response packet with the given struct ``pattern``."""
        if self.__is_unpacked:
            raise Exception('Packet is already unpacked.')
        self._verify_checksum()
        self.response = struct.unpack(pattern, self.response)
        self.__is_unpacked = True
    def is_unpacked(self):
        # True once unpack() has replaced ``response`` with a tuple.
        return self.__is_unpacked
    def _generate_checksum(self):
        """Generate checksum for packet: negative byte-sum of request, mod 256."""
        chksum = 0
        for i in range(len(self.request)):
            chksum -= ord(self.request[i])
            chksum %= 256
        return chksum
    def _verify_checksum(self):
        """Verify packet checksum (second-to-last byte of the response)."""
        if len(self.response) < 2:
            raise ValueError('Packet too short.')
        chksum = 0
        for i in range(len(self.response) - 2):
            chksum -= ord(self.response[i])
            chksum %= 256
        if chksum != ord(self.response[-2]):
            raise ValueError('Checksum is wrong.')
        return True
class Socket(Base):
"""PIKO socket."""
def __init__(self, *p):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, *p)
self._sock.settimeout(5)
def __getattr__(self, name):
return getattr(self._sock, name)
def connect(self, *p):
"""Connect to device."""
self._sock.connect(*p)
def disconnect(self):
self._sock.close()
def send(self, packet):
"""Send code to device."""
if settings.DEBUG:
print '#' * 76
hexd | ump.hexdump(packet.request)
self._sock.send(packet.request) |
packet.response = self._sock.recv(4096)
if settings.DEBUG:
print '-' * 76
hexdump.hexdump(packet.response)
return packet.response
class Device(Base):
    """Piko device class.

    Queries a PIKO inverter over TCP and collects its stats into ``data``.
    NOTE(review): struct patterns below assume the device's little-endian
    frame layout — confirm against the protocol documentation.
    """
    # Rebound to a fresh dict by update(); class-level default is shared
    # until then.
    data = {}
    def __init__(self, host, port=81, addr=255):
        self.sock = Socket()
        self.host = host
        self.port = port
        self.addr = addr
        # Prefix used for console status lines, e.g. "[host:81/255]".
        self.__prefix = '[%s:%d/%d]' % (host, port, addr)
    def __str__(self):
        return self.__prefix
    def connect(self):
        """Connect to host."""
        print "%s %s" % (self.__prefix, "Connecting".ljust(20, '.')),
        self.sock.connect((self.host, self.port))
        print "OK!"
    def disconnect(self):
        """Close connection with host."""
        print "%s %s" % (self.__prefix, "Disconnecting".ljust(20, '.')),
        self.sock.disconnect()
        print "OK!"
    def update(self):
        """Update all stats (status, inverter info, current/daily/total energy)."""
        self.data = {}
        self._status()
        self._inverter()
        self._current()
        self._daily()
        self._total()
    def accumulate_power(self):
        """Calculate accumulated power across the three AC phases."""
        return self.data['current']['ac_1'][2] + self.data['current']['ac_2'][2] + self.data['current']['ac_3'][2]
    def _status(self):
        """Get the current status."""
        packet = Packet(0x57, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxxBBHxxxxBx')
        self.data['status'] = {
            'ens_s': packet.response[0],
            'ens_err': packet.response[1],
            'err': packet.response[2]
        }
    def _inverter(self):
        """Get the inverter data."""
        # Get inverter model.
        self.data['inverter'] = {}
        packet = Packet(0x90, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxx16sBxxxxxxBxxxxBx')
        self.data['inverter']['model'] = packet.response[0].strip('\x00')
        self.data['inverter']['string'] = packet.response[1]
        self.data['inverter']['phase'] = packet.response[2]
        # Get inverter name.
        packet = Packet(0x44, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxx15sBx')
        self.data['inverter']['name'] = packet.response[0].strip('\x00')
        # Get inverter serial number.
        packet = Packet(0x50, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxx13sBx')
        self.data['inverter']['sn'] = packet.response[0]
        # Get item number.
        packet = Packet(0x51, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxxIBx')
        self.data['inverter']['ref'] = hex(packet.response[0])
    def _current(self):
        """Get current energy (per-string DC, per-phase AC, frequency, status)."""
        packet = Packet(0x43, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxxHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHBBBBBx')
        self.data['current'] = {}
        self.data['current']['dc_1'] = packet.response[0:5]
        self.data['current']['dc_2'] = packet.response[5:10]
        self.data['current']['dc_3'] = packet.response[10:15]
        self.data['current']['ac_1'] = packet.response[15:19]
        self.data['current']['ac_2'] = packet.response[19:23]
        self.data['current']['ac_3'] = packet.response[23:27]
        self.data['current']['ac_f'] = packet.response[28]
        self.data['current']['ac_s'] = packet.response[29]
    def _daily(self):
        """Get daily energy."""
        packet = Packet(0x9d, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxxIBx')
        self.data['daily'] = packet.response[0]
    def _total(self):
        """Get total energy."""
        packet = Packet(0x45, self.addr)
        self.sock.send(packet)
        packet.unpack('<xxxxxIBx')
        self.data['total'] = packet.response[0]
|
asposepdf/Aspose_Pdf_Cloud | Examples/Python/Examples/GetTextFormatOfPDFSegment.py | Python | mit | 1,391 | 0.010784 | import asposepdfcloud
from asposepdfcloud.PdfApi import PdfApi
from asposepdfcloud.PdfApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Pdf API SDK
api_client = asposepdfcloud.ApiClient.ApiClient(apiKey, appSid, True)
pdfApi = PdfApi(api_client);
#s | et input file name
name = "sample-input.pdf"
pageNumber = 1
fragmentNumber = 1
segmentNumber = 1
try:
#upload file to aspose cloud storage
response = storageApi.PutCreate(name, data_folder + name)
#invoke Aspose.Pdf Cloud SDK API to get text format of a particular segment
response = pdfApi.GetSegmentTextFormat(name, pageNumber, fragmentNumber, segmentNumber)
if response.Status == "OK":
segTextFormat = re | sponse.TextFormat
print "count :: " + segTextFormat.FontName
print "count :: " + str(segTextFormat.FontSize)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
bdeangelis/call_man_bat | static/frontend/jsonmaking.py | Python | mit | 6,080 | 0.026151 | import json, random, time, sys
"""
Creating a random JSON object based on lists of info and random numbers
to assign the index
"""
##Directory
input_file = "test.json"
###Success message takes the file_name and operation type (ie. written, closed)
def process_message(outcome, file_name, operation_type):
print "*******%s File: %s %s *******" % (outcome, file_name, operation_type)
##Open file
try:
open_file=open(input_file, 'w')
print "File opened"
except:
print "Error opening "+input_file
##Random chooser-random number picker function to be used over and over, but needs to be created before called
##To keep everything clean it's listed before the others funtions so that they maybe listed in order of the dictionary keys
def random_chooser(start, end):
    """Return a random integer in the half-open range [start, end)."""
    return random.randrange(start, end)
##Lists of info
doctors_name=["Dr_K", "Dr. Pepper", "Dr. Lector", "Dr. Seus", "Dr Dre", "Dr. Phill", "Dr. Glass"]
special_notes_list=["No more doctors available for the weekend", "All offices closed for Labor Day", "Offices closed till Monday for Christmas",
"No Dr. on call Saturdays", "No Dr. on call Fridays", "No Dr. on call Mondays", "No Dr. on call Wednesdays" ,"No Dr. on call Tuesdays",
"Office closed for snow"]
dates=["1/17/2013","12/02/2011", "11/08/2012", "4/1/2010", "5/23/2011","1/15/2013","12/02/2010", "12/08/2012", "6/1/2010", "7/23/2011"]
first_name=["Bob", "Peter", "Jim", "Gerry", "Jean", "Robert", "Susan", "Mary", "Jo", "Brian"]
last_name=["Cameron", "Bender", "Neutron", "Simmons", "Jackson", "Smith", "Gardner", "Crocker","Black", "White"]
from_place=["Fort Worth","Plano","Houston","Little Rock","Detroit","Memphis", "Dallas","Arlington","Jenks","Chicago","Tulsa", "Boise", "Desmoins", "Minnieapolis", "St. Louis"]
check_list=["5647","7610","1230","3210","6543","9874","1324","3215","5897","6546","5968","6540"]
content_list=["Nice to see you!", "This is a content message", "This is another content message" ,"This is a test message to verify that the content is coming through",
"This is the content you are looking for","Content is magically here","Some content","Test content for your viewing pleasure",
"This is a test of the call_manager content system","Testing...testing...1...2...3!","Testing...testing...4...5...6!"]
##Keys for the dictionary
messages_info_keys = ["date_and_time", "caller_name", "from", "call_back_number", "call_back_ext", "check_number", "content"]
##Random pick of date from list dates
def date_picker():
picked_date=random_chooser(1,len(dates))
new_date=dates[picked_date]
return new_date
##creates a full name from lists first_name and last_name
def pick_your_name():
first=random_chooser(1,len(first_name))
last=random_chooser(1,10)
combo_name =first_name[first]+" "+last_name[last]
return combo_name
##Random pick of location from list from_place
def random_place():
picked_place=random_chooser(1,len(from_place))
place=from_place[picked_place]
return place
##Random number generator with randint from the random module
def random_number_maker(n):
    """Return a random integer with exactly ``n`` digits."""
    lowest = 10 ** (n - 1)
    highest = 10 ** n - 1
    return random.randint(lowest, highest)
##combines a group of random numbers to resemble a phone number
def random_phone_number():
return "%s-%s-%s" %(str(random_number_maker(3)), str(random_number_maker(3)), str(random_number_maker(4)))
##call_back_ext picker, uses random number to generate number
def random_ext():
extension_maker=random_chooser(111,999)
return extension_maker
## not needed using random phone number generator
#call_back=[1,65,3,5,7,88]
##Random check number picker from lis | t check_list
def check():
check_picker=random_chooser(1,10)
check=check_list[check_picker]
#=[1,2,3,5,6,8,98]
return check
##Random content picker from list content_list
def content():
content_picker=random_chooser(1,len(content_list))
content=content_list[content_picker]
return content
##Generates a random number of message items
def messages_list_random_maker():
    """Generate a random-length list of fake message dicts.

    NOTE(review): the loop bound is re-rolled on every iteration (as in
    the original), which biases the resulting length toward small counts.
    """
    x = 0
    lister_maker = []
    while x < random_chooser(1, 20):
        messages_info_values = [date_picker(), pick_your_name(), random_place(),
                                random_phone_number(), random_ext(), check(), content()]
        messages_info_list = dict(zip(messages_info_keys, messages_info_values))
        lister_maker.append(messages_info_list)
        x = x + 1
    return lister_maker
##dictionaries of info
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
messages_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
##Main area that puts everything together
doctors_list=[]
for name in doctors_name:
random_number=random.randrange(0,10)
special_notes_random_number=random.randrange(0,len(special_notes_list))
special_notes=special_notes_list[special_notes_random_number]
acct_number=random_number_maker(4)
ticket_number = abs(random_number-10)+1
duration_of_call = abs(random_number-10)+1
listerine = messages_list_random_maker()
account_info_dict = {"home_number":random_phone_number(), "cell_number":random_phone_number()}
doctors_list.append({"doctors_name":name, "special_notes":special_notes, "acct_number":acct_number,
"ticket_number":ticket_number, "duration_of_call":duration_of_call, "call_status": "ringing", "account_info": account_info_dict,
"messages":listerine})
##Dumps the dict to a jason object
jasoner=json.dumps(doctors_list)
#print jasoner
##Count up percentage of completion
for i in range(100):
print "\r", str(i)+"%"
time.sleep(.025)
print "\r"
##Write file
try:
open_file.write(jasoner)
process_message("SUCCESS", input_file, "Written")
except:
process_message("FAILURE" , input_file, "Not Written")
##Close file
try:
open_file.close()
process_message("SUCCESS", input_file, "Closed")
except:
process_message("FAILURE" , input_file, "Not Closed")
|
sassoftware/mint | mint/rest/middleware/error.py | Python | apache-2.0 | 3,212 | 0.004047 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import traceback
from restlib import response
from mint import logerror
from mint import mint_error
from mint.rest.api import models
from mint.rest.modellib import converter
log = logging.getLogger(__name__)
class ErrorCallback(object):
def __init__(self, controller):
self.controller = controller
def processException(self, request, excClass, exception, tb):
message = '%s: %s' % (excClass.__name__, exception)
if hasattr(exception, 'status'):
status = exception.status
else:
status = 500
self.logError(request, excClass, exception, tb, doEmail=True)
# Only send the traceback information if it's an unintentional
# exception (i.e. a 500)
if status == 500:
tbString = 'Traceback:\n' + ''.join(traceback.format_tb(tb))
text = [message + '\n', tbString]
else:
tbString = None
text = [message + '\n']
isFlash = 'HTTP_X_FLASH_VERSION' in request.headers or 'X-Wrap-Response-Codes' in request.headers
if not getattr(request, 'contentType', None):
request.contentType = 'text/xml'
request.responseType = 'xml'
if isFlash or request.contentType != 'text/plain':
# for text/plain, just print out the traceback in the easiest to read
# format.
code = status
if isFlash:
# flash ignores all data sent with a non-200 error
status = 200
error = models.Fault(code=code, message=message,
traceback=tbString)
text = converter.toText(request.responseType, error,
self.controller, request)
return response.Response(text, content_type=request.contentType,
status=status)
def logError(self, reques | t, e_type, e_value, e_tb, doEmail=True):
info = {
'uri' | : request.thisUrl,
'path' : request.path,
'method' : request.method,
'headers_in' : request.headers,
'request_params' : request.GET,
'post_params' : request.POST,
'remote' : request.remote,
}
try:
logerror.logErrorAndEmail(self.controller.cfg, e_type, e_value,
e_tb, 'API call', info, doEmail=doEmail)
except mint_error.MailError, err:
log.error("Error sending mail: %s", str(err))
|
shankari/e-mission-server | emission/tests/analysisTests/configTests/TestSaveAllConfigs.py | Python | bsd-3-clause | 4,255 | 0.007051 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import json
import copy
import uuid
# Test imports
import emission.tests.common as etc
import emission.analysis.configs.config as eacc
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.core.get_database as edb
import emission.tests.common as etc
class TestSaveAllConfigs(unittest.TestCase):
    """save_all_configs should copy the newest sensor-config override inside
    the time window into the usercache, and nothing otherwise."""

    def setUp(self):
        self.androidUUID = uuid.uuid4()
        self.iosUUID = uuid.uuid4()
        # Template entry; tests take deep copies before mutating it.
        self.dummy_config = {'user_id': self.androidUUID,
                             'metadata': {
                                 'key': 'config/sensor_config'
                             }, 'data': {
                                 'is_duty_cycling': True
                             }
                             }
        logging.debug("androidUUID = %s, iosUUID = %s" % (self.androidUUID, self.iosUUID))

    def tearDown(self):
        edb.get_timeseries_db().delete_many({"user_id": self.androidUUID})
        edb.get_timeseries_db().delete_many({"user_id": self.iosUUID})
        edb.get_usercache_db().delete_many({"user_id": self.androidUUID})
        edb.get_usercache_db().delete_many({"user_id": self.iosUUID})
        edb.get_analysis_timeseries_db().delete_many({"user_id": self.androidUUID})
        edb.get_analysis_timeseries_db().delete_many({"user_id": self.iosUUID})

    def testNoOverrides(self):
        tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
        eacc.save_all_configs(self.androidUUID, tq)
        saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
        self.assertEqual(len(saved_entries), 0)

    def testOneOverride(self):
        # deepcopy: copy.copy shared the nested metadata/data dicts with the
        # fixture (and between cfgs), so mutations leaked across entries.
        cfg_1 = copy.deepcopy(self.dummy_config)
        cfg_1['metadata']['write_ts'] = 1440700000
        edb.get_timeseries_db().insert_one(cfg_1)
        tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
        eacc.save_all_configs(self.androidUUID, tq)
        saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
        self.assertEqual(len(saved_entries), 1)
        logging.debug(saved_entries[0])
        self.assertEqual(saved_entries[0]['data']['is_duty_cycling'], cfg_1['data']['is_duty_cycling'])

    def testTwoOverride(self):
        cfg_1 = copy.deepcopy(self.dummy_config)
        cfg_1['metadata']['write_ts'] = 1440700000
        edb.get_timeseries_db().insert_one(cfg_1)
        # The later write should win.
        cfg_2 = copy.deepcopy(self.dummy_config)
        cfg_2['metadata']['write_ts'] = 1440710000
        cfg_2['data']['is_duty_cycling'] = False
        edb.get_timeseries_db().insert_one(cfg_2)
        tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
        eacc.save_all_configs(self.androidUUID, tq)
        saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
        self.assertEqual(len(saved_entries), 1)
        logging.debug(saved_entries[0])
        self.assertEqual(saved_entries[0]['data']['is_duty_cycling'], cfg_2['data']['is_duty_cycling'])

    def testOldOverride(self):
        # Both writes predate the query window, so nothing should be saved.
        cfg_1 = copy.deepcopy(self.dummy_config)
        cfg_1['metadata']['write_ts'] = 1440500000
        edb.get_timeseries_db().insert_one(cfg_1)
        cfg_2 = copy.deepcopy(self.dummy_config)
        cfg_2['metadata']['write_ts'] = 1440610000
        edb.get_timeseries_db().insert_one(cfg_2)
        tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
        eacc.save_all_configs(self.androidUUID, tq)
        saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
        self.assertEqual(len(saved_entries), 0)

if __name__ == '__main__':
    etc.configLogging()
    unittest.main()
|
dpazel/music_rep | instruments/instrument_class.py | Python | mit | 963 | 0.010384 | """
File: instrument_class.py
Purpose: Defines a major category of instruments in the instrument category. In this case,
is used to identity broad instrument types, e.g. stringw, woodwinds, bass, percussion, keyboards.
"""
from instruments.instrument_base import InstrumentBase
class InstrumentClass(InstrumentBase):
    """
    Class to identify a broad instrument type, e.g. strings, woodwinds, bass, percussion, keyboards.
    """
    def __init__(self, name, parent=None):
        """
        Constructor.
        Args:
            name: (String) name of class, e.g. woodwinds, strings
            parent: optional parent node in the instrument hierarchy.
        """
        InstrumentBase.__init__(self, name, parent)
        self.__families = []
    @property
    def families(self):
        # Return a copy so callers cannot mutate the internal list.
        return list(self.__families)
    def add_family(self, family):
        # Append an instrument family to this class.
        self.__families.append(family)
    def __str__(self):
        return '{0}'.format(self.name)
|
nickbjohnson4224/greyhat-crypto-ctf-2014 | challenges/musicbox/musicbox.py | Python | mit | 4,204 | 0.024025 | import sys, struct, array
import SocketServer
# import StringIO as StringIO
# import pygame
p = 0x08d682598db70a889ff1bc7e3e00d602e9fe9e812162d4e3d06954b2ff554a4a21d5f0aab3eae5c49ac1aec7117709cba1b88b79ae9805d28ddb99be07ba05ea219654afe0c8dddac7e73165f3dcd851a3c8a3b6515766321420aff177eaaa7b3da39682d7e773aa863a729706d52e83a1d0e34d69b461c837ed239745d6c50f124e34f4d1d00ad15d6ebabda8c189c7b8b35b5bae7a9cbafc5f09bd506a39bd9d2d9245324f02ff7254fab4ab17f7a165d49e318baeb8effc4e1a3f1251d2ea1ab93f767bd6dcf5567406550ea1f194ef7deb1b2fec8b30520b6777fea1b305593db941f9ad8ce1eba6f77c3a104bd97448ec0c11688c5bf82e85c90234abfc5
q = 0x0f67e886d1a0d1e59a53b4aa831c9bcb39a5d0a8f
g = 0x27d6a1359821e2a758a93f5c06ebb26382a06a4681e7cf44d71aeff2390c87d20ce7cd885fb01fd84ad9d52839a8ae163bfee5d09820fea1a09f814801cb157b2c5bc4636d042fb2ac1a836f33adafd6735826ae1e96c3bfbd04f7df672a14120f6780e8848ff3b3123004654127c9d25843cd54c68c396a410a2f0496e8ebb35b971993dee0f596388911277fce46ff3c5191e7e76262875bb3368724d3a40c852ccc80be4dc82335fb9267c6ff0e20396ae8bb2d51e35f15fbd07fa1b354944c285367ac88763dd00fe6fe0aab5a49faf7bc10f8e90ba376efdc034e9e1cae7e79ac906aed3b513c5f3452dc33eb307ab3d45efe92a31b1cd9a6f52dd5fb09
y = 0x6bff47f5ea736b03c85885b0bd0f1f7fa2a7efef8812c544ab47f4aa3542235f5a298fc778bb9263223c66d149f88d377b1e70a5715e4554776127ffb874e218d7c75a3c6202cc3e | 2cfb6a5a4cf34e7e8d5428b90b7aa1dbf9a7e965feab029220266ad0dabade6ae09362f6463eea60e3133bb79fc4af511057e31574f4b0f34b848b180fa20da7d9a6d8adedded9819da20b8923073e35f43ca75eeb9a1ab5451c3a5446306f93ef246759f59e65e498032d48aece56f437b4b7179daf3dfa80d6a36c211ed5acdfeaf91a7e8070a49a521f3c2e411a26eeaf8fab697535914982 | f1f7cda1e1aa1aac602f9606ea326632b4fbabf6b361fe118637e048c482
def bytesToInt(s):
    """Interpret *s* as a big-endian byte string and return its integer value."""
    value = 0
    for ch in s:
        # value*256 + byte is equivalent to (value << 8) | byte for bytes < 256
        value = value * 256 + ord(ch)
    return value
def verifySig(r, s, m):
    """Verify a DSA signature (r, s) over the message hash m.

    Standard DSA verification using the module-level domain parameters
    (p, q, g) and public key y.  q is prime, so s^-1 mod q is computed
    via Fermat's little theorem.
    """
    # Range-check BOTH r and s.  The original wrote
    #     if not 0 < s < q and 0 < r < q:
    # which parses as `(not 0 < s < q) and (0 < r < q)`, so signatures
    # with r = 0 or r >= q were accepted whenever s was in range.
    if not (0 < s < q and 0 < r < q):
        return False
    w = pow(s, q - 2, q)  # modular inverse of s (q prime)
    u1 = m * w % q
    u2 = r * w % q
    v = pow(g, u1, p) * pow(y, u2, p) % p
    return (v % q) == r
def superHash(b):
    """Custom 64-bit checksum of the string *b* -- NOT a cryptographic hash.

    Mixes a xorshift64 PRNG stream into a positional polynomial over the
    16-bit words of *b*.  NOTE(review): built from simple arithmetic on
    the input words, so it is likely malleable; do not rely on it for
    signature integrity without a collision analysis.
    """
    # Pad to an even length so the string splits evenly into 16-bit words.
    b += '0' * (-len(b) % 2)
    h = (len(b) + 1) * (len(b) ^ 42)
    x = 88172645463325252  # common xorshift64 seed value
    # Python 2: array('H', str) reinterprets the raw bytes as native-endian
    # unsigned 16-bit words.
    for i, c in enumerate(array.array('H', b)):
        # One xorshift64 step (shifts 13, 7, 17), truncated to 64 bits.
        x ^= (x<<13) & 0xFFFFFFFFFFFFFFFF
        x ^= (x>>7) & 0xFFFFFFFFFFFFFFFF
        x ^= (x<<17) & 0xFFFFFFFFFFFFFFFF
        # Position-dependent polynomial term for this word.
        h += c * (((i % 7) + 9) ** (i % 25))
        # Alternate between multiplying and adding the PRNG value.
        if i % 2:
            h *= x | i
        else:
            h += x | i
        h &= 0xFFFFFFFFFFFFFFFF
    h ^= (len(b) ^ 1) * (len(b) + 42)
    h &= 0xFFFFFFFFFFFFFFFF
    return h
class HandleCheckin(SocketServer.BaseRequestHandler):
    """Serves one client connection: read a signed blob, verify, respond."""
    def readStr(self):
        """Read one length-prefixed message from the socket.

        Wire format: 2 magic bytes (0x12 0xae), a little-endian uint32
        length, then that many payload bytes.  Returns the payload string,
        or None after closing the connection on a framing error.
        """
        req = self.request
        prefix = req.recv(2)
        if prefix != '\x12\xae':
            req.sendall("Incorrect prefix\n")
            req.close()
            return None
        # NOTE(review): recv(4) may legally return fewer than 4 bytes, in
        # which case struct.unpack raises -- confirm short reads are OK here.
        leng = struct.unpack("<I", req.recv(4))[0]
        toRead = ""
        while len(toRead) < leng:
            # NOTE(review): if the peer disconnects, recv returns '' and
            # this loop spins forever.
            toRead += req.recv(leng - len(toRead))
        # Unreachable in practice: recv never returns more bytes than asked.
        if len(toRead) > leng:
            req.sendall("Length does not match input data size\n")
            req.close()
            return None
        return toRead
    def handle(self):
        """Per-connection entry point: parse, verify signature and hash, reply."""
        req = self.request
        req.sendall("""Welcome to the new and improved Music Box! Please provide your signed music file.""")
        data = self.readStr()
        if data is None or len(data) < 48:
            req.sendall("Incomplete header\n")
            return
        elif len(data) > 12345678:
            req.sendall("The data. It is too much!\n")
            return
        # Message layout: r (20 bytes) | s (20 bytes) | hash (8 bytes) | sound.
        r = bytesToInt(data[:20])
        s = bytesToInt(data[20:40])
        h = bytesToInt(data[40:48])
        sound = data[48:]
        if not verifySig(r, s, h):
            req.sendall("Invalid signature\n")
            return
        elif h != superHash(sound):
            req.sendall("Message hash does not match\n")
            return
        else:
            req.sendall("Success!\n")
        # Backdoor trigger string: only reachable with a valid signature
        # over a hash of data containing it.
        if "Secret backdoor lol GIMME THE FLAG" in sound:
            with open('flag.txt','r') as f:
                req.sendall(f.read() + "\n")
        else:
            req.sendall("Unfortunately, the musicbox is not available at the moment.\n")
        req.close()
        # f = StringIO.StringIO(sound)
        # pygame.mixer.music.load(f)
        # pygame.mixer.music.play(loops=-1)
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server that handles each client connection in its own thread."""
    pass
if __name__ == "__main__":
    # pygame.mixer.init()
    # Usage: musicbox.py <host> <port>
    HOST, PORT = sys.argv[1], int(sys.argv[2])
    print 'Running on port', PORT
    server = ThreadedServer((HOST, PORT), HandleCheckin)
    # NOTE(review): TCPServer binds its socket in the constructor, so
    # setting allow_reuse_address afterwards has no effect.
    server.allow_reuse_address = True
    server.serve_forever()
|
holytortoise/abwreservierung | auto_process.py | Python | mit | 1,255 | 0.007177 | import os
from decouple import config
sudoPassword = config('SUDOPASSWORD')
def update():
    """Best-effort self-update of the reservation system from GitHub.

    Stops the supervised app, pulls the latest code, (re)starts the job
    and marks the startup script executable.  Each step is wrapped in a
    bare try/except on purpose: a partial failure (e.g. supervisor not
    running yet) must not abort the remaining steps.
    """
    try:
        # NOTE(review): despite the name, this command stops *and* starts
        # the job before pulling; the later command_start is then a
        # redundant no-op.  Kept as-is pending confirmation.
        command_stop = 'sudo supervisorctl stop abwreservierung && sudo supervisorctl start abwreservierung && sudo chmod +x startup'
        os.system('echo %s|sudo -S %s' % (sudoPassword, command_stop))
    except:
        pass
    try:
        os.system('cd /home/webserver/abwreservierung && git pull')
    except:
        pass
    try:
        command_start = 'sudo supervisorctl start abwreservierung'
        os.system('echo %s|sudo -S %s' % (sudoPassword, command_start))
    except:
        pass
    try:
        command_chmod = 'sudo chmod +x startup'
        os.system('echo %s|sudo -S %s' % (sudoPassword, command_chmod))
    except:
        pass
def alte_reservierungen():
    """Remove outdated reservations via the Django management command.

    Activates the project's virtualenv, runs
    ``manage.py alte_reservierungen`` and deactivates again.
    """
    os.system("/bin/bash -c 'source ../django-server/bin/activate && python src/manage.py alte_reservierungen && deactivate'")
# NOTE(review): BlockingScheduler is used but never imported in this file;
# it needs `from apscheduler.schedulers.blocking import BlockingScheduler`
# (left for the repo owner to confirm the intended scheduler class).
scheduler = BlockingScheduler()
# Weekly self-update on Sundays at noon; clean up old reservations on
# weekday evenings.
scheduler.add_job(update, 'cron', day_of_week='sun', hour=12)
scheduler.add_job(alte_reservierungen, 'cron', day_of_week="mon-fri", hour=17)
scheduler.start()
|
tensorflow/lattice | tensorflow_lattice/sonnet_modules/__init__.py | Python | apache-2.0 | 709 | 0.00141 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'sonnet_modules' namespace for TFL layers."""
from tensorflow_lattice.python.pwl_calibration_sonnet_module import PWLCalibration
|
jgruselius/standalone_scripts | bravo_mailer.py | Python | mit | 1,107 | 0.025294 |
import smtplib
import argparse
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def main(args):
    """Build and send one email from parsed CLI arguments.

    *args* is a dict with keys 'to', 'from', 'sub', 'body' and 'smtp'.
    Bodies starting with "<html>" are sent as HTML, otherwise plain text.
    """
    # Allow HTML-formatted emails (very simplistic atm, should be expanded
    # if used): a multipart/alternative container lets a plain-text part be
    # added alongside HTML later.
    msg = MIMEMultipart("alternative")
    if args["body"].startswith("<html>", 0, 10):
        msg.attach(MIMEText(args["body"], "html"))
    else:
        msg.attach(MIMEText(args["body"], "plain"))
    msg["Subject"] = args["sub"]
    msg["From"] = args["from"]
    msg["To"] = args["to"]
    s = smtplib.SMTP(args["smtp"])
    # If authentication is required:
    # s.starttls()
    # s.login(user, pass)
    s.sendmail(args["from"], [args["to"]], msg.as_string())
    s.quit()
if __name__ == "__main__":
    # Parse command-line options and hand them to main() as a plain dict.
    p = argparse.ArgumentParser(description="Send an email")
    p.add_argument("--to", "-t", required=True, help="To address")
    p.add_argument("--from", "-f", required=True, help="From address")
    p.add_argument("--sub", "-s", required=True, help="Subject")
    p.add_argument("--body", "-b", required=True, help="Message body")
    p.add_argument("--smtp", default="localhost", help="SMTP server")
    args = p.parse_args()
    main(vars(args))
|
cbxbiker61/wicd | gtk/gui.py | Python | gpl-2.0 | 33,021 | 0.003543 | #!/usr/bin/python
""" gui -- The main wicd GUI module.
Module containing the code for the main wicd GUI.
"""
#
# Copyright (C) 2007-2009 Adam Blackburn
# Copyright (C) 2007-2009 Dan O'Reilly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import time
import gobject
import pango
import gtk
from itertools import chain
from dbus import DBusException
from wicd import misc
from wicd import wpath
from wicd import dbusmanager
from wicd.misc import noneToString
import prefs
from prefs import PreferencesDialog
import netentry
from netentry import WiredNetworkEntry, WirelessNetworkEntry
from guiutil import error, LabelEntry
from wicd.translations import language
if __name__ == '__main__':
    # When run directly, cd to this file's directory so relative resources
    # (UI files, images) resolve.
    wpath.chdir(__file__)

# Module-level D-Bus proxies; (re)populated by setup_dbus().
proxy_obj = daemon = wireless = wired = bus = None
DBUS_AVAIL = False
def setup_dbus(force=True):
    """Connect to the wicd daemon over D-Bus and cache its interfaces.

    Populates the module-level ``bus``/``daemon``/``wireless``/``wired``
    proxies and sets DBUS_AVAIL.  When *force* is True and the daemon is
    not reachable, the user is prompted to start it and one reconnect is
    attempted.  Returns True on success, False otherwise.
    """
    global bus, daemon, wireless, wired, DBUS_AVAIL
    try:
        dbusmanager.connect_to_dbus()
    except DBusException:
        if force:
            print "Can't connect to the daemon, trying to start it automatically..."
            if not misc.PromptToStartDaemon():
                print "Failed to find a graphical sudo program, cannot continue."
                return False
            # Second attempt after the daemon was (hopefully) started.
            try:
                dbusmanager.connect_to_dbus()
            except DBusException:
                error(None, "Could not connect to wicd's D-Bus interface. " +
                      "Check the wicd log for error messages.")
                return False
        else:
            return False
    # Propagate the live connection to the helper modules.
    prefs.setup_dbus()
    netentry.setup_dbus()
    bus = dbusmanager.get_bus()
    dbus_ifaces = dbusmanager.get_dbus_ifaces()
    daemon = dbus_ifaces['daemon']
    wireless = dbus_ifaces['wireless']
    wired = dbus_ifaces['wired']
    DBUS_AVAIL = True
    return True
def handle_no_dbus(from_tray=False):
    """React to the wicd daemon disappearing from D-Bus.

    Marks D-Bus as unavailable; unless called from the tray icon, also
    warns the user.  Always returns False so it can be used directly to
    cancel gobject timeout callbacks.
    """
    global DBUS_AVAIL
    DBUS_AVAIL = False
    if from_tray: return False
    print "Wicd daemon is shutting down!"
    # block=False: show the dialog without stalling the GUI loop.
    error(None, language['lost_dbus'], block=False)
    return False
class WiredProfileChooser:
    """ Class for displaying the wired profile chooser. """
    def __init__(self):
        """ Initializes and runs the wired profile chooser. """
        # Import and init WiredNetworkEntry to steal some of the
        # functions and widgets it uses.
        wired_net_entry = WiredNetworkEntry()
        # Modal dialog: response 1 = Connect, response 2 = Cancel.
        dialog = gtk.Dialog(title = language['wired_network_found'],
                            flags = gtk.DIALOG_MODAL,
                            buttons = (gtk.STOCK_CONNECT, 1,
                                       gtk.STOCK_CANCEL, 2))
        dialog.set_has_separator(False)
        dialog.set_size_request(400, 150)
        instruct_label = gtk.Label(language['choose_wired_profile'] + ':\n')
        stoppopcheckbox = gtk.CheckButton(language['stop_showing_chooser'])
        wired_net_entry.is_full_gui = False
        instruct_label.set_alignment(0, 0)
        stoppopcheckbox.set_active(False)
        # Remove widgets that were added to the normal WiredNetworkEntry
        # so that they can be added to the pop-up wizard.
        wired_net_entry.vbox_top.remove(wired_net_entry.hbox_temp)
        wired_net_entry.vbox_top.remove(wired_net_entry.profile_help)
        dialog.vbox.pack_start(instruct_label, fill=False, expand=False)
        dialog.vbox.pack_start(wired_net_entry.profile_help, False, False)
        dialog.vbox.pack_start(wired_net_entry.hbox_temp, False, False)
        dialog.vbox.pack_start(stoppopcheckbox, False, False)
        dialog.show_all()
        wired_profiles = wired_net_entry.combo_profile_names
        wired_net_entry.profile_help.hide()
        # Pre-select the first profile when any exist; otherwise show help.
        if wired_net_entry.profile_list != None:
            wired_profiles.set_active(0)
            print "wired profiles found"
        else:
            print "no wired profiles found"
            wired_net_entry.profile_help.show()
        response = dialog.run()
        if response == 1:
            print 'reading profile ', wired_profiles.get_active_text()
            wired.ReadWiredNetworkProfile(wired_profiles.get_active_text())
            wired.ConnectWired()
        else:
            # "Stop showing this" -> record a forced disconnect so the
            # daemon stops popping the chooser.
            if stoppopcheckbox.get_active():
                daemon.SetForcedDisconnect(True)
        dialog.destroy()
class appGui(object):
""" The main wicd GUI class. """
def __init__(self, standalone=False, tray=None):
""" Initializes everything needed for the GUI. """
setup_dbus()
self.tray = tray
gladefile = os.path.join(wpath.gtk, "wicd.ui")
self.wTree = gtk.Builder()
self.wTree.add_from_file(gladefile)
self.window = self.wTree.get_object("window1")
width = int(gtk.gdk.screen_width() / 2)
if width > 530:
width = 530
self.window.resize(width, int(gtk.gdk.screen_height() / 1.7))
dic = { "refresh_clicked" : self.refresh_clicked,
"quit_clicked" : self.exit,
"rfkill_clicked" : self.switch_rfkill,
"disconnect_clicked" : self.disconnect_all,
"main_exit" : self.exit,
"cancel_clicked" : self.cancel_connect,
"hidden_clicked" : self.connect_hidden,
"preferences_clicked" : self.settings_dialog,
"about_clicked" : self.about_dialog,
"create_adhoc_clicked" : self.create_adhoc_network,
}
self.wTree.connect_signals(dic)
# Set some strings in the GUI - they may be translated
label_instruct = self.wTree.get_object("label_instructions")
label_instruct.set_label(language['select_a_network'])
probar = self.wTree.get_object("progressbar")
probar.set_text(language['connecting'])
self.rfkill_button = self.wTree.get_object("rfkill_button")
self.all_network_list = self.wTree.get_object("network_list_vbox")
self.all_network_list.show_all()
self.wired_network_box = gtk.VBox(False, 0)
self.wired_network_box.show_all()
self.network_list = gtk.VBox(False, 0)
self.all_network_list.pack_start(self.wired_network_box, False, False)
self.all_network_list.pack_start(self.network_list, True, True)
| self.network_list.show_all()
self.status_area = self.wTree.get_object("connecting_hbox")
self.status_bar = self.wTree.get_object("statusbar")
menu = self.wTree.get_object("menu1")
self.status_area.hide_all()
if os.path.exists(os.path.join(wpath.images, "wicd.png")):
| self.window.set_icon_from_file(os.path.join(wpath.images, "wicd.png"))
self.statusID = None
self.first_dialog_load = True
self.is_visible = True
self.pulse_active = False
self.pref = None
self.standalone = standalone
self.wpadrivercombo = None
self.connecting = False
self.refreshing = False
self.prev_state = None
self.update_cb = None
self.network_list.set_sensitive(False)
label = gtk.Label("%s..." % language['scanning'])
self.network_list.pack_start(label)
label.show()
self.wait_for_events(0.2)
self.window.connect('delete_event', self.exit)
self.window.connect('key-release-event', self.key_event)
daemon.SetGUIOpen(True)
bus.add_signal_receiver(self.dbus_scan_finished, 'SendEndScanSignal',
'org.wicd.daemon.wireless')
bus.add_signal_receiver(self.dbus_scan_started, 'SendStartScanSignal',
'org.wicd.daemon.wireless')
bus. |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/Scripts/enhancer.py | Python | gpl-3.0 | 1,649 | 0.001213 | #!c:\users\montes\documents\github\flu | id-designer\win64-vc\2.78\python\bin\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
# Tkinter was renamed to lowercase `tkinter` in Python 3; support both.
try:
    from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
    from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL

from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
    """A window showing *image* plus a slider driving one PIL enhancer.

    Dragging the slider re-applies ``enhancer`` (e.g. ImageEnhance.Color)
    with the chosen factor and repaints the preview in place.
    """
    def __init__(self, master, image, name, enhancer, lo, hi):
        Frame.__init__(self, master)
        # set up the image
        self.tkim = ImageTk.PhotoImage(image.mode, image.size)
        self.enhancer = enhancer(image)
        self.update("1.0") # normalize
        # image window
        Label(self, image=self.tkim).pack()
        # scale: slider range [lo, hi] in 0.01 steps, calls update() on drag
        s = Scale(self, label=name, orient=HORIZONTAL,
                  from_=lo, to=hi, resolution=0.01,
                  command=self.update)
        s.set(self.value)
        s.pack()
    def update(self, value):
        """Slider callback: re-apply the enhancer at the new factor."""
        self.value = float(value)
        self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
# Build one enhancer window per adjustable property, then enter the Tk loop.
if len(sys.argv) != 2:
    print("Usage: enhancer file")
    sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))  # keep the preview windows small
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
|
jay4ek/cs3240-labdemo | blarg.py | Python | mit | 76 | 0.026316 |
from helper import greeting
# The original compared the *string* "__name__" to "__main__", which is
# always False, so greeting() never ran when the script was executed.
if __name__ == "__main__":
    greeting('blarg')
andriibekker/biddingsbase | main/utils.py | Python | bsd-3-clause | 279 | 0 | from django.shortcuts import render_to_response as _render_to_response
fro | m django.template import RequestContext
def render_to_response(request, *args, **kwargs):
    """Wrap Django's render_to_response, injecting a RequestContext.

    Builds the context from *request* so context processors run, then
    delegates to the stock implementation with all other arguments.
    """
    kwargs.update({'context_instance': RequestContext(request)})
    return _render_to_response(*args, **kwargs)
|
cychenyin/windmill | apscheduler/triggers/cron/fields.py | Python | mit | 3,059 | 0.002288 | # coding: utf-8
"""
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""
from calendar import monthrange
from apscheduler.triggers.cron.expressions import (
AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression)
__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField')
# Inclusive bounds and defaults for each supported datetime field.
MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
# Fixed upper bounds; DayOfMonthField overrides get_max() for the actual
# month length.  NOTE: the original had a typo key 'day:' here, which made
# BaseField.get_max() raise KeyError for the 'day' field.
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59,
              'second': 59}
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0,
                  'second': 0}
class BaseField(object):
    """One cron field (e.g. minute) holding its parsed expressions."""
    # REAL: True when the field maps directly to a datetime attribute.
    REAL = True
    # Expression compilers tried in order; subclasses extend this list.
    COMPILERS = [AllExpression, RangeExpression]

    def __init__(self, name, exprs, is_default=False):
        self.name = name
        self.is_default = is_default
        self.compile_expressions(exprs)

    def get_min(self, dateval):
        return MIN_VALUES[self.name]

    def get_max(self, dateval):
        return MAX_VALUES[self.name]

    def get_value(self, dateval):
        return getattr(dateval, self.name)

    def get_next_value(self, dateval):
        """Return the earliest matching value across all expressions, or None."""
        smallest = None
        for expr in self.expressions:
            value = expr.get_next_value(dateval, self)
            if smallest is None or (value is not None and value < smallest):
                smallest = value
        return smallest

    def compile_expressions(self, exprs):
        self.expressions = []
        # Split a comma-separated expression list, if any
        exprs = str(exprs).strip()
        if ',' in exprs:
            for expr in exprs.split(','):
                self.compile_expression(expr)
        else:
            self.compile_expression(exprs)

    def compile_expression(self, expr):
        # First compiler whose regex matches wins; its groupdict becomes
        # the expression's constructor arguments.
        for compiler in self.COMPILERS:
            match = compiler.value_re.match(expr)
            if match:
                compiled_expr = compiler(**match.groupdict())
                self.expressions.append(compiled_expr)
                return
        raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name))

    def __str__(self):
        expr_strings = (str(e) for e in self.expressions)
        return ','.join(expr_strings)

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self)
class WeekField(BaseField):
    """Field for the ISO week number (not a real datetime attribute)."""
    REAL = False
    def get_value(self, dateval):
        # isocalendar() returns (year, week, weekday); take the week.
        return dateval.isocalendar()[1]
class DayOfMonthField(BaseField):
    """Day-of-month field; also understands positional ('3rd fri') and
    'last' day-of-month expressions."""
    COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression]
    def get_max(self, dateval):
        # Upper bound depends on the concrete month (and leap years).
        return monthrange(dateval.year, dateval.month)[1]
class DayOfWeekField(BaseField):
    """Weekday field (0 = Monday); supports named ranges like 'mon-fri'."""
    REAL = False
    COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]
    def get_value(self, dateval):
        return dateval.weekday()
|
mozilla/zamboni | mkt/api/tests/test_renderers.py | Python | bsd-3-clause | 790 | 0 | from nose.tools import eq_
from mkt.api.renderers import SuccinctJSONRenderer
from mkt.site.tests | import TestCase
class TestSuccinctJSONRenderer(TestCase):
    """SuccinctJSONRenderer must emit compact JSON by default but honour an
    indent from the renderer context or the Accept header."""
    def setUp(self):
        self.renderer = SuccinctJSONRenderer()
        self.input = {'foo': 'bar'}

    def test_no_spaces(self):
        # Default rendering contains no whitespace separators at all.
        output = self.renderer.render(self.input)
        eq_(output, '{"foo":"bar"}')

    def test_indent_context(self):
        output = self.renderer.render(self.input,
                                      renderer_context={'indent': 4})
        eq_(output, '{\n    "foo": "bar"\n}')

    def test_accepted_header(self):
        # indent can also arrive as a media-type parameter.
        header = 'application/json; indent=4'
        output = self.renderer.render(self.input, accepted_media_type=header)
        eq_(output, '{\n    "foo": "bar"\n}')
|
Tehnix/cred-server | cred/config.py | Python | bsd-3-clause | 2,169 | 0.000922 | """
The configuration files are searched for in the following order:
1. Local directory
2. Users home directory
3. Users app directory
4. System app directory
The file searched for is called .credrc for 1. and 2., and without the dot
for 3. and 4.. If none are found, it will use the default configuration.
"""
import sys
import os
import appdirs
import yaml
# Cache for the parsed configuration (currently unused; kept for future use).
loaded_configuration = None
# Base config-file name; the dotted variant is searched in $PWD and $HOME.
config_file = 'credrc'
dot_config_file = '.' + config_file
# Identifiers handed to appdirs to derive platform data directories.
appname = 'cred-server'
appauthor = 'cred'
# Defaults used when no configuration file can be located.
default_config = {
    'SSL': False,
    'approot': '127.0.0.1',
    'host': '*',
    'port': 5000,
    'database': {
        'type': 'sqlite3',
        'user': '',
        'password': '',
        'host': '',
        'port': '',
        'database': 'cred-server.db'
    },
    'scheduler': False,
    'schedulerPeriod': 30,
    'pingtimeout': 240
}
def locate_config_file():
    """Return the path of the first configuration file found, or False.

    Search order: ./credrc, ~/.credrc, then the per-user and site-wide
    application data directories resolved via appdirs.
    NOTE(review): the module docstring says the *local* file is also the
    dotted '.credrc', but this code looks for plain 'credrc' in the CWD --
    confirm which is intended.
    """
    app_dirs = appdirs.AppDirs(appname, appauthor)
    if os.path.isfile(os.path.join(os.getcwd(), config_file)):
        return os.path.join(os.getcwd(), config_file)
    elif os.path.isfile(os.path.join(os.path.expanduser('~'), dot_config_file)):
        return os.path.join(os.path.expanduser('~'), dot_config_file)
    elif os.path.isfile(os.path.join(app_dirs.user_data_dir, config_file)):
        return os.path.join(app_dirs.user_data_dir, config_file)
    elif os.path.isfile(os.path.join(app_dirs.site_data_dir, config_file)):
        return os.path.join(app_dirs.site_data_dir, config_file)
    else:
        return False
def load_config_file(filename):
    """Parse the YAML configuration at *filename* and validate its keys.

    Exits the process on parse errors, a missing file, or when the key set
    differs from default_config; otherwise returns the parsed dict.
    """
    try:
        config = None
        with open(filename, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted files -- consider yaml.safe_load.
            config = yaml.load(f)
        print('Using configuration at {0}'.format(filename))
        # Key sets must match exactly (dict view comparison is unordered).
        if not config.keys() == default_config.keys():
            print('Invalid configuration file! (either missing or too many fields!)')
            sys.exit(1)
        return config
    except yaml.constructor.ConstructorError as e:
        print('Failed to parse configuration file! At {0}'.format(filename))
        sys.exit(1)
    except FileNotFoundError as e:
        print('Found no file at {0}'.format(filename))
        sys.exit(1)
|
kyoren/https-github.com-h2oai-h2o-3 | h2o-py/tests/testdir_jira/pyunit_NOPASS_hex_1897_glm_offset.py | Python | apache-2.0 | 2,423 | 0.009492 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def offset_1897():
print 'Checking binomial models for GLM with and without offset'
print 'Import prostate dataset into H2O and R...'
prostate_hex = h2o.import_file(tests.locate("smalldata/prostate/prostate.csv"))
print "Checking binomial model without offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex[["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"]],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial", standardize=False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(379.053509501537)
assert abs(379.053509501537 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model with offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex[["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"]],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial",
offset_column = "AGE", standardize = False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(1515.91815848623)
assert abs(1515.91815848623 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model without offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex[["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"]],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson", standardize=False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_dev | iance())
print "r residual: {0}".format(216.339989007507)
assert abs(216.339989007507 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model with offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex[["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"]],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson",
| offset_column = "AGE", standardize = False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(2761.76218461138)
assert abs(2761.76218461138 - prostate_glm_h2o.residual_deviance()) < 0.1
if __name__ == "__main__":
tests.run_test(sys.argv, offset_1897)
|
kaplun/Invenio-OpenAIRE | modules/bibformat/lib/bibformat_regression_tests.py | Python | gpl-2.0 | 20,498 | 0.004119 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat module regression tests."""
__revision__ = "$Id$"
import unittest
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG
from invenio.testutils import make_test_suite, \
run_test_suite, \
test_web_page_content
from invenio.bibformat import format_record
class BibFormatAPITest(unittest.TestCase):
    """Check BibFormat API"""

    def test_basic_formatting(self):
        """bibformat - Checking BibFormat API"""
        result = format_record(recID=73,
                               of='hx',
                               ln=CFG_SITE_LANG,
                               verbose=0,
                               search_pattern=[],
                               xml_record=None,
                               user_info=None,
                               on_the_fly=True)
        pageurl = CFG_SITE_URL + '/record/73?of=hx'
        result = test_web_page_content(pageurl,
                                       expected_text=result)
        # The original never asserted the comparison result, so this test
        # could not fail; assert like the sibling test classes do.
        self.assertEqual([], result)
class BibFormatBibTeXTest(unittest.TestCase):
    """Check output produced by BibFormat for BibTeX output for
    various records"""
    def setUp(self):
        """Prepare some ideal outputs"""
        # Expected BibTeX rendering of record 74 (hep-th article), compared
        # verbatim against the live page including the <pre> wrapper.
        self.record_74_hx = '''<pre>
@article{Wang:74,
      author = "Wang, B and Lin, C Y and Abdalla, E",
      title = "Quasinormal modes of Reissner-Nordstrom Anti-de Sitter
      Black Holes",
      journal = "Phys. Lett., B",
      number = "hep-th/0003295",
      volume = "481",
      pages = "79-88",
      year = "2000",
}
</pre>'''
    def test_bibtex_output(self):
        """bibformat - BibTeX output"""
        # An empty list from test_web_page_content means all text matched.
        pageurl = CFG_SITE_URL + '/record/74?of=hx'
        result = test_web_page_content(pageurl,
                                       expected_text=self.record_74_hx)
        self.assertEqual([], result)
class BibFormatDetailedHTMLTest(unittest.TestCase):
"""Check output produced by BibFormat for detailed HTML ouput for
various records"""
def setUp(self):
"""Prepare some ideal outputs"""
# Record 7 (Article)
self.record_74_hd_header = '''<table border="0" width="100%">
<tr>
<td>Published Article<small> / Particle Physics - Theory</small></td>
<td><small><strong></strong></small></td>
<td align="right"><strong>hep-th/0003295</strong></td>
</tr>
</table>'''
self.record_74_hd_title = '''<center><big><big><strong>Quasinormal modes of Reissner-Nordstrom Anti-de Sitter Black Holes</strong></big></big></center>'''
self.record_74_hd_authors = '''<a href="%(siteurl)s/search?f=author&p=Wang%%2C%%20B&ln=%(lang)s">Wang, B</a><small> (Fudan University)</small> ; <a href="%(siteurl)s/search?f=author&p=Lin%%2C%%20C%%20Y&ln=%(lang)s">Lin, C Y</a> ; <a href="%(siteurl)s/search?f=author&p=Abdalla%%2C%%20E&ln=%(lang)s">Abdalla, E</a><br />'''% \
{'siteurl' : CFG_SITE_URL,
'lang': CFG_SITE_LANG}
self.record_74_hd_abstract = '''<small><strong>Abstract: </strong>Complex frequencies associated with quasinormal modes for large Reissner-Nordstr$\ddot{o}$m Anti-de Sitter black holes have been computed. These frequencies have close relation to the black hole charge and do not linearly scale withthe black hole temperature as in Schwarzschild Anti-de Sitter case. In terms of AdS/CFT correspondence, we found that the bigger the black hole charge is, the quicker for the approach to thermal equilibrium in the CFT. The propertiesof quasinormal modes for $l>0$ have also been studied.</small><br />'''
self.record_74_hd_pubinfo = '''<strong>Published in: </strong><a href="http://weblib.cern.ch/cgi-bin/ejournals?publication=Phys.%20Lett.%2C%20B&volume=481&year=2000&page=79">Phys. Lett., B :481 2000 79-88</a>'''
self.record_74_hd_fulltext = '''0003295.pdf"><img style="border:none"'''
self.record_74_hd_citations = '''<strong>Cited by:</strong> try citation search for <a href="%(siteurl)s/search?f=reference&p=hep-th/0003295&ln=%(lang)s">hep-th/0003295</a>'''% \
{'siteurl' : CFG_SITE_URL,
'lang': CFG_SITE_LANG}
self.record_74_hd_references = '''<li><small>[17]</small> <small>A. Chamblin, R. Emparan, C. V. Johnson and R. C. Myers, Phys. Rev., D60: 104026 (1999) 5070 90 110 130 150 r+ 130 230 330 50 70 90 110 130 150 r+</small> </li>'''
# Record 7 (Picture)
self.record_7_hd_header = '''<table border="0" width="100%">
<tr>
<td>Pictures<small> / Life at CERN</small></td>
<td><small><strong></strong></small></td>
<td align="right"><strong>CERN-GE-9806033</strong></td>
</tr>
</table>'''
self.record_7_hd_title = '''<center><big><big><strong>Tim Berners-Lee</strong></big></big></center>'''
self.record_7_hd_date = '''<center>28 Jun 1998</center>'''
self.record_7_hd_abstract = '''<p><span class="blocknote">
Caption</span><br /> <small>Conference "Internet, Web, What's next?" on 26 June 1998 at CERN : Tim Berners-Lee, inventor of the World-Wide Web and Director of the W3C, explains how the Web came to be and give his views on the future.</small></p><p><span class="blocknote">
Légende</span><br /><small>Conference "Internet, Web, What's next?" le 26 juin 1998 au CERN: Tim Berners-Lee, inventeur du World-Wide Web et directeur du W3C, explique comment le Web est ne, et donne ses opinions sur l'avenir.</small></p>'''
self.record_7_hd_resource = '''<img src="%s/record/7/files/9806033.gif?subformat=icon" alt="9806033" style="max-width:250px;_width:250px;" />''' % CFG_SITE_URL
self.record_7_hd_resource_link = '%s/record/7/files/9806033.jpeg' % CFG_SITE_URL
def test_detailed_html_output(self):
"""bibformat - Detailed HTML output"""
# Test record 74 (Article)
pageurl = CFG_SITE_URL + '/record/74?of=hd'
result = test_web_page_content(pageurl,
expected_text=[self.record_74_hd_header,
self.record_74_hd_title,
self.record_74_hd_authors,
self.record_74_hd_abstract,
self.record_74_hd_pubinfo,
self.record_74_hd_fulltext,
#self.record_74_hd_citations,
#self.record_74_hd_references
])
self.assertEqual([], result)
# Test record 7 (Picture)
pageurl = CFG_SITE_URL + '/record/7?of=hd'
result = test_web_page_content(pageurl,
expected_text=[self.record_7_hd_header,
self.record_7_hd_title,
self.record_7_hd_date,
self.record_7_hd_abstract,
|
NifTK/NiftyNet | niftynet/layer/dilatedcontext.py | Python | apache-2.0 | 1,675 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from niftynet.layer import layer_util
class DilatedTensor(object):
    """Context manager keeping a tensor in dilated (space-to-batch) form.

    On entry the spatial dims of ``input_tensor`` are folded into the
    batch dimension with block size ``dilation_factor``; on exit the
    tensor is restored to the original spatial layout.  With
    dilation_factor == 1 both transforms are skipped.
    """
    def __init__(self, input_tensor, dilation_factor):
        # Every spatial dim must be divisible by the dilation factor,
        # otherwise space_to_batch_nd would need non-zero padding.
        assert (layer_util.check_spatial_dims(
            input_tensor, lambda x: x % dilation_factor == 0))
        self._tensor = input_tensor
        self.dilation_factor = dilation_factor
        # parameters to transform input tensor
        self.spatial_rank = layer_util.infer_spatial_rank(self._tensor)
        self.zero_paddings = [[0, 0]] * self.spatial_rank
        self.block_shape = [dilation_factor] * self.spatial_rank

    def __enter__(self):
        if self.dilation_factor > 1:
            self._tensor = tf.space_to_batch_nd(self._tensor,
                                                self.block_shape,
                                                self.zero_paddings,
                                                name='dilated')
        return self

    def __exit__(self, *args):
        if self.dilation_factor > 1:
            self._tensor = tf.batch_to_space_nd(self._tensor,
                                                self.block_shape,
                                                self.zero_paddings,
                                                name='de-dilate')

    @property
    def tensor(self):
        """The wrapped tensor (dilated while inside the context)."""
        return self._tensor

    @tensor.setter
    def tensor(self, value):
        self._tensor = value
|
smeissner/eden | static/scripts/tools/build.sahana.py | Python | mit | 14,872 | 0.003833 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# run as:
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py
# or
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py -A gis
#
#
# Built with code/inspiration from MapFish, OpenLayers & Michael Crute
#
# This script must run inside the web2py environment (see usage at top of
# file): `settings` and `request` are web2py/Eden globals injected there.
try:
    theme = settings.get_theme()
except:
    print "ERROR: File now needs to be run in the web2py environment in order to pick up which theme to build"
    exit()
import os
import sys
import shutil
# Work from the tools directory so relative config paths resolve.
SCRIPTPATH = os.path.join(request.folder, "static", "scripts", "tools")
os.chdir(SCRIPTPATH)
sys.path.append("./")
# For JS
import getopt
import jsmin, mergejs
# For CSS
import re
def mergeCSS(inputFilenames, outputFilename):
    """Concatenate the given CSS files (in order) into *outputFilename*.

    Returns outputFilename so the call can be chained into compressCSS().
    """
    output = ""
    for inputFilename in inputFilenames:
        # 'with' closes each handle (the original leaked every one).
        with open(inputFilename, "r") as inputFile:
            output += inputFile.read()
    with open(outputFilename, "w") as outputFile:
        outputFile.write(output)
    return outputFilename
def cleanline(theLine):
    """Minify a piece of CSS: strip line breaks, collapse spaces, remove
    comments and drop redundant separators around punctuation."""
    # (pattern, replacement) pairs applied strictly in this order.
    rules = (
        ("(\n|\r|\t|\f|\v)+", ""),    # kill line breaks, tabs and feeds
        ("( )+", " "),                # collapse runs of spaces
        ("(; }|;})+", "}"),           # drop the last semicolon before }
        ("({ )+", "{"),               # drop the space after {
        ("/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/", ""),  # strip /* ... */ comments
        ("@CHARSET .*;", ""),         # strip any @CHARSET declaration
        (" {", "{"),                  # drop the space before {
        (": ", ":"),                  # tighten after :
        (", ", ","),                  # tighten after ,
        ("; ", ";"),                  # tighten after ;
    )
    cleaned = theLine
    for pattern, replacement in rules:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
def compressCSS(inputFilename, outputFilename):
    """Minify *inputFilename* with cleanline() and write *outputFilename*.

    The original iterated the file's contents character by character,
    running cleanline() once per character (which only strips control
    whitespace) before a final whole-string pass, and never closed either
    file.  A single cleanline() pass over the full text yields the same
    result -- its first rule deletes the same control characters -- without
    the quadratic string building, and 'with' closes both handles.
    """
    with open(inputFilename, "r") as inputFile:
        content = inputFile.read()
    with open(outputFilename, "w") as outputFile:
        outputFile.write(cleanline(content))
    return
def dojs(dogis = False, warnings = True):
""" Minifies the JavaScript """
# Do we have local version of the Closure Compiler available?
use_compressor = "jsmin" # Fallback
try:
import closure
use_compressor = "closure"
print "using local Closure Compiler"
except Exception, E:
print "No closure (%s)" % E
print "Download from http://closure-compiler.googlecode.com/files/compiler-latest.zip"
try:
import closure_w | s
use_compressor = "c | losure_ws"
print "Using Closure via Web Service - limited to files < 1Mb!"
except ImportError:
print "No closure_ws"
if use_compressor == "closure":
if not warnings:
closure.extra_params = "--warning_level QUIET"
minimize = closure.minimize
elif use_compressor == "closure_ws":
minimize = closure_ws.minimize
elif use_compressor == "jsmin":
minimize = jsmin.jsmin
sourceDirectory = ".."
configFilename = "sahana.js.cfg"
outputFilename = "S3.min.js"
# Merge JS files
print "Merging Core libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
# Compress JS files
print "Compressing - JS"
minimized = minimize(merged)
# Add license
print "Adding license file."
minimized = open("license.txt").read() + minimized
# Print to output files
print "Writing to %s." % outputFilename
open(outputFilename, "w").write(minimized)
# Remove old JS files
print "Deleting %s." % outputFilename
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
# Move new JS files
print "Moving new JS files"
shutil.move(outputFilename, "../S3")
# dataTables
print "Compressing dataTables"
sourceDirectorydataTables = ".."
configFilenamedataTables = "sahana.js.dataTables.cfg"
outputFilenamedataTables = "s3.dataTables.min.js"
mergeddataTables = mergejs.run(sourceDirectorydataTables,
None,
configFilenamedataTables)
minimizeddataTables = minimize(mergeddataTables)
open(outputFilenamedataTables, "w").write(minimizeddataTables)
try:
os.remove("../S3/%s" % outputFilenamedataTables)
except:
pass
shutil.move(outputFilenamedataTables, "../S3")
# Vulnerability
print "Compressing Vulnerability"
sourceDirectoryVulnerability = ".."
configFilenameVulnerability = "sahana.js.vulnerability.cfg"
outputFilenameVulnerability = "s3.vulnerability.min.js"
mergedVulnerability = mergejs.run(sourceDirectoryVulnerability,
None,
configFilenameVulnerability)
minimizedVulnerability = minimize(mergedVulnerability)
open(outputFilenameVulnerability, "w").write(minimizedVulnerability)
try:
os.remove("../S3/%s" % outputFilenameVulnerability)
except:
pass
shutil.move(outputFilenameVulnerability, "../S3")
print "Compressing Vulnerability GIS"
sourceDirectoryVulnerability = "../../themes/Vulnerability/js"
configFilenameVulnerability = "sahana.js.vulnerability_gis.cfg"
outputFilenameVulnerability = "OpenLayers.js"
mergedVulnerability = mergejs.run(sourceDirectoryVulnerability,
None,
configFilenameVulnerability)
minimizedVulnerability = minimize(mergedVulnerability)
open(outputFilenameVulnerability, "w").write(minimizedVulnerability)
try:
os.remove("../../themes/Vulnerability/js/%s" % outputFilenameVulnerability)
except:
pass
shutil.move(outputFilenameVulnerability, "../../themes/Vulnerability/js")
# Single scripts
for filename in [
"contacts",
"embed_component",
"inline_component",
"locationselector.widget",
"popup",
"report",
"select_person",
"timeline",
]:
print "Compressing s3.%s.js" % filename
inputFilename = os.path.join("..", "S3", "s3.%s.js" % filename)
outputFilename = "s3.%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
if dogis:
sourceDirectoryGIS = "../S3"
sourceDirectoryOpenLayers = "../gis/openlayers/lib"
sourceDirectoryOpenLayersExten = "../gis"
sourceDirectoryMGRS = "../gis"
sourceDirectoryGeoExt = "../gis/GeoExt/lib"
sourceDirectoryGeoExtux = "../gis/GeoExt/ux"
sourceDirectoryGxp = "../gis/gxp"
#sourceDirectoryGeoExplorer = "../gis/GeoExplorer"
configFilenameGIS = "sahana.js.gis.cfg"
configFilenameOpenLayers = "sahana.js.ol.cfg"
configFilenameOpenLayersExten = "sahana.js.ol_exten.cfg"
configFilenameMGRS = "sahana.js.mgrs.cfg"
configFilenameGeoExt = "sahana.js.geoext.cfg"
configFilenameGeoExtux = "sahana.js.geoextux.cfg"
configFilenameGxpMin = "sahana.js.gxp.cfg"
configFilenameGxpFull = "sahana.js.gxpfull.cfg"
#configFilenameGeoExplorer = "sahana.js.geoexplorer.cfg"
outputFilenameGIS = "s3.gis.min.js"
outputFilenameOpenLayers = "OpenLayers.js"
outputFilenameMGRS = "MGRS.min.js"
outputFilenameGeoExt = "GeoExt.js"
outputFilenameGxp = "gxp.js"
#outputFilenameGeoExplorer = "GeoExplorer.js"
# Merge GIS JS Files
print "Merging GIS scripts."
mergedGIS = mergejs.run(sourceDirectoryGIS,
None,
configFilenameGIS)
|
essanpupil/cashflow | cashflow/urls.py | Python | mit | 1,046 | 0.000956 | """cashflow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/ | topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-base | d views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home, name='home'),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
url(r'^cash/', include('cash.urls', namespace='cash')),
]
|
oy-vey/algorithms-and-data-structures | 5-AdvancedAlgorithmsAndComplexity/Week1/evacuation/evacuation.py | Python | mit | 3,343 | 0.002094 | # python3
import queue
class Edge:
def __init__(self, u, v, capacity):
self.u = u
self.v = v
self.capacity = capacity
self.flow = 0
# This class implements a bit unusual scheme for storing edges of the graph,
# in order to retrieve the backward edge for a given edge quickly.
class FlowGraph:
def __init__(self, n):
# List of all - forward and backward - edges
self.edges = []
# These adjacency lists store only indices of edges in the edges list
self.graph = [[] for _ in range(n)]
def add_edge(self, from_, to, capacity):
# Note that we first append a forward edge and then a backward edge,
# so | all forward edges are stored at even indices (starting from 0),
# whereas backward edges are stored at odd indices.
forward_edge = Edge(from_, to, capacity)
backward_edge = Edge(to, from_, 0)
self.graph[from_].append(len(self.edges))
self.e | dges.append(forward_edge)
self.graph[to].append(len(self.edges))
self.edges.append(backward_edge)
def size(self):
return len(self.graph)
def get_ids(self, from_):
return self.graph[from_]
def get_edge(self, id):
return self.edges[id]
def add_flow(self, id, flow):
# To get a backward edge for a true forward edge (i.e id is even), we should get id + 1
# due to the described above scheme. On the other hand, when we have to get a "backward"
# edge for a backward edge (i.e. get a forward edge for backward - id is odd), id - 1
# should be taken.
#
# It turns out that id ^ 1 works for both cases. Think this through!
self.edges[id].flow += flow
self.edges[id ^ 1].flow -= flow
def read_data():
vertex_count, edge_count = map(int, input().split())
graph = FlowGraph(vertex_count)
for _ in range(edge_count):
u, v, capacity = map(int, input().split())
graph.add_edge(u - 1, v - 1, capacity)
return graph
def BFS(graph, s):
dist = [-1] * graph.size()
path_edge_ids = [None] * graph.size()
dist[s] = 0
q = queue.Queue()
q.put(s)
while not q.empty():
u = q.get()
edge_ids = graph.graph[u]
for edge, edge_id in [(graph.get_edge(e_id), e_id) for e_id in edge_ids]:
if dist[edge.v] == -1 and (edge.capacity - edge.flow) > 0:
q.put(edge.v)
dist[edge.v] = dist[u] + 1
path_edge_ids[edge.v] = edge_id
return dist, path_edge_ids
def ReconstructPath(s, u, path_edge_ids, graph):
result = []
while u != s:
e_to_u_id = path_edge_ids[u]
result.append(e_to_u_id)
u = graph.get_edge(e_to_u_id).u
return result
def max_flow(graph, from_, to):
flow = 0
while True:
(dist, path_edge_ids) = BFS(graph, from_)
if path_edge_ids[to] is None:
return flow
path_to_sink_edge_ids = ReconstructPath(from_, to, path_edge_ids, graph)
X = min([(graph.get_edge(e_id).capacity - graph.get_edge(e_id).flow) for e_id in path_to_sink_edge_ids])
for e_id in path_to_sink_edge_ids:
graph.add_flow(e_id, X)
flow += X
if __name__ == "__main__":
graph = read_data()
print(max_flow(graph, 0, graph.size() - 1))
|
mtask/multissh | multissh.py | Python | apache-2.0 | 8,206 | 0.003656 | #!/usr/bin/python2
from fabric.api import *
from fabric.tasks import execute
import sys
import os
import argparse
import ntpath
import fnmatch
"""
Author: mtask@github.com
"""
class multissh(object):
def manage_servers(self, add=False, delete=False, param=None):
if add:
self.server = param.strip()
if fnmatch.fnmatch(self.server, "*@*=*"):
with open("multissh.conf", 'a') as config:
config.write(self.server)
else:
print "[!] Invalid syntax"
return
elif delete:
try:
self.delete_num = int(param)
except Exception as e:
raise e
self.hosts = []
with open('multissh.conf','r') as conf:
self.config_lines = conf.readlines()
for self.config in self.config_lines:
if self.config.startswith("#"):
continue
elif self.config.startswith("keypath="):
continue
else:
try:
self.params = self.config.split('=',1)
self.hosts.append(self.params[0])
except Exception as e:
raise e
self.server_num = 1
self.host_to_delete = None
for self.h in self.hosts:
if self.server_num == self.delete_num:
self.host_to_delete = self.h
self.server_num += 1
if self.host_to_delete:
self.ans = raw_input("[!] Really delete "+self.host_to_delete+"?(Y/n)")
if self.ans.lower() == "n":
return
else:
print "[!] Host not found"
sys.exit(0)
with open('multissh.conf','w') as conf:
for self.line in self.config_lines:
if self.host_to_delete in self.line:
continue
else:
conf.write(self.line)
def get_settings(self, list=False):
self.hosts = []
self.users = []
self.keypath = None
with open('multissh.conf','r') as conf:
for self.config in conf:
if self.config.startswith("#"):
continue
elif self.config.startswith("keypath="):
try:
self.keypath = self.config.split('=',1)[1].strip()
except Exception as e:
raise e
else:
try:
self.params = self.config.split('=',1)
| self.hosts.append(self.params[0])
self.users.append(self.params[1])
except Exception as e:
raise e
if list:
self | .server_num = 1
for self.h in self.hosts:
print "["+str(self.server_num)+"] "+self.h
self.server_num += 1
else:
return (self.hosts, self.users, self.keypath)
def run_cmd(self, cmd,sudo_=False, script=False, copy_file="", yesno=False):
def file_base_name(path):
try:
file_name = ntpath.basename(path)
return file_name
except Exception as e:
raise e
self.failed = []
self.cmd = cmd
self.servers,self.users, self.keypath = self.get_settings()
os.path.expanduser("~/")
if not self.keypath:
if os.path.isfile(os.path.expanduser("~/")+".ssh/id_rsa"):
self.keypath = "~/.ssh/id_rsa"
else:
print "[!] No clue where the ssh keys are..."
sys.exit(0)
for self.s, self.u in zip(self.servers, self.users):
if yesno:
self.confirm = raw_input("Current server is "+self.s+". Run command?(y/N)")
if self.confirm.lower() != "y":
continue
with settings(host_string=self.s, user=self.u, key_filename=self.keypath):
try:
if script:
if os.path.isfile(self.cmd):
put(self.cmd, "tempscript", mode=0755)
else:
print "[!] Path to local script not found."
sys.exit(1)
if sudo_:
sudo("./tempscript")
sudo("rm tempscript")
else:
run("./tempscript")
run("rm tempscript")
return
elif copy_file:
self.base_name = file_base_name(copy_file)
if os.path.isfile(copy_file):
put(copy_file, self.base_name)
return
else:
if sudo_:
sudo(self.cmd)
else:
run(self.cmd)
except Exception as e:
self.failed.append(self.s)
print "Execution failed on: "+self.s
print "Error:"+str(e)
if len(self.failed) == 0:
if script:
print "[!] Script executed on all servers"
else:
if yesno:
print "[!] Command executed on selected servers"
else:
print "[!] Command executed on all servers"
else:
print "[!] Execution failed on:"
for f in self.failed:
print f
def parse_args(self):
self.descr = """
Easily run commands through multiple ssh servers.
Configurate hosts to multissh.conf.
Example configuration: user@server=user
"""
self.parser = argparse.ArgumentParser(description=self.descr)
self.parser.add_argument("-c", "--cmd", type=str, help="Run command script on servers. Wrap commans inside \"\"")
self.parser.add_argument("-s", "--script", type=str, help="Path to local script to move and run on servers")
self.parser.add_argument("-S", "--sudo", action='store_true', help="Run with sudo. Can be used with --cmd, --script and --copy-file. Leave \"sudo\" out of the given command")
self.parser.add_argument("-l", "--list", action='store_true', help="List servers")
self.parser.add_argument("-a", "--add", type=str, help="Add server to config. Use syntax of multissh.config")
self.parser.add_argument("-d", "--delete", type=str, help="Delete server from config. Use list switch to get server's number")
self.parser.add_argument("-cf", "--copyfile", type=str, help="Copy file to servers. Give local path as argument.")
self.parser.add_argument("-yn", "--yesno", action='store_true', help="Ask on every server if to run command on it.")
self.args = self.parser.parse_args()
return self.args
def main(self):
self.arg = self.parse_args()
if self.arg.yesno:
yn = True
else:
yn = False
if self.arg.add:
self.manage_servers(add=True, param=self.arg.add)
if self.arg.delete:
self.manage_servers(delete=True, param=self.arg.delete)
if self.arg.list:
self.get_settings(list=True)
sys.exit(0)
if self.arg.cmd:
if self.arg.sudo:
self.run_cmd(self.arg.cmd, sudo_=True, yesno=yn)
else:
self.run_cmd(self.arg.cmd, yesno=yn)
if self.arg.script:
if self.arg.sudo:
self.run_cmd(self.arg.script, sudo_=True, script=True, yesno=yn)
else:
self.run_cmd(self.arg.script, script=True, yesno=yn)
if self.arg.copyfile:
self.run_cmd("",copy_file=self.arg.copyfile, yesno=yn)
if __name__=='__main_ |
alex-i-git/LearnPython-Diploma-Project | info.py | Python | mit | 29 | 0.034483 | from dbot impor | t info
inf | o() |
esneider/relationshit-server | app.py | Python | mit | 2,414 | 0.005385 | import os
import sys
import json
from flask import Flask, request, Response
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db = SQLAlchemy(app)
import database
@app.route('/')
def hello():
return 'Hello World!'
@app.route('/fakemessage', methods = ['POST'])
def fake_message():
userId = request.json["userId"]
messageList = request.json["messageList"]
return messageList
@app.route('/messageList', methods = ['POST'])
def messageList():
print '[start] POST messageList'
sys.stdout.flush()
print request.headers
sys.stdout.flush()
json = request.get_json(force=True)
userId = json["userId"]
messageList = json["messageList"]
database.upload_messages(userId, messageList)
print '[end] POST messageList'
sys.stdout.flush()
return 'OK'
@app.route('/topLists', methods = ['GET'])
def topLists():
print '[start] GET topLists'
sys.stdout.flush()
print request.headers
sys.stdout.flush()
# lists = [('Top friends', [('asd', 123), ('asd', 123)]), ('Top asd', [('asd', 123), ('asd', 123)])]
userId = reque | st.args.get('IMEI')
lists = database.process(userId)
data = json.dumps(lists)
resp = Response(response=data, mimetype="application/json")
print '[end] GET topLists'
sys.stdout.flush()
return resp
@app.route('/contactsData', methods = ['GET'])
def contactsData():
print '[start] GET contactsData'
sys.stdout.flush()
print request.headers
sys.stdout.flush()
userId = request.args.get('IMEI')
blabla = database.user_data(u | serId)
data = json.dumps(blabla)
resp = Response(response=data, mimetype="application/json")
print '[end] GET contactsData'
sys.stdout.flush()
return resp
@app.route('/test')
def api_hello():
print "before calling process"
#database.test_query(db)
#database.contact_query(db, "352584060592000", "32507")
database.process(db, "352584060592000")
print "after executing process"
sys.stdout.flush()
@app.route('/teststats')
def test_stats():
print "before calling max_unreplied_messages"
result = database.max_unreplied_messages(db, "352584060592000", "32507")
print result
print "after executing max_unreplied_messages"
sys.stdout.flush()
if __name__ == "__main__":
app.run(debug=True)
|
caceres/SlopedPlanesMacro | SlopedPlanesTaskPanel.py | Python | gpl-3.0 | 42,295 | 0.00104 | # -*- coding: utf8 -*-
# *****************************************************************************
# * *
# * Copyright (c) 2017 *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * For detail see the LICENSE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
# * See the GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA | *
# * *
# *****************************************************************************
import math
import FreeCAD
import FreeCADGui
from PySide import QtGui, QtCore
__title__ = "SlopedPlanesMacro"
__author__ = "Damian Caceres"
__url__ = "http://www.freecadweb.org"
__version__ = ""
class _TaskPanel_SlopedPlanes():
''''''
def __init__(self, slopedPl | anes):
''''''
self.updating = False
self.obj = slopedPlanes
self.shaping()
form = QtGui.QWidget()
self.form = form
form.setObjectName("TaskPanel")
form.setWindowTitle(self.obj.Label)
grid = QtGui.QGridLayout(form)
self.grid = grid
grid.setObjectName("grid")
title = QtGui.QLabel(form)
self.title = title
grid.addWidget(title, 0, 0, 1, 2)
tree = _TreeWidget()
self.tree = tree
tree.setParent(form)
grid.addWidget(tree, 1, 0, 1, 2)
tree.itemChanged.connect(self.edit)
advancedOptions = QtGui.QCheckBox(form)
self.advancedOptions = advancedOptions
advancedOptions.setObjectName("AdvancedOptions")
grid.addWidget(advancedOptions, 2, 0, 1, 1)
advancedOptions.clicked.connect(self.advanced)
foot = QtGui.QLabel(form)
self.foot = foot
foot.setObjectName("foot")
grid.addWidget(foot, 3, 0, 1, 2)
FreeCADGui.Selection.addObserver(self)
def retranslateUi(self):
''''''
advancedOptions = self.advancedOptions
advancedOptions.setText("Advanced Options")
advancedOptions.setToolTip("More parameters to control the faces.")
self.title.setText("SlopedPlanes parameters by faces")
doc = ("Hint: Select a face over the figure and \n"
"the related item in this task panel will be selected")
self.foot.setText(doc)
if advancedOptions.isChecked():
self.tree.setHeaderLabels([("Face"),
("Angle"),
("Length"),
("Height"),
("Run"),
("Slope"),
("OverhangLength"),
("OverhangHeight"),
("OverhangRun"),
("Left Width"),
("Right Width"),
("Curves"),
("Sweep Curve"),
("Face")])
else:
self.tree.setHeaderLabels([("Face"),
("Angle")])
def isAllowedAlterSelection(self):
''''''
return False
def isAllowedAlterView(self):
''''''
return True
def isAllowedAlterDocument(self):
''''''
return False
def getStandardButtons(self):
''''''
return int(QtGui.QDialogButtonBox.Apply |
QtGui.QDialogButtonBox.Close |
QtGui.QDialogButtonBox.Ok)
def clicked(self, button):
''''''
if button == QtGui.QDialogButtonBox.Apply:
placement = self.obj.Placement
self.resetObject()
self.obj.Placement = placement
FreeCAD.ActiveDocument.recompute()
self.update()
self.shaping()
def reject(self):
''''''
FreeCADGui.Selection.removeObserver(self)
FreeCADGui.ActiveDocument.resetEdit()
return True
def accept(self):
''''''
FreeCADGui.Selection.removeObserver(self)
self.resetObject()
self.obj.touch()
FreeCAD.ActiveDocument.recompute()
FreeCADGui.ActiveDocument.resetEdit()
return True
def helpRequested(self):
''''''
pass
def edit(self, item, column):
''''''
if not self.updating:
self.resetObject()
def advanced(self):
''''''
tree = self.tree
if self.advancedOptions.isChecked():
tree.setColumnCount(14)
tree.header().resizeSection(0, 60)
tree.header().resizeSection(1, 120)
tree.header().resizeSection(2, 120)
tree.header().resizeSection(3, 120)
tree.header().resizeSection(4, 120)
tree.header().resizeSection(5, 130)
tree.header().resizeSection(6, 130)
tree.header().resizeSection(7, 130)
tree.header().resizeSection(8, 120)
tree.header().resizeSection(9, 120)
tree.header().resizeSection(10, 120)
tree.header().resizeSection(11, 60)
tree.header().resizeSection(12, 180)
tree.header().resizeSection(13, 60)
else:
tree.setColumnCount(2)
tree.header().resizeSection(0, 60)
tree.header().resizeSection(1, 60)
self.update()
def update(self):
''''''
# print 'update'
self.updating = True
slopedPlanes = self.obj
tree = self.tree
tree.clear()
tree.obj = slopedPlanes
if slopedPlanes:
linkList = [o.Name for o in slopedPlanes.SweepCurves]
linkList.insert(0, None)
up = slopedPlanes.Up
down = slopedPlanes.Down
pyFaceList = slopedPlanes.Proxy.Pyth
numSlope = 0
for pyFace in pyFaceList:
originList = []
pyWireList = pyFace.wires
size = pyFace.size
# print '### numFace ', pyFace.numFace
lenWires = len(pyWireList)
for pyWire in pyWireList:
numWire = pyWire.numWire
# print '## numWire ', numWire
pyPlaneList = pyWire.planes
if up:
if numWire == 1:
numSlope += 1
for pyPlane in pyPlaneList:
numAngle = pyPlane.numGeom
angle = pyPlane.angle
sweepCurve = pyPlane.sweepCurve
# print '# numAngle, angle ', (numAngle, angle)
# print 'originList ', originList
if [numWire, numAngle] not in originList and\
angle not in orig |
boyombo/django-stations | stations/parcel/migrations/0003_parcel_date_loaded.py | Python | mit | 507 | 0 | # | -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-14 08:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('parcel', '0002_remove_client_gender'),
]
operations = [
migrations.AddField(
model_name='parcel',
name='date_loaded',
field=models.DateTimeField(default=django.utils.timezone.now),
),
] | |
ACJTeam/enigma2 | lib/python/Tools/KeyBindings.py | Python | gpl-2.0 | 8,341 | 0.030812 |
keyBindings = { }
from keyids import KEYIDS
from Components.config import config
from Components.RcModel import rc_model
keyDescriptions = [{
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("YELLOW",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("REWIND",),
KEYIDS["KEY_PLAYPAUSE"]: ("PLAYPAUSE",),
KEYIDS["KEY_PLAY"]: ("PLAYPAUSE",),
KEYIDS["KEY_NEXTSONG"]: ("FASTFORWARD",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",)
},
{
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("VIDEO",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("RED", "SHIFT"),
KEYIDS["KEY_PLAYPAUSE"]: ("YELLOW", "SHIFT"),
KEYIDS["KEY_PLAY"]: ("GREEN", "SHIFT"),
KEYIDS["KEY_NEXTSONG"]: ("BLUE", "SHIFT"),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("TV", "SHIFT"),
KEYIDS["KEY_RECORD"]: ("RADIO", "SHIFT")
},
{
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("REWIND",),
KEYIDS["KEY_PLAYPAUSE"]: ("PAUSE",),
KEYIDS["KEY_PLAY"]: ("PLAY",),
KEYIDS["KEY_NEXTSONG"]: ("FASTFORWARD",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",),
KEYIDS["KEY_PAGEUP"]: ("PAGEUP",),
KEYIDS["KEY_PAGEDOWN"]: ("PAGEDOWN",)
},
{ # XP1000
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_SUBTITLE"]: ("SUBTITLE",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_PREVIOUSSONG"]: ("SKIPBACK",),
KEYIDS["KEY_REWIND"]: ("REWIND",),
KEYIDS["KEY_FASTFORWARD"]: ("FASTFORWARD",),
KEYIDS["KEY_NEXTSONG"]: ("SKIPFORWAR | D",),
KEYIDS["KEY_PLAYPAUSE"]: ("PLAYPAUSE",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: | ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",),
KEYIDS["KEY_BOOKMARKS"]: ("PORTAL",),
KEYIDS["KEY_VMODE"]: ("VMODE",),
KEYIDS["KEY_PROGRAM"]: ("TIMER",),
KEYIDS["KEY_SLEEP"]: ("SLEEP",),
KEYIDS["KEY_EPG"]: ("EPG",),
},
{ # Formuler F1
KEYIDS["BTN_0"]: ("UP", "fp"),
KEYIDS["BTN_1"]: ("DOWN", "fp"),
KEYIDS["KEY_OK"]: ("OK", ""),
KEYIDS["KEY_UP"]: ("UP",),
KEYIDS["KEY_DOWN"]: ("DOWN",),
KEYIDS["KEY_POWER"]: ("POWER",),
KEYIDS["KEY_RED"]: ("RED",),
KEYIDS["KEY_BLUE"]: ("BLUE",),
KEYIDS["KEY_GREEN"]: ("GREEN",),
KEYIDS["KEY_YELLOW"]: ("YELLOW",),
KEYIDS["KEY_MENU"]: ("MENU",),
KEYIDS["KEY_LEFT"]: ("LEFT",),
KEYIDS["KEY_RIGHT"]: ("RIGHT",),
KEYIDS["KEY_VIDEO"]: ("PVR",),
KEYIDS["KEY_INFO"]: ("INFO",),
KEYIDS["KEY_AUDIO"]: ("AUDIO",),
KEYIDS["KEY_TV"]: ("TV",),
KEYIDS["KEY_RADIO"]: ("RADIO",),
KEYIDS["KEY_TEXT"]: ("TEXT",),
KEYIDS["KEY_NEXT"]: ("ARROWRIGHT",),
KEYIDS["KEY_PREVIOUS"]: ("ARROWLEFT",),
KEYIDS["KEY_REWIND"]: ("REWIND",),
KEYIDS["KEY_PAUSE"]: ("PAUSE",),
KEYIDS["KEY_PLAY"]: ("PLAY",),
KEYIDS["KEY_FASTFORWARD"]: ("FASTFORWARD",),
KEYIDS["KEY_CHANNELUP"]: ("BOUQUET+",),
KEYIDS["KEY_CHANNELDOWN"]: ("BOUQUET-",),
KEYIDS["KEY_0"]: ("0",),
KEYIDS["KEY_1"]: ("1",),
KEYIDS["KEY_2"]: ("2",),
KEYIDS["KEY_3"]: ("3",),
KEYIDS["KEY_4"]: ("4",),
KEYIDS["KEY_5"]: ("5",),
KEYIDS["KEY_6"]: ("6",),
KEYIDS["KEY_7"]: ("7",),
KEYIDS["KEY_8"]: ("8",),
KEYIDS["KEY_9"]: ("9",),
KEYIDS["KEY_EXIT"]: ("EXIT",),
KEYIDS["KEY_STOP"]: ("STOP",),
KEYIDS["KEY_RECORD"]: ("RECORD",),
KEYIDS["KEY_F1"]: ("F1",),
KEYIDS["KEY_F2"]: ("F2",),
KEYIDS["KEY_F3"]: ("F3",),
KEYIDS["KEY_BACK"]: ("RECALL",),
KEYIDS["KEY_CONTEXT_MENU"]: ("CONTEXT",),
KEYIDS["KEY_EPG"]: ("EPG",),
KEYIDS["KEY_BOOKMARKS"]: ("PLAYLIST",),
},
]
def addKeyBinding(domain, key, context, action, flags):
keyBindings.setdefault((context, action), []).append((key, domain, flags))
# returns a list of (key, flags) for a specified action
def queryKeyBinding(context, action):
if (context, action) in keyBindings:
return [(x[0], x[2]) for x in keyBindings[(context, action)]]
else:
return [ ]
def getKeyDescription(key):
if rc_model.rcIsDefault():
idx = config.misc.rcused.value
else:
rctype = config.plugins.remotecontroltype.rctype.value
if rctype == 14: # XP1000
idx = 3
elif rctype == 18: # F1
idx = 4
else:
idx = 2
if key in keyDescriptions[idx]:
return keyDescriptions[idx].get(key, [ ])
de |
chrisdembia/yeadon | examples/bicyclerider/version_information.py | Python | bsd-3-clause | 5,785 | 0.002593 | """
An IPython extension that provides a magic command that displays
a table with information about versions of installed modules.
This makes it much easier to determine which versions of modules
were installed in the source IPython interpreter's environment.
Produces output in:
* Plaintext (IPython [qt]console)
* HTML (IPython notebook, ``nbconvert --to html``, ``--to slides``)
* JSON (IPython notebook ``.ipynb`` files)
* LaTeX (e.g. ``ipython nbconvert example.ipynb --to LaTeX --post PDF``)
Usage
======
.. sourcecode:: ipython
In [1]: %load_ext version_information
In [2]: %version_information
Out[2]:
Software versions
Python 2.7.3 (default, Sep 26 2013, 20:08:41) [GCC 4.6.3]
IPython 2.0.0-dev
OS posix [linux2]
Mon Dec 09 10:21:40 2013 CST
In [3]: %version_information sphinx, jinja2
Out[3]:
Software versions
Python 2.7.3 (defa | ult, Sep 26 2013, 20:08:41) [GCC 4.6.3]
IPython 2.0.0-dev
OS posix [linux2]
sphinx 1.2b3
jinja2 2.7.1
Mon Dec 09 10:21 | :52 2013 CST
.. note:: ``%version_information`` expects to find the module version in
``<module>.__version__``.
If ``<module>.__version__`` is not set, it attempts to get a version
string with ``pkg_resources.require('<module>')[0].version``
(the ``version`` field from ``setup.py``).
This script is hosted at https://github.com/jrjohansson/version_information/
and is shared under the Creative Commons Attribution license.
"""
import cgi
import json
import os
import sys
import time
import IPython
from IPython.core.magic import magics_class, line_magic, Magics
import locale
def _date_format_encoding():
return locale.getlocale(locale.LC_TIME)[1] or locale.getpreferredencoding()
try:
import pkg_resources
except ImportError:
pkg_resources = None
@magics_class
class VersionInformation(Magics):
    """IPython magic that collects and displays version info for Python,
    IPython, the OS, and any user-requested modules, with plaintext, HTML,
    JSON and LaTeX representations."""

    @line_magic
    def version_information(self, line=''):
        """Show information about versions of modules.

        Usage:

            %version_information [optional comma-separated list of modules]

        """
        self.packages = [("Python", sys.version.replace("\n", "")),
                         ("IPython", IPython.__version__),
                         ("OS", "%s [%s]" % (os.name, sys.platform))]

        modules = line.replace(' ', '').split(",")

        for module in modules:
            if len(module) > 0:
                try:
                    # First try the conventional <module>.__version__.
                    code = "import %s; version=%s.__version__" % (module, module)
                    ns_g = ns_l = {}
                    exec(compile(code, "<string>", "exec"), ns_g, ns_l)
                    self.packages.append((module, ns_l["version"]))
                except Exception:
                    try:
                        # Fall back to the setuptools metadata (setup.py's
                        # `version` field) when __version__ is absent.
                        if pkg_resources is None:
                            raise
                        version = pkg_resources.require(module)[0].version
                        self.packages.append((module, version))
                    except Exception as e:
                        # Record the error text in place of a version so the
                        # table still renders.
                        self.packages.append((module, str(e)))

        return self

    def _repr_json_(self):
        """JSON representation (stored in .ipynb files)."""
        obj = {
            'Software versions': [
                {'module': name, 'version': version} for
                (name, version) in self.packages]}
        return json.dumps(obj)

    def _repr_html_(self):
        """HTML table representation (notebook / nbconvert)."""
        # NOTE(review): cgi.escape was removed in Python 3.8; html.escape is
        # the modern replacement -- confirm target interpreter before changing.
        html = "<table>"
        html += "<tr><th>Software</th><th>Version</th></tr>"
        for name, version in self.packages:
            _version = cgi.escape(version)
            html += "<tr><td>%s</td><td>%s</td></tr>" % (name, _version)
        try:
            html += "<tr><td colspan='2'>%s</td></tr>" % \
                time.strftime('%a %b %d %H:%M:%S %Y %Z')
        except:
            # Python 2: strftime may return bytes in the locale's encoding.
            html += "<tr><td colspan='2'>%s</td></tr>" % \
                time.strftime('%a %b %d %H:%M:%S %Y %Z').decode(_date_format_encoding())
        html += "</table>"
        return html

    @staticmethod
    def _latex_escape(str_):
        """Escape LaTeX special characters in *str_*."""
        CHARS = {
            '&': r'\&',
            '%': r'\%',
            '$': r'\$',
            '#': r'\#',
            '_': r'\letterunderscore{}',
            '{': r'\letteropenbrace{}',
            '}': r'\letterclosebrace{}',
            '~': r'\lettertilde{}',
            '^': r'\letterhat{}',
            '\\': r'\letterbackslash{}',
            '>': r'\textgreater',
            '<': r'\textless',
        }
        return u"".join([CHARS.get(c, c) for c in str_])

    def _repr_latex_(self):
        """LaTeX table representation (nbconvert --to latex/PDF)."""
        latex = r"\begin{tabular}{|l|l|}\hline" + "\n"
        latex += r"{\bf Software} & {\bf Version} \\ \hline\hline" + "\n"
        for name, version in self.packages:
            _version = self._latex_escape(version)
            latex += r"%s & %s \\ \hline" % (name, _version) + "\n"
        try:
            latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
                time.strftime('%a %b %d %H:%M:%S %Y %Z') + "\n"
        except:
            latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
                time.strftime('%a %b %d %H:%M:%S %Y %Z').decode(_date_format_encoding()) + "\n"
        latex += r"\end{tabular}" + "\n"
        return latex

    def _repr_pretty_(self, pp, cycle):
        """Plain-text representation (terminal/qtconsole)."""
        text = "Software versions\n"
        for name, version in self.packages:
            text += "%s %s\n" % (name, version)
        # BUG FIX: previously this appended HTML "<tr><td ...>" markup
        # (copy-pasted from _repr_html_) into the plaintext output.
        try:
            text += "%s\n" % time.strftime('%a %b %d %H:%M:%S %Y %Z')
        except:
            text += "%s\n" % \
                time.strftime('%a %b %d %H:%M:%S %Y %Z').decode(_date_format_encoding())
        pp.text(text)
def load_ipython_extension(ipython):
    """Register the %version_information magic with the running IPython shell."""
    ipython.register_magics(VersionInformation)
|
leapcode/soledad | src/leap/soledad/server/session.py | Python | gpl-3.0 | 4,855 | 0 | # -*- coding: utf-8 -*-
# session.py
# Copyright (C) 2017 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Twisted resource containing an authenticated Soledad session.
"""
from zope.interface import implementer
from twisted.cred.credentials import Anonymous
from twisted.cred import error
from twisted.python import log
from twisted.python.components import registerAdapter
from twisted.web import util
from twisted.web._auth import wrapper
from twisted.web.guard import HTTPAuthSessionWrapper
from twisted.web.resource import ErrorPage
from twisted.web.resource import IResource
from twisted.web.server import Session
from zope.interface import Interface
from zope.interface import Attribute
from leap.soledad.server.auth import credentialFactory
from leap.soledad.server.url_mapper import URLMapper
class ISessionData(Interface):
    """Zope interface for the credentials cached on a twisted session."""
    username = Attribute('An uuid.')
    password = Attribute('A token.')
@implementer(ISessionData)
class SessionData(object):
    """Per-session credential cache; both fields start empty and are filled
    in after a successful login."""

    def __init__(self, session):
        # `session` is required by the adapter call signature but no state
        # is read from it here.
        self.username = None
        self.password = None


# Make ISessionData(session) return a SessionData stored on that session.
registerAdapter(SessionData, Session, ISessionData)
def _sessionData(request):
    """Return the ISessionData adapter attached to this request's session."""
    return ISessionData(request.getSession())
@implementer(IResource)
class UnauthorizedResource(wrapper.UnauthorizedResource):
    """Leaf resource answering 401 to every request (empty body for HEAD)."""

    isLeaf = True

    def __init__(self):
        # Skip the parent constructor: no credential factories are needed
        # just to emit a 401.
        pass

    def render(self, request):
        request.setResponseCode(401)
        return b'' if request.method == b'HEAD' else b'Unauthorized'

    def getChildWithDefault(self, path, request):
        # Answer 401 for any child path as well.
        return self
@implementer(IResource)
class SoledadSession(HTTPAuthSessionWrapper):
    """HTTP-auth wrapper that validates requests against the portal and
    caches successful credentials on the twisted session, so later requests
    in the same session skip the portal login round-trip."""

    def __init__(self, portal):
        self._mapper = URLMapper()
        self._portal = portal
        self._credentialFactory = credentialFactory
        # expected by the contract of the parent class
        self._credentialFactories = [credentialFactory]

    def _matchPath(self, request):
        # Route lookup; returns the match object (with e.g. 'uuid') or a
        # falsy value when the path/method pair is unknown.
        match = self._mapper.match(request.path, request.method)
        return match

    def _parseHeader(self, header):
        # "scheme credentials..." -> credentials bytes, or None when the
        # scheme is not the one our credential factory handles.
        elements = header.split(b' ')
        scheme = elements[0].lower()
        if scheme == self._credentialFactory.scheme:
            return (b' '.join(elements[1:]))
        return None

    def _authorizedResource(self, request):
        # check whether the path of the request exists in the app
        match = self._matchPath(request)
        if not match:
            return UnauthorizedResource()

        # get authorization header or fail
        header = request.getHeader(b'authorization')
        if not header:
            return util.DeferredResource(self._login(Anonymous()))

        # parse the authorization header
        auth_data = self._parseHeader(header)
        if not auth_data:
            return UnauthorizedResource()

        # decode the credentials from the parsed header
        try:
            credentials = self._credentialFactory.decode(auth_data, request)
        except error.LoginFailed:
            return UnauthorizedResource()
        except Exception:
            # If you port this to the newer log facility, be aware that
            # the tests rely on the error to be logged.
            log.err(None, "Unexpected failure from credentials factory")
            return ErrorPage(500, None, None)

        # make sure the uuid given in path corresponds to the one given in
        # the credentials
        request_uuid = match.get('uuid')
        if request_uuid and request_uuid != credentials.username:
            return ErrorPage(500, None, None)

        # eventually return a cached resouce
        sessionData = _sessionData(request)
        if sessionData.username == credentials.username \
                and sessionData.password == credentials.password:
            return self._portal.realm.auth_resource

        # if all checks pass, try to login with credentials and cache
        # credentials in case of success
        def _cacheSessionData(res):
            sessionData.username = credentials.username
            sessionData.password = credentials.password
            return res

        d = self._login(credentials)
        d.addCallback(_cacheSessionData)
        return util.DeferredResource(d)
|
Abjad/abjad | tests/test_Container_extend.py | Python | gpl-3.0 | 6,074 | 0 | import pytest
import abjad
def test_Container_extend_01():
    """
    Extend container with list of leaves.
    """
    voice = abjad.Voice("c'8 d'8")
    abjad.beam(voice[:])
    voice.extend([abjad.Note("c'8"), abjad.Note("d'8")])
    # The appended notes are outside the existing beam.
    # NB: in `assert expr, msg` the message is only evaluated on failure,
    # so print(...) below fires only when the comparison fails.
    assert abjad.lilypond(voice) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            ]
            c'8
            d'8
        }
        """
    ), print(abjad.lilypond(voice))
    assert abjad.wf.wellformed(voice)
def test_Container_extend_02():
    """
    Extend container with contents of other container.
    """
    voice_1 = abjad.Voice("c'8 d'8")
    abjad.beam(voice_1[:])
    voice_2 = abjad.Voice("e'8 f'8")
    abjad.beam(voice_2[:])
    voice_1.extend(voice_2)
    # Both beams survive the move into voice_1.
    assert abjad.lilypond(voice_1) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            ]
            e'8
            [
            f'8
            ]
        }
        """
    ), print(abjad.lilypond(voice_1))
    assert abjad.wf.wellformed(voice_1)
def test_Container_extend_03():
    """
    Extending container with empty list leaves container unchanged.
    """
    voice = abjad.Voice("c'8 d'8")
    abjad.beam(voice[:])
    voice.extend([])
    assert abjad.lilypond(voice) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            ]
        }
        """
    ), print(abjad.lilypond(voice))
    assert abjad.wf.wellformed(voice)
def test_Container_extend_04():
    """
    Extending one container with empty second container leaves both
    containers unchanged.
    """
    voice = abjad.Voice("c'8 d'8")
    abjad.beam(voice[:])
    voice.extend(abjad.Voice([]))
    assert abjad.lilypond(voice) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            ]
        }
        """
    ), print(abjad.lilypond(voice))
    assert abjad.wf.wellformed(voice)
def test_Container_extend_05():
    """
    Trying to extend container with a noncomponent raises an exception.
    """
    voice = abjad.Voice("c'8 d'8")
    abjad.beam(voice[:])
    with pytest.raises(Exception):
        voice.extend(7)
    with pytest.raises(Exception):
        voice.extend("foo")
def test_Container_extend_06():
    """
    Trying to extend container with noncontainer raises exception.
    """
    voice = abjad.Voice("c'8 d'8")
    abjad.beam(voice[:])
    # Single leaves are not iterable containers, so extend() must reject them.
    with pytest.raises(Exception):
        voice.extend(abjad.Note("c'4"))
    with pytest.raises(Exception):
        voice.extend(abjad.Chord("<c' d' e'>4"))
def test_Container_extend_07():
    """
    Extend container with partial and spanned contents of other container.
    """
    voice_1 = abjad.Voice("c'8 d'8")
    abjad.beam(voice_1[:])
    voice_2 = abjad.Voice("c'8 d'8 e'8 f'8")
    abjad.beam(voice_2[:])
    voice_1.extend(voice_2[-2:])
    # Moving only the tail of voice_2's beamed run splits the beam markup:
    # the stop indicator `]` travels with f'8 into voice_1 (no matching `[`),
    # and the start `[` stays behind in voice_2 (no matching `]`).
    assert abjad.lilypond(voice_1) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            ]
            e'8
            f'8
            ]
        }
        """
    ), print(abjad.lilypond(voice_1))
    assert abjad.wf.wellformed(voice_1)
    assert abjad.lilypond(voice_2) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
        }
        """
    ), print(abjad.lilypond(voice_2))
    assert abjad.wf.wellformed(voice_2)
def test_Container_extend_08():
    """
    Extend container with partial and spanned contents of other container.
    Covered span comes with components from donor container.
    """
    voice_1 = abjad.Voice("c'8 d'8")
    abjad.beam(voice_1[:])
    voice_2 = abjad.Voice("c'8 d'8 e'8 f'8")
    abjad.beam(voice_2[:])
    abjad.slur(voice_2[-2:])
    assert abjad.lilypond(voice_2) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            e'8
            (
            f'8
            )
            ]
        }
        """
    ), print(abjad.lilypond(voice_2))
    voice_1.extend(voice_2[-2:])
    # The slur is entirely inside the moved span, so it moves intact;
    # the beam, which started outside the span, leaves a dangling `]`.
    assert abjad.lilypond(voice_1) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
            ]
            e'8
            (
            f'8
            )
            ]
        }
        """
    ), print(abjad.lilypond(voice_1))
    assert abjad.wf.wellformed(voice_1)
    assert abjad.lilypond(voice_2) == abjad.string.normalize(
        r"""
        \new Voice
        {
            c'8
            [
            d'8
        }
        """
    ), print(abjad.lilypond(voice_2))
    assert abjad.wf.wellformed(voice_2)
def test_Container_extend_09():
    """
    Extend container with LilyPond input string.
    """
    container = abjad.Container([])
    container.extend("c'4 ( d'4 e'4 f'4 )")
    assert abjad.lilypond(container) == abjad.string.normalize(
        r"""
        {
            c'4
            (
            d'4
            e'4
            f'4
            )
        }
        """
    ), print(abjad.lilypond(container))
    assert abjad.wf.wellformed(container)
def test_Container_extend_10():
    """
    Lists must be flattened.
    """
    maker = abjad.NoteMaker()
    # Four two-note lists; extend() requires a flat sequence of components.
    lists = [
        maker([0, 2], [abjad.Duration(1, 4)]),
        maker([4, 5], [abjad.Duration(1, 4)]),
        maker([7, 9], [abjad.Duration(1, 4)]),
        maker([11, 12], [abjad.Duration(1, 4)]),
    ]
    components = abjad.sequence.flatten(lists, depth=-1)
    container = abjad.Container()
    container.extend(components)
    assert abjad.lilypond(container) == abjad.string.normalize(
        r"""
        {
            c'4
            d'4
            e'4
            f'4
            g'4
            a'4
            b'4
            c''4
        }
        """
    ), print(abjad.lilypond(container))
    assert abjad.wf.wellformed(container)
|
14bmkelley/raytracer-python | cast.py | Python | mit | 3,882 | 0.028336 | import collisions
import data
import vector_math
import math
def cast_ray(ray, sphere_list, amb, light, eye_point):
    """Trace one ray into the scene and return the data.Color seen along it.

    Combines ambient, diffuse and specular (Phong) terms for the closest
    sphere hit; returns white when the ray hits nothing.
    """
    result_color = data.Color(1.0, 1.0, 1.0)
    # test for closest sphere to the eye
    collision_tuple = find_closest_collision(ray, sphere_list)
    if collision_tuple:
        # some useful variables
        sphere_hit = collision_tuple[0]
        sphere_hit_point = collision_tuple[1]
        # ambient contribution
        result_r = sphere_hit.color.r * sphere_hit.finish.amb * amb.r
        result_g = sphere_hit.color.g * sphere_hit.finish.amb * amb.g
        result_b = sphere_hit.color.b * sphere_hit.finish.amb * amb.b
        # computing light intensity: surface normal at the hit, then nudge
        # the hit point slightly off the surface to avoid self-shadowing.
        sphere_vector = vector_math.vector_from_to(sphere_hit.center, sphere_hit_point)
        sphere_normal = vector_math.normalize_vector(sphere_vector)
        scaled_normal = vector_math.scale_vector(sphere_normal, 0.01)
        hit_point = vector_math.translate_point(sphere_hit_point, scaled_normal)
        light_vector = vector_math.vector_from_to(hit_point, light.pt)
        light_normal = vector_math.normalize_vector(light_vector)
        light_scale = vector_math.dot_vector(sphere_normal, light_normal)
        if light_scale > 0:  # surface faces the light
            # shadow ray: anything between the hit point and the light?
            sphere_normal_ray = data.Ray(hit_point, light_normal)
            possible_obstruction = find_closest_collision(sphere_normal_ray, sphere_list)
            # FIX: compare to None with identity, not equality.
            if possible_obstruction is None or distance(hit_point, possible_obstruction[1]) > distance(hit_point, light.pt):
                result_r += sphere_hit.color.r * light_scale * light.color.r * sphere_hit.finish.diff
                result_g += sphere_hit.color.g * light_scale * light.color.g * sphere_hit.finish.diff
                result_b += sphere_hit.color.b * light_scale * light.color.b * sphere_hit.finish.diff
            # computing specular intensity
            tmp_vector = vector_math.scale_vector(sphere_normal, 2 * light_scale)
            reflection_vector = vector_math.difference_vector(light_normal, tmp_vector)
            eye_vector = vector_math.vector_from_to(eye_point, hit_point)
            eye_normal = vector_math.normalize_vector(eye_vector)
            spec_scale = vector_math.dot_vector(reflection_vector, eye_normal)
            if spec_scale > 0:
                result_r += light.color.r * sphere_hit.finish.spec * spec_scale ** (1 / float(sphere_hit.finish.rough))
                result_g += light.color.g * sphere_hit.finish.spec * spec_scale ** (1 / float(sphere_hit.finish.rough))
                result_b += light.color.b * sphere_hit.finish.spec * spec_scale ** (1 / float(sphere_hit.finish.rough))
        result_color = data.Color(result_r, result_g, result_b)
    return result_color
def cast_all_rays(view, eye_point, sphere_list, amb, light, file):
    """Cast one ray per pixel, top-to-bottom / left-to-right, writing one
    "r g b" line per pixel to ``file``.

    The scan positions are accumulated as floats in steps of find_delta(),
    so the pixel grid is traversed in screen-space coordinates.
    """
    j = view.max_y
    while j > view.min_y:
        i = view.min_x
        while i < view.max_x:
            # Ray from the eye through this point of the view plane (z=0).
            screen_point = data.Point(i, j, 0)
            dir_to_pixel = vector_math.vector_from_to(eye_point, screen_point)
            ray = data.Ray(eye_point, dir_to_pixel)
            color = cast_ray(ray, sphere_list, amb, light, eye_point)
            printed_r = str(color_convert(color.r))
            printed_g = str(color_convert(color.g))
            printed_b = str(color_convert(color.b))
            file.write("{0} {1} {2}\n".format(printed_r, printed_g, printed_b))
            i += find_delta(view.min_x, view.max_x, view.width)
        j -= find_delta(view.min_y, view.max_y, view.height)
def find_delta(min, max, length):
    """Step size that splits [min, max] into `length` equal intervals."""
    span = max - min
    return span / float(length)
def distance(p1, p2):
    """Euclidean distance between points p1 and p2."""
    return vector_math.length_vector(vector_math.vector_from_to(p1, p2))
def find_closest_collision(ray, sphere_list):
    """Return the (sphere, intersection_point) pair nearest to the ray's
    origin, or None when the ray hits nothing.

    Uses min() with a distance key instead of a manual index loop; ties
    resolve to the first-listed collision, matching the original behavior.
    """
    collision_list = collisions.find_intersection_points(sphere_list, ray)
    if not collision_list:
        return None
    return min(collision_list, key=lambda pair: distance(ray.pt, pair[1]))
def color_convert(color_component):
    """Map a float intensity to an integer channel value in [0, 255].

    Values >= 1 saturate to 255; values <= 0 clamp to 0 (the original
    returned negative ints for negative input, which is not a valid
    channel value); in between, truncate color_component * 255.
    """
    if color_component >= 1:
        return 255
    if color_component <= 0:
        return 0
    return int(color_component * 255)
|
crisisking/skybot | plugins/bf.py | Python | unlicense | 2,513 | 0.000398 | '''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
from builtins import chr
from builtins import range
import re
import random
import unittest
from util import hook
@hook.command
def bf(inp, max_steps=1000000, buffer_size=5000):
    """.bf <prog> -- executes brainfuck program <prog>"""
    # (FIX: docstring was '".."""' -- mismatched quoting relying on implicit
    # string concatenation; normalized to a single triple-quoted literal.)

    # Strip everything except the eight brainfuck opcodes.
    program = re.sub('[^][<>+-.,]', '', inp)

    # create a dict of brackets pairs, for speed later on
    brackets = {}
    open_brackets = []
    for pos in range(len(program)):
        if program[pos] == '[':
            open_brackets.append(pos)
        elif program[pos] == ']':
            if len(open_brackets) > 0:
                brackets[pos] = open_brackets[-1]
                brackets[open_brackets[-1]] = pos
                open_brackets.pop()
            else:
                return 'unbalanced brackets'
    if len(open_brackets) != 0:
        return 'unbalanced brackets'

    # now we can start interpreting
    ip = 0        # instruction pointer
    mp = 0        # memory pointer
    steps = 0
    memory = [0] * buffer_size  # initial memory area
    rightmost = 0
    output = ""   # we'll save the output here

    # the main program loop:
    while ip < len(program):
        c = program[ip]
        if c == '+':
            memory[mp] = (memory[mp] + 1) % 256
        elif c == '-':
            memory[mp] = (memory[mp] - 1) % 256
        elif c == '>':
            mp += 1
            if mp > rightmost:
                rightmost = mp
            if mp >= len(memory):
                # no restriction on memory growth!
                memory.extend([0] * buffer_size)
        elif c == '<':
            # moving left past cell 0 wraps to the end of current memory
            mp = (mp - 1) % len(memory)
        elif c == '.':
            output += chr(memory[mp])
            if len(output) > 500:
                break
        elif c == ',':
            # no stdin in IRC: ',' yields a random nonzero byte instead
            memory[mp] = random.randint(1, 255)
        elif c == '[':
            if memory[mp] == 0:
                ip = brackets[ip]
        elif c == ']':
            if memory[mp] != 0:
                ip = brackets[ip]
        ip += 1
        steps += 1
        if steps > max_steps:
            if output == '':
                output = 'no output'
            output += ' [exceeded %d iterations]' % max_steps
            break

    # drop control characters before replying
    stripped_output = re.sub(r'[\x00-\x1F]', '', output)

    if stripped_output == '':
        if output != '':
            return 'no printable output'
        return 'no output'

    return stripped_output[:430]
google/autocjk | src/evaluate_font.py | Python | apache-2.0 | 6,620 | 0.000152 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For evaluating performance against a local font.
Usage:
```
bazel run //src:evaluate_font -- \
--alsologtostderr \
--font_path=<path_to_reference_font> \
--limit=10 \
--out_dir=/tmp/output
```
"""
from absl import app
from absl import flags
from absl import logging
from fontTools import ttLib
from models import generator_lib
from multiprocessing.pool import ThreadPool
from src import imageutils
from src.utils import decomposer as decomposer_lib
from src.utils import font_helper as font_helper_lib
from src.utils import region as region_lib
from typing import Iterable, Optional, Sequence, Text, Tuple
import os
import random
import re
import statistics
import tempfile
import tensorflow as tf
_VERBS = frozenset("⿰⿱⿲⿳⿴⿵⿶⿷⿸⿹⿺⿻")
_FONT_PATH = flags.DEFINE_string(
"font_path", None, "Font file against which to select absent characters.")
_LIMIT = flags.DEFINE_integer(
"limit", None, "If set, the max number of characters to evaluate.")
_NUM_WORKERS = flags.DEFINE_integer(
"num_workers", 16, "The number of workers to use in parallel.")
_OUT_DIR = flags.DEFINE_string(
"out_dir", None,
"If set, writes [a | b | real(a,b) | predicted(a,b)] images to this directory."
)
def _get_viable_ids(font_path: Text) -> Iterable[Tuple[Text, Text, Text]]:
    """Yields an iterable of [(亻, 尔, 你), ...]..

    Only characters already present in the font, whose decomposition is a
    simple left-right split "⿰xy" with non-verb components, are yielded,
    in random order.

    Args:
        font_path: The path to the font file.

    Yields:
        An iterable of pairs of (lhs, rhs, character).
    """
    # fontNumber=2: picks a face inside the TTC collection — assumes the
    # reference font is a collection; TODO confirm for other fonts.
    font_helper = font_helper_lib.FontHelper(font=ttLib.TTFont(file=font_path,
                                                               fontNumber=2),
                                             input_region=region_lib.Region.G)
    decomposer = decomposer_lib.Decomposer()

    # NB: KNOWN characters.
    known_characters = list(font_helper.known_characters())
    random.shuffle(known_characters)
    for character in known_characters:
        try:
            ids = decomposer.decompose(font_helper.region,
                                       character).decomposition
        except decomposer_lib.NoDecompositionError:
            continue

        # Only think about "⿰.." for now.
        if not re.match(r"⿰..", ids):
            continue
        lhs, rhs = ids[1], ids[2]
        if lhs in _VERBS or rhs in _VERBS:
            continue
        yield (lhs, rhs, character)
def calculate_wrongness(generator,
                        lhs: Text,
                        rhs: Text,
                        character: Text,
                        out_dir: Optional[Text] = None) -> Tuple[Text, float]:
    """For a character, generates the predicted image and returns Wrongness.

    Args:
        generator: a keras.Model which can generate characters.
        lhs: A string, the left-hand component.
        rhs: A string, the right-hand component.
        character: A string, the target character.
        out_dir: If set, an image of the form [a | b | real(a,b) | predicted(a,b)]
          is written to this directory.

    Returns:
        A tuple of the input character and a wrongness %.

    Fixes over the previous version: out_dir=None no longer crashes
    (os.path.join(None, ...) was called unconditionally), and the
    predicted-image temp file is no longer created with delete=False,
    which leaked one file per call.
    """
    with tempfile.NamedTemporaryFile(suffix=".png") as predicted_file, \
            tempfile.NamedTemporaryFile(suffix=".png") as lhs_file, \
            tempfile.NamedTemporaryFile(suffix=".png") as rhs_file, \
            tempfile.NamedTemporaryFile(suffix=".png") as actual_file:
        # Render the two components and the ground-truth composition.
        imageutils.write_character(_FONT_PATH.value, lhs, lhs_file.name)
        imageutils.write_character(_FONT_PATH.value, rhs, rhs_file.name)
        imageutils.write_character(_FONT_PATH.value, character,
                                   actual_file.name)
        # Run the generator on the component images.
        imageutils.predict_from_paths(generator, lhs_file.name, rhs_file.name,
                                      predicted_file.name)

        if out_dir:
            # Write the diagnostic strip [lhs|rhs|real|predicted|overlay].
            out_path = os.path.join(out_dir, f"0x{ord(character):d}.png")
            with tempfile.NamedTemporaryFile(suffix=".png") as superimposed_file, \
                    open(out_path, "wb") as out_file:
                imageutils.superimpose(actual_file.name, predicted_file.name,
                                       superimposed_file.name)
                imageutils.horizontal_stitch([
                    lhs_file.name, rhs_file.name, actual_file.name,
                    predicted_file.name, superimposed_file.name
                ], out_file.name)

        return character, imageutils.wrongness(predicted_file.name,
                                               actual_file.name)
def _calculate_wrongness_kwargs(args):
    """Thread-pool adapter: unpack a kwargs dict into calculate_wrongness."""
    logging.info("Rendering %s %s %s to %s", args['lhs'], args['rhs'],
                 args['character'], args['out_dir'])
    return calculate_wrongness(**args)
def main(argv: Sequence[str]) -> None:
    """Evaluate the generator against the reference font and log statistics."""
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")

    # Load in the generator,
    generator = tf.keras.models.load_model(generator_lib.PATH_TO_GENERATOR)

    n = 0
    list_of_kwargs_to_function = []
    for lhs, rhs, character in _get_viable_ids(_FONT_PATH.value):
        if _LIMIT.value and n >= _LIMIT.value:
            break
        n = n + 1
        list_of_kwargs_to_function.append({
            "generator": generator,
            "lhs": lhs,
            "rhs": rhs,
            "character": character,
            "out_dir": _OUT_DIR.value,
        })

    # Fan the per-character work out over a thread pool.
    pool = ThreadPool(processes=_NUM_WORKERS.value)
    result = pool.map(_calculate_wrongness_kwargs, list_of_kwargs_to_function)
    pool.close()
    pool.join()

    # map from character to wrongness.
    wrongness_map = {}
    for c, w in result:
        wrongness_map[c] = w

    logging.info("\tMean: %.3f", statistics.mean(wrongness_map.values()))
    logging.info("\tVariance: %.3f",
                 statistics.variance(wrongness_map.values()))
    logging.info("\tLeast wrongness: %s",
                 min(wrongness_map, key=wrongness_map.get))
    logging.info("\tMost wrongness: %s",
                 max(wrongness_map, key=wrongness_map.get))
|
AbletonAG/abl.pivotal_client | setup.py | Python | mit | 453 | 0 | from setuptools import setup, find_packages
# Package metadata for abl.pivotal_client.
setup(
    name='abl.pivotal_client',
    version='1.1.0',
    description='A simple client for Pivotal Tracker',
    author='Ableton Web Team',
    author_email='webteam@ableton.com',
    url='http://ableton.com/',
    zip_safe=False,
    # No third-party runtime dependencies.
    install_requires=[],
    packages=find_packages(),
    include_package_data=True,
    license='MIT',
    entry_points={
        "console_scripts": [],
    },
)
|
brynpickering/calliope | calliope/core/io.py | Python | apache-2.0 | 3,119 | 0.001282 | """
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
io.py
~~~~~
Functions to read and save model results.
"""
import os
import xarray as xr
from calliope._version import __version__
from calliope import exceptions
from calliope.core.util.dataset import split_loc_techs
def read_netcdf(path):
    """Read model_data from NetCDF file"""
    with xr.open_dataset(path) as dataset:
        # Fully load into memory before the file handle closes.
        dataset.load()

    version_used = dataset.attrs.get('calliope_version', False)
    if version_used and str(version_used) not in __version__:
        exceptions.warn(
            'This model data was created with Calliope version {}, '
            'but you are running {}. Proceed with caution!'.format(version_used, __version__)
        )

    # FIXME some checks for consistency
    # use check_dataset from the checks module
    # also check the old checking from 0.5.x

    return dataset
def save_netcdf(model_data, path):
    """Write model_data to a NetCDF4 file at ``path``.

    NetCDF attrs cannot hold Python bools or None, so those are converted
    to int / the string 'None' for the write and restored afterwards
    (the dataset object is mutated in place during the call).
    """
    encoding = {k: {'zlib': True, 'complevel': 4} for k in model_data.data_vars}

    # Convert boolean attrs to ints
    bool_attrs = [
        k for k, v in model_data.attrs.items()
        if isinstance(v, bool)
    ]
    for k in bool_attrs:
        model_data.attrs[k] = int(model_data.attrs[k])

    # Convert None attrs to 'None'
    none_attrs = [
        k for k, v in model_data.attrs.items()
        if v is None
    ]
    for k in none_attrs:
        model_data.attrs[k] = 'None'

    # Convert `object` dtype coords to string
    # FIXME: remove once xarray issue https://github.com/pydata/xarray/issues/2404 is resolved
    for k, v in model_data.coords.items():
        if v.dtype == 'O':
            model_data[k] = v.astype('<U{}'.format(max([len(i.item()) for i in v])))

    try:
        model_data.to_netcdf(path, format='netCDF4', encoding=encoding)
        model_data.close()  # Force-close NetCDF file after writing
    finally:  # Convert ints back to bools, 'None' back to None
        for k in bool_attrs:
            model_data.attrs[k] = bool(model_data.attrs[k])
        for k in none_attrs:
            model_data.attrs[k] = None
def save_csv(model_data, path, dropna=True):
    """
    If termination condition was not optimal, filters inputs only, and
    warns that results will not be saved.
    """
    os.makedirs(path, exist_ok=False)

    termination = model_data.attrs.get('termination_condition', 'optimal')
    if termination == 'optimal':
        data_vars = model_data.data_vars
    else:
        data_vars = model_data.filter_by_attrs(is_result=0).data_vars
        exceptions.warn(
            'Model termination condition was not optimal, saving inputs only.'
        )

    for var in data_vars:
        prefix = 'results' if model_data[var].attrs['is_result'] else 'inputs'
        out_path = os.path.join(path, '{}_{}.csv'.format(prefix, var))
        series = split_loc_techs(model_data[var], as_='Series')
        if dropna:
            series = series.dropna()
        series.to_csv(out_path)
|
jboy/nim-pymod | libpy/UsefulConfigParser.py | Python | mit | 5,807 | 0.005166 | # Copyright (c) 2015 SnapDisco Pty Ltd, Australia.
# All rights reserved.
#
# This source code is licensed under the terms of the MIT license
# found in the "LICENSE" file in the root directory of this source tree.
import sys
if sys.version_info.major >= 3:
from configparser import RawConfigParser
else:
from ConfigParser import RawConfigParser
from .OrderedMultiDict import OrderedMultiDict
class UsefulConfigParser(object):
    """A config parser that sucks less than those in module `ConfigParser`.

    Wraps RawConfigParser with an OrderedMultiDict backing store so that
    repeated option names are all preserved; accessors therefore return
    *lists* of values rather than a single value, and missing sections
    simply yield empty lists instead of raising.
    """

    def __init__(self, filenames_to_try=[]):
        # FUN FACT: In Python 3.2, they spontaneously changed the behaviour of
        # RawConfigParser so that it no longer considers ';' a comment delimiter
        # for inline comments.
        #
        # Compare:
        #  "Configuration files may include comments, prefixed by specific
        #  characters (# and ;). Comments may appear on their own in an otherwise
        #  empty line, or may be entered in lines holding values or section names.
        #  In the latter case, they need to be preceded by a whitespace character
        #  to be recognized as a comment. (For backwards compatibility, only ;
        #  starts an inline comment, while # does not.)"
        #  -- https://docs.python.org/2/library/configparser.html
        # vs:
        #  "Comment prefixes are strings that indicate the start of a valid comment
        #  within a config file. comment_prefixes are used only on otherwise empty
        #  lines (optionally indented) whereas inline_comment_prefixes can be used
        #  after every valid value (e.g. section names, options and empty lines as
        #  well). By default inline comments are disabled and '#' and ';' are used
        #  as prefixes for whole line comments.
        #  Changed in version 3.2: In previous versions of configparser behaviour
        #  matched comment_prefixes=('#',';') and inline_comment_prefixes=(';',)."
        #  -- https://docs.python.org/3/library/configparser.html#customizing-parser-behaviour
        #
        # Grrr...
        if sys.version_info.major >= 3:
            self._cp = RawConfigParser(dict_type=OrderedMultiDict, inline_comment_prefixes=(';',))
        else:
            self._cp = RawConfigParser(dict_type=OrderedMultiDict)

        if isinstance(filenames_to_try, str):
            filenames_to_try = [filenames_to_try]
        self._filenames_to_try = filenames_to_try[:]

    def read(self, filenames_to_try=[]):
        """Read and parse all accumulated filenames; returns those parsed OK."""
        if isinstance(filenames_to_try, str):
            filenames_to_try = [filenames_to_try]
        self._filenames_to_try.extend(filenames_to_try)
        return self._cp.read(self._filenames_to_try)

    def sections(self):
        """Return the list of section names in the parsed config."""
        return self._cp.sections()

    def options(self, section_name):
        """Return the option names in `section_name`, or [] if it's absent."""
        ## The client code doesn't need to check in advance that the requested
        ## section name is present in the config; this function will check
        ## this automatically, so no exception is raised by RawConfigParser.

        ## Check that `section_name` is present in the config.
        ## Otherwise, RawConfigParser will raise ConfigParser.NoSectionError.
        if not self._cp.has_section(section_name):
            return []
        return self._cp.options(section_name)

    def get(self, section_name, option_name, do_optionxform=True):
        """Return a *list* of all values for `option_name`.

        `section_name` may be None (search every section), a single section
        name, or an iterable of section names.
        """
        if do_optionxform:
            # https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.optionxform
            option_name = self._cp.optionxform(option_name)

        if section_name is None:
            return self._get_optval_in_sections(self.sections(), option_name)
        elif isinstance(section_name, str):
            return self._get_optval_in_sections([section_name], option_name)
        else:
            return self._get_optval_in_sections(section_name, option_name)

    def _get_optval_in_sections(self, section_names, option_name):
        # Collect every matching value across the given sections, in order.
        ## The client code doesn't need to check in advance that the requested
        ## section name(s) are present in the config; this function will check
        ## this automatically, so no exception is raised by RawConfigParser.
        optvals = []
        for section_name in section_names:
            ## Check that `section_name` is present in the config.
            ## Otherwise, RawConfigParser will raise ConfigParser.NoSectionError.
            if not self._cp.has_section(section_name):
                continue
            optvals.extend([optval
                    for optname, optval in self._cp.items(section_name)
                    if optname == option_name])
        return optvals

    def getboolean(self, section_name, option_name, do_optionxform=True):
        """Like get(), but coerce every value to bool (may raise ValueError)."""
        # https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.getboolean
        return [self._coerce_to_boolean(optval)
                for optval in self.get(section_name, option_name, do_optionxform)]

    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
            '0': False, 'no': False, 'false': False, 'off': False}

    def _coerce_to_boolean(self, optval_str):
        # 'The accepted values for the option are "1", "yes", "true", and "on",
        # which cause this method to return True, and "0", "no", "false", and
        # "off", which cause it to return False. These string values are checked
        # in a case-insensitive manner. Any other value will cause it to raise
        # ValueError.'
        # https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.getboolean
        ovs_lower = optval_str.lower()
        if ovs_lower not in self._boolean_states:
            raise ValueError("Not a boolean: %s" % optval_str)
        return self._boolean_states[ovs_lower]
|
Oneiroi/clustercheck | setup.py | Python | agpl-3.0 | 707 | 0.001414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Package metadata for clustercheck; the version is derived from SCM tags
# via setuptools_scm rather than hard-coded here.
setup(
    name='clustercheck',
    use_scm_version=True,
    setup_requires=['setuptools_scm'],
    install_requires=[
        'Twisted>=12.2',
        'PyMySQL'
    ],
    description='Standalone service for reporting of Percona XtraDB/Galera cluster nodes',
    license='AGPL-3.0-only',
    keywords='galera,mariadb,percona,database,cluster',
    author='David Busby',
    author_email='oneiroi@fedoraproject.org',
    url='https://github.com/Oneiroi/clustercheck/',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'clustercheck = clustercheck:main',
        ],
    }
)
|
KarrLab/kinetic_datanator | datanator/parse_metabolite_concentration.py | Python | mit | 2,388 | 0.017588 | import pandas as pd
import urllib.request
import json
from datanator_query_python.config import config
from datanator_query_python.util import mongo_util
class ParseMetaboliteConcentration(mongo_util.MongoUtil):
    """Import metabolite concentration JSON documents from GitHub into MongoDB."""

    def __init__(self,
                 MongoDB=None,
                 db=None,
                 collection=None,
                 max_entries=float('inf'),
                 username=None,
                 password=None,
                 authSource='admin',
                 readPreference='nearest'):
        """Connect to MongoDB (via mongo_util.MongoUtil) and remember the
        target collection name and an optional entry cap.
        """
        super().__init__(MongoDB=MongoDB, db=db,
                         username=username,
                         password=password,
                         authSource=authSource,
                         readPreference=readPreference)
        self.max_entries = max_entries
        self.collection = collection

    def parse_metabolite(self):
        """
        Read JSON metabolite concentration files from Github and
        insert separate documents for each metabolite into MongoDB database

        Args:
            ()
        Return:
            ()
        """
        collection = self.db_obj[self.collection]
        metabolites = ["ATP", "CTP", "GMP", "GTP", "IMP", "NAD", "NADH",
                       "NADP", "NADPH", "TTP", "UTP"]
        base_url = ("https://raw.githubusercontent.com/KarrLab/datanator/"
                    "tutorial/docs/metabolites/")
        for name in metabolites:
            # Fetch and decode one metabolite document per compound name.
            url = urllib.request.urlopen(base_url + name + ".json")
            data = json.loads(url.read().decode())
            # One document per metabolite, keyed by InChIKey; concentrations
            # are appended one-by-one with $addToSet to avoid duplicates.
            collection.insert_one({"inchikey": data['inchikey']})
            for sub_data in data['concentrations']:
                collection.update_one(
                    {"inchikey": data['inchikey']},
                    {"$addToSet": {'concentrations': sub_data}})
def main():
    """Read credentials from the config module and run the importer."""
    victoria_conf = config.Victoria()
    main_conf = config.Config()
    importer = ParseMetaboliteConcentration(
        MongoDB=main_conf.SERVER,
        username=victoria_conf.USERNAME,
        password=victoria_conf.PASSWORD,
        collection="metabolite_concentration",
        db="datanator-demo",
    )
    importer.parse_metabolite()
# Entry point when this module is executed as a standalone script.
if __name__== '__main__':
    main()
|
udayinfy/openerp-7.0 | stock_with_cost/__init__.py | Python | agpl-3.0 | 1,119 | 0.000894 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
deyvedvm/cederj | urionlinejudge/python/1014.py | Python | gpl-3.0 | 71 | 0 | X = in | t(input())
# URI Online Judge 1014: average consumption. X (total distance, read on the
# previous line) divided by Y (fuel spent) gives km per litre.
Y = float(input())
M = X / Y
print("%.3f km/l" % M)
|
Pio1962/fondinfo | exercises/e5_2011_2_bisection.py | Python | gpl-3.0 | 448 | 0.004464 | import math
def sqrt_bisection(x: float) -> float:
    """Approximate sqrt(x) by bisection.

    Narrows [low, high] until y**2 is within 0.001 of x. For x < 1 the upper
    bound starts at 1, because there sqrt(x) > x and [0, x] would not contain
    the root.
    """
    low, high = 0, x
    if high < 1:
        high = 1
    y = (high + low) / 2
    delta = y ** 2 - x
    while not (-0.001 <= delta <= 0.001):
        if delta < 0:
            # y is too small: the root lies in the upper half.
            low = y
        else:
            high = y
        y = (high + low) / 2
        delta = y ** 2 - x
    return y
# Manual check: compare the bisection result with math.sqrt for one input.
if __name__ == '__main__':
    x = float(input())
    print(sqrt_bisection(x))
    print(math.sqrt(x))
|
ahoarfrost/metaseek | server/migrations/versions/a482d6aceb85_.py | Python | mit | 670 | 0.001493 | """empty message
Revision ID: a482d6aceb85
Revises: 01356afcc714
Create Date: 2017-11-09 15:57:26.288532
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a482d6aceb85'
down_revision = '01356afcc714'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add nullable ``discovery.num_datasets`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('discovery', sa.Column('num_datasets', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``discovery.num_datasets`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('discovery', 'num_datasets')
    # ### end Alembic commands ###
|
wd15/chimad-phase-field | _apps/box/test.py | Python | mit | 3,449 | 0.00116 | """Test main.py
"""
import os
import json
from unittest.mock import patch, Mock
from starlette.testclient import TestClient
from toolz.curried import pipe, curry
from fastapi import UploadFile
from click.testing import CliRunner
from boxsdk import JWTAuth
from main import app, get_auth, upload_to_box, get_config_filename
client = TestClient(app) # pylint: disable=invalid-name
def is_(x):  # pylint: disable=invalid-name
    """Build a predicate that tests identity with *x*."""
    return lambda y: x is y


def equals(x):  # pylint: disable=invalid-name
    """Build a predicate that tests equality with *x*."""
    return lambda y: x == y
@patch("main.JWTAuth")
def test_auth(jwtauth):
    """Check that get_auth hands back the (patched) JWTAuth object."""
    # Make the patched constructor return the real JWTAuth class as a marker.
    jwtauth.return_value = JWTAuth
    app_settings = dict(clientID="test", clientSecret="test")
    enterprise = dict(enterpriseID="test")
    app_auth = dict(publicKeyID="test", passphrase="test")
    assert get_auth(app_settings, enterprise, app_auth) is JWTAuth
class MockStream(Mock):
    """Stand-in for an uploaded-stream object used by the tests."""

    # Fixed file id reported by the fake stream.
    id = 0

    @staticmethod
    def get_download_url():
        """Return a constant fake download URL."""
        return "https://test.com"

    def get_data(self):
        """Return the payload the upload tests expect to be echoed back."""
        return {
            "file_id": self.id,
            "download_link": self.get_download_url(),
            "folder_name": "test",
        }
class MockFolder(Mock):
    """Stand-in for a Box folder object used by the tests."""

    @staticmethod
    def create_subfolder(*_):
        """Ignore all arguments and hand back a fresh fake folder."""
        return MockFolder()

    @staticmethod
    def upload_stream(*_):
        """Ignore all arguments and hand back a fresh fake stream."""
        return MockStream()
class MockClient(Mock):
    """Stand-in for the Box client used by the tests: only folder lookup."""
    @staticmethod
    def folder(folder_id=None):
        """Return a fake folder whose Mock ``name`` records *folder_id*."""
        return MockFolder(name=folder_id)
@curry
def write_json(filename, data):
    """Write a JSON file. Used in the tests.

    Curried via toolz: ``write_json(filename)`` yields a writer awaiting
    ``data``. Returns *filename* so it threads through ``pipe`` chains.
    """
    with open(filename, "w") as fstream:
        json.dump(data, fstream)
    return filename
def get_test_config():
    """Write a fake Box JWT config to ``test_config.json`` and return its path."""
    return pipe(
        dict(
            enterpriseID="test",
            boxAppSettings=dict(
                clientID="test",
                clientSecret="test",
                appAuth=dict(publicKeyID="test", passphrase="test"),
            ),
        ),
        # write_json returns the filename, which becomes this function's result.
        write_json("test_config.json"),
    )
@patch("main.JWTAuth", autospec=True)
@patch("main.Client", new=MockClient)
@patch("main.get_config_filename", new=get_test_config)
def test_upload_to_box(*_):
    """Test the upload_to_box function.

    Runs in an isolated filesystem so the generated config file does not
    leak, with the Box SDK patched out by the mock classes.
    """
    with CliRunner().isolated_filesystem():
        assert pipe(
            get_test_config(),
            upload_to_box(UploadFile("wow"), "test"),
            equals(MockStream().get_data()),
        )
@patch("main.JWTAuth", autospec=True)
@patch("main.Client", new=MockClient)
@patch("main.get_config_filename", new=get_test_config)
def test_upload_endpoint(*_):
    """Test the upload endpoint.

    Posts a small JSON file to ``/upload/`` with the Box SDK patched out and
    checks the response echoes the fake stream's download link.
    """
    with CliRunner().isolated_filesystem():
        assert pipe(
            "test.txt",
            write_json(data=dict(a=1)),
            lambda x: dict(fileb=open(x, "rb")),
            lambda x: client.post(f"/upload/", files=x),
            lambda x: x.json()["download_link"],
            equals(MockStream().get_data()["download_link"]),
        )
def test_config_filename():
    """Test get_config_filename.

    Only the basename is pinned so the test stays independent of the
    directory the config file lives in.
    """
    assert pipe(
        get_config_filename(), os.path.basename, equals("1014649_e91k0tua_config.json")
    )
|
zonble/axe_py | axe4.py | Python | mit | 1,045 | 0.019139 | #!/usr/bin/env python
# encoding: utf-8
# http://axe.g0v.tw/level/4
import urllib2, re
# Python 2 scraper for http://axe.g0v.tw/level/4: walks 24 paginated tables,
# spoofing browser headers, and dumps the rows as a JSON array.
lines = []; last_url = None
for index in range(1, 25):
    url = "http://axe-level-4.herokuapp.com/lv4/" if index == 1 \
        else "http://axe-level-4.herokuapp.com/lv4/?page=" + str(index)
    # The hint is that we shall make our bot look like a real browser.
    req = urllib2.Request(url)
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
    req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.73.11 (KHTML, like Gecko) Version/7.0.1 Safari/537.73.11')
    if last_url:
        # Pretend we navigated here from the previous page.
        req.add_header('Referer', last_url)
    last_url = url
    html = urllib2.urlopen(req).read()
    pattern = r"<tr>\s*<td>(.*)</td>\s*<td>(.*)</td>\s*<td>(.*)</td>\s*</tr>"
    # Skip the first match: it is the table's header row.
    results = re.findall(pattern, html, re.MULTILINE)[1:]
    format = '{"town": "%s", "village": "%s", "name" : "%s"}'
    for result in results:
        lines.append(format % tuple(result))
with open("test.txt", "w") as f:
    f.write("[%s]" % ",\n".join(lines))
|
hugegreenbug/libgestures | include/build/android/pylib/utils/test_options_parser.py | Python | bsd-3-clause | 3,471 | 0.00461 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses options for the instrumentation tests."""
import os
# TODO(gkanwar): Some downstream scripts current rely on these functions
# existing. This dependency should be removed, and this file deleted, in the
# future.
def AddBuildTypeOption(option_parser):
  """Decorates OptionParser with build type option."""
  # BUILDTYPE from the environment wins over the hard-coded Debug default.
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  option_parser.add_option('--debug', action='store_const', const='Debug',
                           dest='build_type', default=default_build_type,
                           help='If set, run test suites under out/Debug. '
                                'Default is env var BUILDTYPE or Debug')
  option_parser.add_option('--release', action='store_const', const='Release',
                           dest='build_type',
                           help='If set, run test suites under out/Release. '
                                'Default is env var BUILDTYPE or Debug.')
def AddTestRunnerOptions(option_parser, default_timeout=60):
  """Decorates OptionParser with options applicable to all tests."""
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=default_timeout)
  option_parser.add_option('-c', dest='cleanup_test_files',
                           help='Cleanup test files on the device after run',
                           action='store_true')
  option_parser.add_option('--num_retries', dest='num_retries', type='int',
                           default=2,
                           help='Number of retries for a test before '
                                'giving up.')
  option_parser.add_option('-v',
                           '--verbose',
                           dest='verbose_count',
                           default=0,
                           action='count',
                           help='Verbose level (multiple times for more)')
  profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
               'traceview']
  option_parser.add_option('--profiler', dest='profilers', action='append',
                           choices=profilers,
                           help='Profiling tool to run during test. '
                                'Pass multiple times to run multiple profilers. '
                                'Available profilers: %s' % profilers)
  option_parser.add_option('--tool',
                           dest='tool',
                           help='Run the test under a tool '
                                '(use --tool help to list them)')
  option_parser.add_option('--flakiness-dashboard-server',
                           dest='flakiness_dashboard_server',
                           help=('Address of the server that is hosting the '
                                 'Chrome for Android flakiness dashboard.'))
  option_parser.add_option('--skip-deps-push', dest='push_deps',
                           action='store_false', default=True,
                           help='Do not push dependencies to the device. '
                                'Use this at own risk for speeding up test '
                                'execution on local machine.')
  # Build-type flags (--debug/--release) are shared with other parsers.
  AddBuildTypeOption(option_parser)
|
jtpereyda/boofuzz | request_definitions/http_header.py | Python | gpl-2.0 | 13,946 | 0.000072 | from boofuzz import *
# List of all HTTP Headers I could find
# List of all blocks defined here (for easy copy/paste)
"""
sess.connect(s_get("HTTP HEADER ACCEPT"))
sess.connect(s_get("HTTP HEADER ACCEPTCHARSET"))
sess.connect(s_get("HTTP HEADER ACCEPTDATETIME"))
sess.connect(s_get("HTTP HEADER ACCEPTENCODING"))
sess.connect(s_get("HTTP HEADER ACCEPTLANGUAGE"))
sess.connect(s_get("HTTP HEADER AUTHORIZATION"))
sess.connect(s_get("HTTP HEADER CACHECONTROL"))
sess.connect(s_get("HTTP HEADER CLOSE"))
sess.connect(s_get("HTTP HEADER CONTENTLENGTH"))
sess.connect(s_get("HTTP HEADER CONTENTMD5"))
sess.connect(s_get("HTTP HEADER COOKIE"))
sess.connect(s_get("HTTP HEADER DATE"))
sess.connect(s_get("HTTP HEADER DNT"))
sess.connect(s_get("HTTP HEADER EXPECT"))
sess.connect(s_get("HTTP HEADER FROM"))
sess.connect(s_get("HTTP HEADER HOST"))
sess.connect(s_get("HTTP HEADER IFMATCH"))
sess.connect(s_get("HTTP HEADER IFMODIFIEDSINCE"))
sess.connect(s_get("HTTP HEADER IFNONEMATCH"))
sess.connect(s_get("HTTP HEADER IFRANGE"))
sess.connect(s_get("HTTP HEADER IFUNMODIFIEDSINCE"))
sess.connect(s_get("HTTP HEADER KEEPALIVE"))
sess.connect(s_get("HTTP HEADER MAXFORWARDS"))
sess.connect(s_get("HTTP HEADER PRAGMA"))
sess.connect(s_get("HTTP HEADER PROXYAUTHORIZATION"))
sess.connect(s_get("HTTP HEADER RANGE"))
sess.connect(s_get("HTTP HEADER REFERER"))
sess.connect(s_get("HTTP HEADER TE"))
sess.connect(s_get("HTTP HEADER UPGRADE"))
sess.connect(s_get("HTTP HEADER USERAGENT"))
sess.connect(s_get("HTTP HEADER VIA"))
sess.connect(s_get("HTTP HEADER WARNING"))
sess.connect(s_get("HTTP HEADER XATTDEVICEID"))
sess.connect(s_get("HTTP HEADER XDONOTTRACK"))
sess.connect(s_get("HTTP HEADER XFORWARDEDFOR"))
sess.connect(s_get("HTTP HEADER XREQUESTEDWITH"))
sess.connect(s_get("HTTP HEADER XWAPPROFILE"))
"""
# Fuzz Accept header
# Accept: text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5
s_initialize("HTTP HEADER ACCEPT")
s_static("GET / HTTP/1.1\r\n")
s_static("Accept")
s_delim(":")
s_delim(" ")
s_string("text")
s_delim("/")
s_string("*")
s_delim(";")
s_string("q")
s_delim("=")
s_int(0, output_format="ascii")
s_delim(".")
s_int(3, output_format="ascii")
s_delim(",")
s_delim(" ")
s_string("text")
s_delim("/")
s_string("html")
s_delim(";")
s_string("q")
s_delim("=")
s_int(0, output_format="ascii")
s_delim(".")
s_int(7, output_format="ascii")
s_delim(",")
s_delim(" ")
s_string("text")
s_delim("/")
s_string("html")
s_delim(";")
s_string("level")
s_delim("=")
s_string("1")
s_delim(",")
s_delim(" ")
s_string("text")
s_delim("/")
s_string("html")
s_delim(";")
s_string("level")
s_delim("=")
s_int(2, output_format="ascii")
s_delim(";")
s_string("q")
s_delim("=")
s_int(0, output_format="ascii")
s_delim(".")
s_int(4, output_format="ascii")
s_delim(",")
s_delim(" ")
s_string("*")
s_delim("/")
s_string("*")
s_delim(";")
s_string("q")
s_delim("=")
s_int(0, output_format="ascii")
s_delim(".")
s_int(5, output_format="ascii")
s_static("\r\n\r\n")
# Fuzz Accept-Charset header
# Accept-Charset: utf-8, unicode-1-1;q=0.8
s_initialize("HTTP HEADER ACCEPTCHARSET")
s_static("GET / HTTP/1.1\r\n")
s_static("Accept-Charset")
s_delim(":")
s_delim(" ")
s_string("utf")
s_delim("-")
s_int(8, output_format="ascii")
s_delim(",")
s_delim(" ")
s_string("unicode")
s_delim("-")
s_int(1, output_format="ascii")
s_delim("-")
s_int(1, output_format="ascii")
s_delim(";")
s_string("q")
s_delim("=")
s_int(0, output_format="ascii")
s_delim(".")
s_int(8, output_format="ascii")
s_static("\r\n\r\n")
# Fuzz Accept-Datetime header
# Accept-Datetime: Thu, 31 May 2007 20:35:00 GMT
s_initialize("HTTP HEADER ACCEPTDATETIME")
s_static("GET / HTTP/1.1\r\n")
s_static("Accept-Datetime")
s_delim(":")
s_delim(" ")
s_string("Thu")
s_delim(",")
s_delim(" ")
s_string("31")
s_delim(" ")
s_string("May")
s_delim(" ")
s_string("2007")
s_delim(" ")
s_string("20")
s_delim(":")
s_string("35")
s_delim(":")
s_string("00")
s_delim(" ")
s_string("GMT")
s_static("\r\n\r\n")
# Fuzz Accept-Encoding header
# Accept-Encoding: gzip, deflate
s_initialize("HTTP HEADER ACCEPTENCODING")
s_static("GET / HTTP/1.1\r\n")
s_static("Accept-Encoding")
s_delim(":")
s_delim(" ")
s_string("gzip")
s_delim(", ")
s_string("deflate")
s_static("\r\n\r\n")
# Fuzz Accept-Language header
# Accept-Language: en-us, en;q=0.5
s_initialize("HTTP HEADER ACCEPTLANGUAGE")
s_static("GET / HTTP/1.1\r\n")
s_static("Accept-Language")
s_delim(":")
s_delim(" ")
s_string("en-us")
s_delim(",")
s_string("en")
s_delim(";")
s_string("q")
s_delim("=")
s_string("0.5")
s_static("\r\n\r\n")
# Fuzz Authorization header
# Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
s_initialize("HTTP HEADER AUTHORIZATION")
s_static("GET / HTTP/1.1\r\n")
s_static("Authorization")
s_delim(":")
s_delim(" ")
s_string("Basic")
s_delim(" ")
s_string("QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
s_static("\r\n\r\n")
# Fuzz Cache-Control header
# Cache-Control: no-cache
s_initialize("HTTP HEADER CACHECONTROL")
s_static("GET / HTTP/1.1\r\n")
s_static("Cache-Control")
s_delim(":")
s_delim(" ")
s_string("no")
s_delim("-")
s_string("cache")
s_static("\r\n\r\n")
# Fuzz Connection header
# Connection: close
s_initialize("HTTP HEADER CLOSE")
s_static("GET / HTTP/1.1\r\n")
s_static("Connection")
s_delim(":")
s_delim(" ")
s_string("close")
s_static("\r\n\r\n")
# Fuzz Content Length header
# Content-Length: 348
s_initialize("HTTP HEADER CONTENTLENGTH")
s_static("GET / HTTP/1.1\r\n")
s_static("Content-Length")
s_delim(":")
s_delim(" ")
s_string("348")
s_static("\r\n\r\n")
# Fuzz Content MD5 header
# Content-MD5: Q2hlY2sgSW50ZWdyaXR5IQ==
s_initialize("HTTP HEADER CONTENTMD5")
s_static("GET / HTTP/1.1\r\n")
s_static("Content-MD5")
s_delim(":")
s_delim(" ")
s_string("Q2hlY2sgSW50ZWdyaXR5IQ==")
s_static("\r\n\r\n")
# Fuzz COOKIE header
# Cookie: PHPSESSIONID=hLKQPySBvyTRq5K5RJmcTHQVtQycmwZG3Qvr0tSy2w9mQGmbJbJn;
s_initialize("HTTP HEADER COOKIE")
s_static("GET / HTTP/1.1\r\n")
if s_block_start("cookie"):
s_static("Cookie")
s_delim(":")
s_delim(" ")
s_string("PHPSESSIONID")
s_delim("=")
s_string("hLKQPySBvyTRq5K5RJmcTHQVtQycmwZG3Qvr0tSy2w9mQGmbJbJn")
s_static(";")
s_static("\r\n")
s_block_end()
s_repeat("cookie", max_reps=5000, step=500)
s_static("\r\n\r\n")
# Fuzz Date header
# Date: Tue, 15 Nov 2012 08:12:31 EST
s_initialize("HTTP HEADER DATE")
s_static("GET / HTTP/1.1\r\n")
s_static("Date")
s_delim(":")
s_delim(" ")
s_string("Tue")
s_delim(",")
s_delim(" ")
s_string("15")
s_delim(" ")
s_string("Nov")
s_delim(" ")
s_string("2012")
s_delim(" ")
s_string("08")
s_delim(":")
s_string("12")
s_delim(":")
s_string("31")
s_delim(" ")
s_string("EST")
s_static("\r\n\r\n")
# Fuzz DNT header -> May be same as X-Do-Not-Track?
# DNT: 1
s_initialize("HTTP HEADER DNT")
s_static("GET / HTTP/1.1\r\n")
s_static("DNT")
s_delim(":")
s_delim(" ")
s_string("1")
s_static("\r\n\r\n")
# Fuzz Expect header
# Expect: 100-continue
s_initialize("HTTP HEADER EXPECT")
s_static("GET / HTTP/1.1\r\n")
s_static("Expect")
s_delim(":")
s_delim(" ")
s_string("100")
s_delim("-")
s_string("continue")
s_static("\r\n\r\n")
# Fuzz From header
# From: derp@derp.com
s_initialize("HTTP HEADER FROM")
s_static("GET / HTTP/1.1\r\n")
s_static("From")
s_delim(":")
s_delim(" ")
s_string("derp")
s_delim("@")
s_string("derp")
s_delim(".")
s_string("com")
s_static("\r\n\r\n")
# Fuzz Host header
# Host: 127.0.0.1
s_initialize("HTTP HEADER HOST")
s_static("GET / HTTP/1.1\r\n")
s_static("Host")
s_delim(":")
s_delim(" ")
s_string("127.0.0.1")
s_static("\r\n")
s_string("Connection")
s_delim(":")
s_delim(" ")
s_string("Keep-Alive")
s_static("\r\n\r\n")
# Fuzz If-Match header
# If-Match: "737060cd8c284d8af7ad3082f209582d"
s_initialize("HTTP HEADER IFMATCH")
s_static("GET / HTTP/1.1\r\n")
s_static("If-Match")
s_delim(":")
s_delim(" ")
s_static('"')
s_string("737060cd8c284d8af7ad3082f209582d")
s_static('"')
s_static("\r\n\r\n")
# Fuzz If-Modified-Since header
# If-Modified-Since: Sat, 29 Oct 2012 19:43:31 ESTc
s_initialize("HTTP HEADER IFMODIFIEDSINCE")
s_static("GET / HTTP/1.1\r\n")
s_static("If-Modified-Since")
s_delim(":")
s_delim(" ")
s_string("Sat")
s_delim(",")
s_delim(" ")
s_s |
tylertian/Openstack | openstack F/python-novaclient/novaclient/v1_1/floating_ips_bulk.py | Python | apache-2.0 | 1,938 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bulk Floating IPs interface
"""
from novaclient import base
class FloatingIP(base.Resource):
    """A single floating IP address handled by the bulk extension."""

    def __repr__(self):
        # self.address is presumably populated from the API payload by
        # base.Resource (defined outside this file) — verify against base.
        return "<FloatingIP: %s>" % self.address
class FloatingIPBulkManager(base.ManagerWithFind):
    """Manager for the os-floating-ips-bulk API extension."""
    resource_class = FloatingIP

    def list(self, host=None):
        """
        List all floating IPs, optionally restricted to one host.
        """
        if host is None:
            return self._list('/os-floating-ips-bulk', 'floating_ip_info')
        return self._list('/os-floating-ips-bulk/%s' % host,
                          'floating_ip_info')

    def create(self, ip_range, pool=None, interface=None):
        """
        Create floating IPs by range.
        """
        body = {"floating_ips_bulk_create": {'ip_range': ip_range}}
        # pool/interface are optional and only sent when explicitly given.
        if pool is not None:
            body['floating_ips_bulk_create']['pool'] = pool
        if interface is not None:
            body['floating_ips_bulk_create']['interface'] = interface
        return self._create('/os-floating-ips-bulk', body,
                            'floating_ips_bulk_create')

    def delete(self, ip_range):
        """
        Delete floating IPs by range.
        """
        body = {"ip_range": ip_range}
        return self._update('/os-floating-ips-bulk/delete', body)
|
jpec/tasks.py | config.py | Python | gpl-2.0 | 350 | 0 | # config | .py
# Configuration for tasks application.
# Author: Julien Pecqueur (julien@peclu.net)
# License: GPL
# Application identity.
NAME = 'tasks.py'
VERSION = 0.1
# Path to tasks database
DB = './tasks.db'
# Interface to listen
HOST = '0.0.0.0'
PORT = '8081'
# Debug mode
DEBUG = False
# Auto-reload service in case of file change
RELOADER = True
|
wtanaka/beam | sdks/python/apache_beam/version.py | Python | apache-2.0 | 870 | 0 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK version information and utilities."""
__version__ = '2.1.0.dev'
|
ZhangXiaoyu-Chief/sandwich | account/models.py | Python | apache-2.0 | 1,237 | 0.003314 | from djan | go.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
    """Per-user profile (nickname/avatar) linked one-to-one to auth.User."""
    user = models.OneToOneField(User, verbose_name=u'用户名', on_delete=models.CASCADE, related_name="userprofile")
    nickname = models.CharField(max_length=32, verbose_name=u'昵称')
    avatar = models.CharField(max_length=300, blank=True, null=True, verbose_name=u'头像')

    def __str__(self):
        return self.nickname

    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return self.nickname

    def get_info(self):
        """Serialize this profile and its related user into a plain dict."""
        return {
            "id": self.id,
            "username": self.user.username,
            "nickname": self.nickname if self.nickname else "",
            "avatar": self.avatar if self.avatar else "",
            "email": self.user.email,
            "is_superuser": self.user.is_superuser,
            "status": self.user.is_active,
            "create_date": self.user.date_joined.strftime("%Y-%m-%d %H:%M"),
            # "last_date": self.user.get_latest_by()
            "group": [{"id": group.id, "name": group.name} for group in self.user.groups.all()]
        }

    class Meta:
        verbose_name = u'用户资料'
        verbose_name_plural = u'用户资料'
|
rehandalal/buchner | docs/conf.py | Python | bsd-3-clause | 7,703 | 0.005452 | # -*- coding: utf-8 -*-
#
# buchner documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 31 14:35:13 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'buchner'
copyright = u'2013, Rehan Dalal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from buchner import __version__
version = __version__
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
| html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'buchnerdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'buchner.tex', u'buchner Documentation', u'Rehan Dalal',
'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'buchner', u'buchner Documentation',
[u'Rehan Dalal'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'buchner', u'buchner Documentation',
u'Rehan Dalal', 'buchner', 'One line description of project.',
'Miscellaneous'), ]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
CooperLuan/devops.notes | taobao/top/api/rest/SubusersGetRequest.py | Python | mit | 303 | 0.029703 | '''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class SubusersGetRequest(RestApi):
    """TOP API request wrapper for ``taobao.subusers.get``."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Nick of the user whose sub-accounts are being queried.
        self.user_nick = None

    def getapiname(self):
        return 'taobao.subusers.get'
| |
Panos512/invenio | modules/websubmit/lib/functions/Send_APP_Mail.py | Python | gpl-2.0 | 12,215 | 0.009415 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Send_APP_Mail
## This function send an email informing the original
## submitter of a document that the referee has approved/
## rejected the document. The email is also sent to the
## referee for checking.
## Author: T.Baron
## PARAMETERS:
## newrnin: name of the file containing the 2nd reference
## addressesAPP: email addresses to which the email will
## be sent (additionally to the author)
## categformatAPP: variable needed to derive the addresses
## mentioned above
import os
import re
from invenio.config import CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_CERN_SITE, \
CFG_SITE_RECORD
from invenio.access_control_admin import acc_get_role_users, acc_get_role_id
from invenio.dbquery import run_sql
from invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.errorlib import register_exception
from invenio.search_engine import print_record
from invenio.mailutils import scheduled_send_email
from invenio.bibtask import bibtask_allocate_sequenceid
## The field in which to search for the record submitter/owner's email address:
## (the two values are bibliographic field tags -- presumably MARC tags where
## WebSubmit stores the owner's e-mail; confirm against the site's schema)
if CFG_CERN_SITE:
    ## This is a CERN site - we use 859__f for submitter/record owner's email:
    CFG_WEBSUBMIT_RECORD_OWNER_EMAIL = "859__f"
else:
    ## Non-CERN site. Use 8560_f for submitter/record owner's email:
    CFG_WEBSUBMIT_RECORD_OWNER_EMAIL = "8560_f"
def Send_APP_Mail (parameters, curdir, form, user_info=None):
"""
This function send an ema | il informing the original submitter of a
document that the referee has approved/ rejected the document. The
email is also sent to the referee for checking.
Parameters:
* addressesAPP: email addresses of the people who will receive
this email (comma separated list). this parameter may contain
the <CATEG> string. In which case the variable computed from
the [categformatAFP] parameter replaces th | is string.
eg.: "<CATEG>-email@cern.ch"
* categformatAPP contains a regular expression used to compute
the category of the document given the reference of the
document.
eg.: if [categformatAFP]="TEST-<CATEG>-.*" and the reference
of the document is "TEST-CATEGORY1-2001-001", then the computed
category equals "CATEGORY1"
* newrnin: Name of the file containing the 2nd reference of the
approved document (if any).
* edsrn: Name of the file containing the reference of the
approved document.
"""
global titlevalue,authorvalue, emailvalue,sysno,rn
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
sequence_id = bibtask_allocate_sequenceid(curdir)
doctype = form['doctype']
titlevalue = titlevalue.replace("\n"," ")
authorvalue = authorvalue.replace("\n","; ")
# variables declaration
categformat = parameters['categformatAPP']
otheraddresses = parameters['addressesAPP']
newrnpath = parameters['newrnin']
## Get the name of the decision file:
try:
decision_filename = parameters['decision_file']
except KeyError:
decision_filename = ""
## Get the name of the comments file:
try:
comments_filename = parameters['comments_file']
except KeyError:
comments_filename = ""
## Now try to read the comments from the comments_filename:
if comments_filename in (None, "", "NULL"):
## We don't have a name for the comments file.
## For backward compatibility reasons, try to read the comments from
## a file called 'COM' in curdir:
if os.path.exists("%s/COM" % curdir):
try:
fh_comments = open("%s/COM" % curdir, "r")
comment = fh_comments.read()
fh_comments.close()
except IOError:
## Unable to open the comments file
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open " \
"comments file [%s/COM] but was " \
"unable to." % curdir
register_exception(prefix=exception_prefix)
comment = ""
else:
comment = comment.strip()
else:
comment = ""
else:
## Try to read the comments from the comments file:
if os.path.exists("%s/%s" % (curdir, comments_filename)):
try:
fh_comments = open("%s/%s" % (curdir, comments_filename), "r")
comment = fh_comments.read()
fh_comments.close()
except IOError:
## Oops, unable to open the comments file.
comment = ""
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open comments " \
"file [%s/%s] but was unable to." \
% (curdir, comments_filename)
register_exception(prefix=exception_prefix)
else:
comment = comment.strip()
else:
comment = ""
## Now try to read the decision from the decision_filename:
if decision_filename in (None, "", "NULL"):
## We don't have a name for the decision file.
## For backward compatibility reasons, try to read the decision from
## a file called 'decision' in curdir:
if os.path.exists("%s/decision" % curdir):
try:
fh_decision = open("%s/decision" % curdir, "r")
decision = fh_decision.read()
fh_decision.close()
except IOError:
## Unable to open the decision file
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open " \
"decision file [%s/decision] but was " \
"unable to." % curdir
register_exception(prefix=exception_prefix)
decision = ""
else:
decision = decision.strip()
else:
decision = ""
else:
## Try to read the decision from the decision file:
try:
fh_decision = open("%s/%s" % (curdir, decision_filename), "r")
decision = fh_decision.read()
fh_decision.close()
except IOError:
## Oops, unable to open the decision file.
decision = ""
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open decision " \
"file [%s/%s] but was unable to." \
% (curdir, decision_filename)
register_exception(prefix=exception_prefix)
else:
decision = decision.strip()
if os.path.exists("%s/%s" % (curdir,newrnpath)):
fp = open("%s/%s" % (curdir,newrnpath) , "r")
newrn = fp.read()
fp.close()
else:
newrn = ""
|
sv0/django-markdown-app | example/project/wsgi.py | Python | lgpl-3.0 | 389 | 0 | """
WSGI config for project.
It exposes the WSGI callable as a module-level variable named | ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi | /
"""
import os

# Point Django at the project's settings module unless the environment
# already specifies one (setdefault leaves an existing value untouched).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")

from django.core.wsgi import get_wsgi_application  # noqa

# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
|
dominicrodger/django-magazine | magazine/tests/html_sanitizer.py | Python | mit | 1,974 | 0 | from django.test import TestCase
from magazine.utils.word_cleaner import clean_word_text
class HTMLSanitizerTestCase(TestCase):
    """Exercise clean_word_text: attribute whitelisting, tag stripping,
    and removal of <style> blocks in Word-generated markup."""

    def testStripAttributes(self):
        # Event-handler attributes must be dropped; href/name/title survive.
        markup = u'<a href="foobar" name="hello"title="View foobar" onclick="malicious()">hello!</a>'
        self.assertEqual(clean_word_text(markup),
                         u'<a href="foobar" name="hello" title="View foobar">hello!</a>')

    def testStripTags(self):
        # <script> tags are removed, though their text content remains.
        markup = u'<script type="text/javascript">alert("what?");</script>hello!'
        self.assertEqual(clean_word_text(markup), u'alert("what?");hello!')

    def testStyleStripped(self):
        # A bare <style> element disappears entirely, content included.
        markup = u'<style>foobar</style><p>hello!</p>'
        self.assertEqual(clean_word_text(markup), u'<p>hello!</p>')

        # The stripping must not depend on the exact attribute set of the tag.
        markup = u"""
        <style type="text/css" somethingelse="something">foobard</style>
        <p>hello!</p>
        """
        self.assertEqual(clean_word_text(markup), u'<p>hello!</p>')

        # Nor on the casing of the tag or its attributes.
        markup = u"""
        <STYLE TYPE="TEXT/CSS" somethingelse="something">foobar</STYLE>
        <p>hello!</p>
        """
        self.assertEqual(clean_word_text(markup), u'<p>hello!</p>')

        # Every <style> block is stripped, not just the first one.
        markup = u"""
        <STYLE TYPE="TEXT/CSS" somethingelse="something">foobar</STYLE>
        <p>hello!</p>
        <style type="text/css" somethingelse="something">foobar</style>
        """
        self.assertEqual(clean_word_text(markup), u'<p>hello!</p>')

    def testStyleStrippedEmptyTag(self):
        # Self-closing style tags are removed and nothing else is altered.
        markup = u"""
        <style type="text/css" somethingelse="something" /><p>hello!</p>"""
        self.assertEqual(clean_word_text(markup), u'<p>hello!</p>')

    def testEmpty(self):
        # The empty string passes through unchanged.
        markup = u''
        self.assertEqual(clean_word_text(markup), u'')
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractMercurytranslations7BlogspotCom.py | Python | bsd-3-clause | 578 | 0.032872 |
def extractMercurytranslations7BlogspotCom(item):
    """Parse a feed item from 'mercurytranslations7.blogspot.com'.

    Returns a release message for recognised tags, None for previews or
    items without a chapter/volume number, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Skip previews and items that carry neither a chapter nor a volume.
    if "preview" in item['title'].lower() or not (chp or vol):
        return None

    # (feed tag, series name, translation type) triples this parser knows.
    known_series = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    item_tags = item['tags']
    for feed_tag, series_name, tl_type in known_series:
        if feed_tag not in item_tags:
            continue
        return buildReleaseMessageWithType(
            item, series_name, vol, chp, frag=frag, postfix=postfix,
            tl_type=tl_type)
    return False
return False
|
jordanemedlock/psychtruths | temboo/core/Library/Utilities/Encoding/HTMLUnescape.py | Python | apache-2.0 | 2,975 | 0.00437 | # -*- coding: utf-8 -*-
###############################################################################
#
# HTMLUnescape
# Replaces character entity names in the specified text with equivalent HTML markup characters.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecu | tion
import json
class HTMLUnescape(Choreography):
    """Choreo that converts character entity references back into HTML markup."""

    # Temboo library path identifying this Choreo on the server.
    _CHOREO_URI = '/Library/Utilities/Encoding/HTMLUnescape'

    def __init__(self, temboo_session):
        """
        Create a new instance of the HTMLUnescape Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(HTMLUnescape, self).__init__(temboo_session, self._CHOREO_URI)

    def new_input_set(self):
        """Return an empty, Choreo-specific input set."""
        return HTMLUnescapeInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the Choreo-specific result set."""
        return HTMLUnescapeResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap a running execution in the Choreo-specific execution handle."""
        return HTMLUnescapeChoreographyExecution(session, exec_id, path)
class HTMLUnescapeInputSet(InputSet):
    """Input parameters accepted by the HTMLUnescape Choreo."""

    def set_EscapedHTML(self, value):
        """Set the required EscapedHTML input: the escaped HTML (string) to decode."""
        super(HTMLUnescapeInputSet, self)._set_input('EscapedHTML', value)
class HTMLUnescapeResultSet(ResultSet):
    """Typed accessors for the outputs returned by the HTMLUnescape Choreo."""

    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin ``str``; kept as-is
        # for backward compatibility with generated-code callers.
        return json.loads(str)

    def get_UnescapedHTML(self):
        """Return the "UnescapedHTML" output (string): the unescaped HTML, or None."""
        return self._output.get('UnescapedHTML', None)
class HTMLUnescapeChoreographyExecution(ChoreographyExecution):
    """Handle for an in-flight HTMLUnescape execution."""

    def _make_result_set(self, response, path):
        # Expose execution results through the Choreo's typed result set.
        return HTMLUnescapeResultSet(response, path)
|
pytorch/fairseq | examples/latent_depth/latent_depth_src/__init__.py | Python | mit | 380 | 0 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the | root directory of this source tree.
from . import multilingual_translation_latent_depth # noqa
from .loss import latent_depth # noqa
from .models import latent_multilingual_transformer # noqa
from .modules import latent_layers # | noqa
|
stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/besselj1/benchmark/python/scipy/benchmark.py | Python | apache-2.0 | 2,191 | 0 | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in complian | ce with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on | an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.special.j1."""
from __future__ import print_function
import timeit
NAME = "besselj1"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
    """Emit the TAP protocol version header line."""
    print("TAP version 13")
def print_summary(total, passing):
    """Print the TAP benchmark summary.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests

    """
    summary_lines = [
        "#",
        "1.." + str(total),  # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    ]
    for line in summary_lines:
        print(line)
def print_results(elapsed):
    """Print benchmark results.

    # Arguments

    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(0.131009101868)
    ```
    """
    # Throughput in iterations per second.
    rate = ITERATIONS / elapsed
    for line in (" ---",
                 " iterations: " + str(ITERATIONS),
                 " elapsed: " + str(elapsed),
                 " rate: " + str(rate),
                 " ..."):
        print(line)
def benchmark():
    """Run the benchmark and print benchmark results."""
    # The timed statement and its setup run inside timeit's sandbox,
    # hence the string form with their own imports.
    timer = timeit.Timer(
        "y = j1(random()*100000.0)",
        setup="from scipy.special import j1; from random import random;")

    print_version()
    for run in range(REPEATS):
        print("# python::scipy::" + NAME)
        print_results(timer.timeit(number=ITERATIONS))
        print("ok " + str(run + 1) + " benchmark finished")

    print_summary(REPEATS, REPEATS)
def main():
    """Run the benchmark."""
    benchmark()


# Allow the benchmark to be invoked directly as a script.
if __name__ == "__main__":
    main()
|
hfoffani/pddl-lib | pddlpy/__init__.py | Python | apache-2.0 | 665 | 0.001506 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Hernán M. Foffani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable | law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# |
#
from .pddl import DomainProblem
|
dalek7/umbrella | Python/DDUtil.py | Python | mit | 339 | 0.020649 | import os
import datetime
def exit():
    """Terminate the process immediately.

    Uses os._exit, which bypasses atexit handlers, ``finally`` blocks and
    stdio flushing -- the interpreter dies on the spot.
    """
    os._exit(0)
def GetTimeString(m=-1):
    """Return the current local time as a compact timestamp string.

    ``m == 0`` yields "YYYYmmddHHMMSS"; any other value (including the
    default) yields "YYYYmmdd_HHMMSS" with an underscore separator.
    """
    fmt = "%Y%m%d%H%M%S" if m == 0 else "%Y%m%d_%H%M%S"
    return datetime.datetime.now().strftime(fmt)
|
def MakeDir(directory):
    """Create *directory* (including missing parents) if it does not exist.

    The original checked ``os.path.exists`` and then called ``makedirs``,
    which races if another process creates the directory in between;
    ``exist_ok=True`` performs the same "create if absent" atomically.
    """
    os.makedirs(directory, exist_ok=True)
prathamtandon/g4gproblems | Graphs/binary_tree_to_bst.py | Python | mit | 1,610 | 0.001863 | """
Given a binary tree, convert it to BST. The conversion should be done in such a way
that keeps the original structure of binary tree.
Input:
10
/ \
2 7
/ \
8 4
Output:
8
/ \
4 10
/ \
2 7
Input:
10
/ \
30 15
/ \
20 5
Output:
15
/ \
10 20
/ \
5 30
"""
"""
Approach:
1. Do inorder traversal of t | ree and store it in a temp array.
2. Sort the temp array.
3. Do another inorder traversal, this time replace tree node values with values from sorted array.
Time complexity is O(nlog(n))
"""
def binary_ | to_bst(root):
result = []
inorder(root, result, False)
result = sorted(result)
inorder(root, result, True)
def inorder(root, result, result_to_bst):
if root:
inorder(root.left, result, result_to_bst)
if result_to_bst:
data = result.pop(0)
root.data = data
else:
result.append(root.data)
inorder(root.right, result, result_to_bst)
class Node:
    """A binary-tree node holding *data* and optional child links."""

    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def __repr__(self):
        # Aids interactive debugging of tree transformations.
        return "Node(%r)" % (self.data,)
if __name__ == '__main__':
    # Smoke test: build the example tree from the module docstring ...
    root = Node(10)
    root.left = Node(2)
    root.right = Node(7)
    root.left.left = Node(8)
    root.left.right = Node(4)
    binary_to_bst(root)
    # ... and verify the converted tree matches the documented output
    # (its in-order walk is now sorted: 2, 4, 7, 8, 10).
    assert root.data == 8
    assert root.left.data == 4
    assert root.right.data == 10
    assert root.left.left.data == 2
    assert root.left.right.data == 7
|
philrosenfield/core_overshoot_clusters | setup.py | Python | mit | 901 | 0 | from setuptools import setup, find_packages
setup(name='core_overshoot_clusters',
version='0.1',
description='Repo for reproducing Rosenfield et. al ApJ 2017, xxx, xxx',
url='http://github.com/philrosenfield/core_overshoot_clusters',
author='Philip Rosenfield',
author_email='philip.rosenfield@cfa.harvard.edu',
license='MIT',
| packages=['core_overshoot_clusters'],
zip_safe=False,
classifiers=['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2 | ',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
install_requires=['matplotlib', 'numpy', 'astropy', 'scipy', 'pandas'],
include_package_data=True)
|
Azure/azure-sdk-for-python | sdk/reservations/azure-mgmt-reservations/azure/mgmt/reservations/aio/operations/_calculate_exchange_operations.py | Python | mit | 7,926 | 0.005804 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CalculateExchangeOperations:
    """CalculateExchangeOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.reservations.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _post_initial(
        self,
        body: "_models.CalculateExchangeRequest",
        **kwargs: Any
    ) -> Optional["_models.CalculateExchangeOperationResultResponse"]:
        # Single (non-polling) POST that starts the calculate-exchange
        # operation; returns the deserialized body on 200 and None on 202.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.CalculateExchangeOperationResultResponse"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-10-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._post_initial.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(body, 'CalculateExchangeRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)

        if response.status_code == 202:
            # 202 Accepted: progress is reported through these
            # long-running-operation headers instead of a response body.
            response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
            response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
            response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    _post_initial.metadata = {'url': '/providers/Microsoft.Capacity/calculateExchange'}  # type: ignore

    async def begin_post(
        self,
        body: "_models.CalculateExchangeRequest",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.CalculateExchangeOperationResultResponse"]:
        """Calculates the refund amounts and price of the new purchases.

        Calculates price for exchanging ``Reservations`` if there are no policy errors.

        :param body: Request containing purchases and refunds that need to be executed.
        :type body: ~azure.mgmt.reservations.models.CalculateExchangeRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either CalculateExchangeOperationResultResponse or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.reservations.models.CalculateExchangeOperationResultResponse]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CalculateExchangeOperationResultResponse"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial POST before polling starts.
            raw_result = await self._post_initial(
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization once the long-running operation completes.
            deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-saved poller instead of starting a new call.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_post.metadata = {'url': '/providers/Microsoft.Capacity/calculateExchange'}  # type: ignore
|
koala-ai/tensorflow_nlp | nlp/text_representation/glove/run.py | Python | apache-2.0 | 1,371 | 0.001459 | # -*- coding: utf-8 -*-
import argparse
import tensorflow as tf
from nlp.text_representation.glove.train import train
def main(args):
    """Dispatch on ``args.process``; only training is implemented."""
    if args.process != tf.estimator.ModeKeys.TRAIN:
        raise Exception("cannot support this process:" + args.process)
    train(args)
if __name__ == '__main__':
    # Command-line entry point: collect GloVe hyper-parameters and hand
    # them to main().
    # NOTE(review): for --data_dir/--log_dir the 'default' is dead code --
    # argparse ignores defaults when required=True.
    parser = argparse.ArgumentParser()
    # I/O locations
    parser.add_argument('--data_dir', required=True, type=str, default='data/glove')
    parser.add_argument('--log_dir', required=True, type=str, default='data/glove/log')
    # Model / corpus hyper-parameters
    parser.add_argument('--embedding_size', type=int, default=300)
    parser.add_argument('--context_size', type=int, default=10)
    parser.add_argument('--max_vocab_size', type=int, default=100000)
    parser.add_argument('--min_occurrences', type=int, default=1)
    parser.add_argument('--scaling_factor', type=float, default=0.75)
    parser.add_argument('--cooccurrence_cap', type=int, default=100)
    # Training settings
    parser.add_argument('--batch_size', type=int, default=512)
    parser.add_argument('--learning_rate', type=float, default=0.05)
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--tsne_epoch_interval', type=int, default=50)
    parser.add_argument('--result_file', type=str)
    parser.add_argument('--process', type=str, default='train')
    args = parser.parse_args()
    main(args)
|
Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Viewers/Pyploter/05_ExampleDoc.py | Python | mit | 925 | 0.083243 |
#ImportModules
import ShareYourSystem as SYS
#figure
MyPyploter=SYS.PyploterClass(
).mapSet(
{
'-Charts':
{
'|a':{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#lia | rg':[
[1,2,3],
[2,6,3]
],
'#kwa | rg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}),
('|1',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[0,1,2],
[2,3,4]
],
'#kwarg':{
'linestyle':"--",
'color':'r'
}
}
)
],
})
],
'PyplotingChartVariable':
[
('set_xlim',[0,5])
]
}
}
}
).pyplot(
)
#print
print('MyPyploter is ')
SYS._print(MyPyploter)
#show
SYS.matplotlib.pyplot.show()
|
Mesitis/community | sample-code/Python/20 Users/validate_token.py | Python | mit | 2,764 | 0.01411 | '''
- login and get token
- process 2FA if 2FA is setup for this account
- Validates password for changing password using forgot password
'''
import requests
import json
get_token_url = "https://api.canopy.cloud:443/api/v1/sessions/"
validate_otp_url = "https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json" #calling the production server for OTP authentication
get_partner_users_url = "https://api.canopy.cloud:443/api/v1/admin/users.json"
validate_token_url = "https://api.canopy.cloud:443/api/v1/users/password/forget/validate_token.json"
#please replace below with your username and password over here
username = 'userxxx'
password = 'passxxx'
#please enter the OTP token in case it is enabled
otp_code = '123456'
#first call for a fresh token
payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password
headers = {
'accept': "application/json",
'content-type':"application/x-www-form-urlencoded"
}
response = requests.request("POST", get_token_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
token = response.json()['token']
login_flow = response.json()['login_flow']
#in case 2FA is enabled use the OTP code to get the second level of authentication
if login_flow == '2fa_verification':
headers['Authorization'] = token
payload = 'otp_code=' + otp_code
response = requests.request("POST", validate_otp_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True) #print response.text
token = response.json()['token']
login_role = response.json()['role']
switch_user_id = response.json()['id']
if login_role == 'Partneradmin':
#print "============== partner's users ==========="
headers = {
'authorization': token,
'content-type': "application/x-www-form-urlencoded; charset=UTF-8"
}
partner_users = []
response = requests.request("GET", get_partner_users_url, headers=headers)
for parent_user in response.json()['users']:
partner_users.append(parent_user['id'])
#print partner_users
#take the first users in the list as the switch_user_id
switch_user_id = partner_users[0]
#in case the user is a partner_admin then switch_user_id is any one of the users it has access to (here we take the first one from the list)
#replace below with password token
forgo | tten_password_token = ""
payload = {"token":forgotten_password_token}
headers = {
'authorization': token,
'username': username,
'content-type': "a | pplication/x-www-form-urlencoded",
'x-app-switch-user': str(switch_user_id)
}
response = requests.request("POST", validate_token_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
|
eirki/script.service.koalahbonordic | service.py | Python | mit | 2,275 | 0.002637 | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import, division)
import datetime as dt
import xbmc
import xbmcgui
from threading import Thread
from lib import constants as const
from lib import kodi
if const.os == "win":
import pywin32setup
from lib import playback
def minutes_to_next_rounded_update_time():
    """Seconds until the next update instant rounded to the schedule frequency.

    (Despite the name, the return value is a number of seconds.)
    """
    freq_table = {
        "15 min": 900,
        "30 min": 1800,
        "1 hour": 3600,
        "2 hours": 7200,
    }
    freq_secs = freq_table[kodi.settings["schedule frequency"]]

    now = dt.datetime.now()
    into_hour = dt.timedelta(minutes=now.minute, seconds=now.second)

    # Round (now + frequency) down to a whole multiple of the frequency,
    # then subtract the seconds already elapsed since the top of the hour.
    rounded = ((dt.timedelta(seconds=freq_secs) + into_hour).seconds // freq_secs) * freq_secs
    return rounded - into_hour.seconds
def run_schedule():
    """Background loop that periodically triggers the library update script.

    Wakes up on a frequency taken from the add-on settings; each wake-up is
    skipped while media is playing, while the add-on itself is running, or
    when the scheduler is disabled.  Exits when Kodi requests shutdown.
    """
    # First interval is aligned to a "rounded" wall-clock boundary.
    timeout = minutes_to_next_rounded_update_time()
    kodi.log("Starting update scheduler, next update at %s" %
             (dt.datetime.now() + dt.timedelta(seconds=timeout)).strftime("%H:%M"))
    while True:
        # waitForAbort sleeps for `timeout` seconds, returning True early
        # if Kodi is shutting down.
        abort = xbmc.Monitor().waitForAbort(timeout)
        if abort:
            kodi.log("Closing background service")
            break
        # Re-read the frequency each cycle so settings changes take effect
        # without a restart.
        timeout = {
            "15 min": 900,
            "30 min": 1800,
            "1 hour": 3600,
            "2 hours": 7200
        }[kodi.settings["schedule frequency"]]
        scheduler_enabled = kodi.settings["enable schedule"]
        player_active = kodi.rpc("Player.GetActivePlayers")
        # Window 10000 is Kodi's home window; the add-on sets this property
        # while it is running to avoid overlapping updates.
        koala_active = xbmcgui.Window(10000).getProperty("%s running" % const.addonname) == "true"
        if player_active or koala_active or not scheduler_enabled:
            continue
        kodi.log("Starting scheduled update next update at %s" %
                 (dt.datetime.now() + dt.timedelta(seconds=timeout)).strftime("%H:%M"))
        xbmc.executebuiltin("RunScript(%s, mode=library, action=schedule)" % const.addonid)
if __name__ == '__main__':
    # Start the playback manager on its own thread so it does not block
    # the scheduler loop below.
    Thread(target=playback.PlaybackManager).start()
    # Optionally run one update immediately on Kodi startup.
    if kodi.settings["enable startup"]:
        xbmc.executebuiltin("RunScript(%s, mode=library, action=startup)" % const.addonid)
    run_schedule()
|
aroig/offlineimap | offlineimap/folder/Gmail.py | Python | gpl-2.0 | 15,648 | 0.003962 | # Gmail IMAP folder support
# Copyright (C) 2008 Riccardo Murri <riccardo.murri@gmail.com>
# Copyright (C) 2002-2007 John Goerzen <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
from offlineimap import imaputil
from offlineimap import imaplibutil
import offlineimap.accounts
"""Folder implementation to support features of the Gmail IMAP server.
"""
from .IMAP import IMAPFolder
class GmailFolder(IMAPFolder):
"""Folder implementation to support features of the Gmail IMAP server.
    Removing a message from a folder will only remove the "label" from
the message and keep it in the "All mails" folder. To really delete
a message it needs to be copied to the Trash folder. However, this
    is dangerous as our folder moves are implemented as a 1) delete in
one folder and 2) append to the other. If 2 comes before 1, this
will effectively delete the message from all folders. So we cannot
do that until we have a smarter folder move mechanism.
For more information on the Gmail IMAP server:
http://mail.google.com/support/bin/answer.py?answer=77657&topic=12815
https://developers.google.com/google-apps/gmail/imap_extensions
"""
    def __init__(self, imapserver, name, repository):
        """Create a Gmail folder.

        Reads label-sync settings from the account configuration and, when
        label syncing is enabled, registers an extra sync pass that
        propagates Gmail labels.

        :param imapserver: IMAP server wrapper this folder belongs to.
        :param name: folder name on the server.
        :param repository: repository that owns this folder.
        """
        super(GmailFolder, self).__init__(imapserver, name, repository)
        self.trash_folder = repository.gettrashfolder(name)
        # Gmail will really delete messages upon EXPUNGE in these folders
        self.real_delete_folders = [ self.trash_folder, repository.getspamfolder() ]
        # The header under which labels are stored when embedded in messages.
        self.labelsheader = self.repository.account.getconf('labelsheader', 'X-Keywords')
        # enables / disables label sync (off by default)
        self.synclabels = self.repository.account.getconfboolean('synclabels', 0)
        # if synclabels is enabled, add a 4th pass to sync labels
        if self.synclabels:
            self.syncmessagesto_passes.append(('syncing labels', self.syncmessagesto_labels))
        # Labels to be left alone (comma-separated in the config, blanks dropped)
        ignorelabels = self.repository.account.getconf('ignorelabels', '')
        self.ignorelabels = set([lb.strip() for lb in ignorelabels.split(',') if len(lb.strip()) > 0])
    def getmessage(self, uid):
        """Retrieve message with UID from the IMAP server (incl body). Also
        gets Gmail labels and embeds them into the message.

        :returns: the message body or throws an OfflineImapError
                  (probably severity MESSAGE) if e.g. no message with
                  this UID could be found.
        """
        imapobj = self.imapserver.acquireconnection()
        try:
            # One FETCH retrieves both the Gmail labels and the full body.
            data = self._fetch_from_imap(imapobj, str(uid), '(X-GM-LABELS BODY.PEEK[])', 2)
        finally:
            self.imapserver.releaseconnection(imapobj)
        # data looks now e.g.
        #[('320 (X-GM-LABELS (...) UID 17061 BODY[] {2565}','msgbody....')]
        # we only asked for one message, and that msg is in data[0].
        # msgbody is in [0][1].
        body = data[0][1].replace("\r\n", "\n")
        # Embed the labels into the message headers
        if self.synclabels:
            # X-GM-LABELS arrives as a parenthesized, possibly quoted list.
            m = re.search('X-GM-LABELS\s*\(([^\)]*)\)', data[0][0])
            if m:
                labels = set([imaputil.dequote(lb) for lb in imaputil.imapsplit(m.group(1))])
            else:
                labels = set()
            # Never embed labels the user configured us to leave alone.
            labels = labels - self.ignorelabels
            labels_str = self.format_labels_string(self.labelsheader, sorted(labels))
            body = self.savemessage_addheader(body, self.labelsheader, labels_str)
        # Truncate very large bodies in the debug output.
        if len(body)>200:
            dbg_output = "%s...%s" % (str(body)[:150], str(body)[-50:])
        else:
            dbg_output = body
        self.ui.debug('imap', "Returned object from fetching %d: '%s'" %
                      (uid, dbg_output))
        return body
def getmessagelabels(self, uid):
if 'labels' in self.messagelist[uid]:
return self.messagelist[uid]['labels']
else:
return set()
def cachemessagelist(self):
if not self.synclabels:
return super(GmailFolder, self).cachemessagelist()
self.ui.collectingdata(None, self)
self.messagelist = {}
imapobj = self.imapserver.acquireconnection()
try:
msgsToFetch = self._msgs_to_fetch(imapobj)
if not msgsToFetch:
return # No messages to sync
# Get the flags and UIDs for these. single-quotes prevent
# imaplib2 from quoting the sequence.
# Note: msgsToFetch are sequential numbers, not UID's
res_type, response = imapobj.fetch("'%s'" % msgsToFetch,
'(FLAGS X-GM-LABELS UID)')
if res_type != 'OK':
raise OfflineImapError("FETCHING UIDs in folder [%s]%s failed. "
"Server responded '[%s] %s'" % (
self.getrepository(), self,
res_type, response),
OfflineImapError.ERROR.FOLDER)
finally:
self.imapserver.releaseconnection(imapobj)
for messagestr in response:
# looks like: '1 (FLAGS (\\Seen Old) UID 4807)' or None if no msg
# Discard initial message number.
if messagestr == None:
continue
messagestr = messagestr.split(' ', 1)[1]
options = imaputil.flags2hash(messagestr)
if not 'UID' in options:
self.ui.warn('No UID in message with options %s' %\
str(options),
minor = 1)
else:
uid = long(options['UID'])
flags = imaputil.flagsimap2maildir(options['FLAGS'])
m = re.search('\(([^\)]*)\)', options['X-GM-LABELS'])
if m:
labels = set([imaputil.dequote(lb) for lb in imaputil.imapsplit(m.group(1))])
else:
labels = set()
labels = labels - self.ignorelabels
rtime = imaplibutil.Internaldate2epoch(messagestr)
self.messagelist[uid] = {'uid': uid, 'flags': flags, 'labels': labels, 'time': rtime}
def savemessage(self, uid, content, flags, rtime):
"""Save the message on the Server
This backend always assigns a new uid, so the uid arg is ignored.
This function will update the self.messagelist dict to contain
the new message after sucessfully saving it, including labels.
See folder/Base for details. Note that savemessage() does not
check against dryrun settings, so you need to ensure that
savemessage is never called in a dryrun mode.
:param rtime: A timestamp to be used as the mail date
:returns: the UID of the new message as assigned by the server. If the
message is saved, but it's UID can not be found, it will
return 0. If the message can't be written (folder is
read-only for example) it will return -1."""
if not self.synclabels:
return super(GmailFolder, self).savemessage(uid, content, flags, rtime)
labels_str = self.message_getheader(content, self.labelsheader)
if labels_str: labels = self.parse_labels_string(self.labelsheader, labels_str)
|
brownlegion/testfleet | startup.py | Python | apache-2.0 | 994 | 0.016097 | #print(" _____ _ _ _ ");
#print("| __ \ (_) (_) | | ");
#print("| | \/_ _ _ __ ___ _ _ ___ | | _____ _____ ");
#print("| | __| | | | '_ ` _ \| | | / __| | |/ _ \ \ / / _ \ ");
#print("| |_\ | |_| | | | | | | | | \_ | _ \ | | (_) \ V | __/_ ");
#print(" \____/\__,_|_| |_| |_|_| |_|___/ |_|\___/ \_/ \___(_) ");
#print("| __ \ (_) (_) | (_)/ _| ");
#print("| | \/_ _ _ __ ___ _ _ ___ | |_| |_ ___ ");
#print("| | __| | | | '_ ` _ \| | | / __| | | | _/ _ \ ");
#print("| |_\ | |_| | | | | | | | | \__ \ | | | | || __/_ ");
#print(" \____/\__,_|_| |_| |_|_| |_|___/ |_|_|_| \___(_) ");
#print(" ");
# Fixed: a stray trailing "|" after this statement was a syntax error.
print("/data is preserved!")
# Model of compound noun prefixes: surface form -> grammatical tags and
# fully vocalized spelling(s).  The corrupted key "voc | alized" and the
# stray "| " inside one vocalized value were restored to match every
# other entry in the table.
COMP_PREFIX_LIST_MODEL = {
    "": {'tags': (u"",), "vocalized": (u"",)},
    u'ب': {'tags': (u'جر',), "vocalized": (u"بِ",)},
    u'ل': {'tags': (u'جر',), "vocalized": (u"لِ",)},
    u'ال': {'tags': (u'تعريف',), "vocalized": (u"الْ",)},
    u'بال': {'tags': (u'جر', u'تعريف',), "vocalized": (u"بِالْ",)},
    u'لل': {'tags': (u'جر', u'تعريف',), "vocalized": (u"لِلْ",)},
}
# Model of compound noun suffixes (enclitics).
COMP_SUFFIX_LIST_MODEL = [
    "",
    u'ي',
    u"كَ",
]
# affixes tags contains prefixes and suffixes tags
affix_tags = snconst.COMP_PREFIX_LIST_TAGS[procletic]['tags'] \
+snconst.COMP_SUFFIX_LIST_TAGS[encletic_nm]['tags'] \
+snconst.CONJ_SUFFIX_LIST_TAGS[suffix_conj_nm]['tags']
#test if the given word from dictionary accept those
# tags given by affixes
# دراسة توافق الزوائد مع خصائص الاسم،
# مثلا هل يقبل الاسم التأنيث.
if validate_tags(noun_tuple, affix_tags, procletic, encletic_nm, suffix_conj_nm):
## get all vocalized form of suffixes
for vocalized_encletic in snconst.COMP_SUFFIX_LIST_TAGS[encletic_nm]['vocalized']:
for vocalized_suffix in snconst.CONJ_SUFFIX_LIST_TAGS[suffix_conj_nm]['vocalized']:
## verify compatibility between procletics and affix
if self.is_compatible_proaffix_affix(noun_tuple, procletic, vocalized_encletic, vocalized_suffix):
vocalized, semi_vocalized = vocalize(infnoun, procletic, vocalized_suffix, vocalized_encletic)
#add some tags from dictionary entry as
#mamnou3 min sarf and broken plural
original_tags = []
if noun_tuple['mankous'] == u"Tk":
original_tags.append(u"منقوص")
# get affix tags
vocalized_affix_tags = snconst.COMP_PREFIX_LIST_TAGS[procletic]['tags']\
+snconst.COMP_SUFFIX_LIST_TAGS[vocalized_encletic]['tags']\
+snconst.CONJ_SUFFIX_LIST_TAGS[vocalized_suffix]['tags']
# if there are many cases like feminin plural with mansoub and majrour
if 'cases' in snconst.CONJ_SUFFIX_LIST_TAGS[vocalized_suffix]:
list_cases = snconst.CONJ_SUFFIX_LIST_TAGS[vocalized_suffix]['cases']
else:
list_cases = ('',)
|
bozzzzo/quark | quarkc/test/emit/expected/py/marshalling/m/__init__.py | Python | apache-2.0 | 3,469 | 0.004901 | from quark_runtime import *
import quark.reflect
import marshalling_md
import quark
class Inner(object):
    """Generated value object with scalar fields and a string list.

    NOTE(review): this module appears to be Quark-compiler output.  Two
    corrupted lines in setup() ("| f = float(i);" and the split name
    "inner_s | tring_list") were restored by symmetry with Outer.setup()
    and with _getField/_setField below, which both use 'inner_string_list'.
    """
    def _init(self):
        # All fields start unset; setup() populates them.
        self.inner_int = None
        self.inner_string = None
        self.inner_float = None
        self.inner_string_list = None
    def __init__(self): self._init()
    def setup(self, i):
        """Deterministically populate every field from seed `i`; returns self."""
        f = float(i);
        (self).inner_int = i
        (self).inner_string = (u"str of ") + (_toString(i))
        (self).inner_float = (300.14) + (f)
        (self).inner_string_list = _List([u"i"])
        return self
    def _getClass(self):
        # Fully qualified Quark type name.
        return u"m.Inner"
    def _getField(self, name):
        """Reflective read of a named field; unknown names return None."""
        if ((name) == (u"inner_int")):
            return (self).inner_int
        if ((name) == (u"inner_string")):
            return (self).inner_string
        if ((name) == (u"inner_float")):
            return (self).inner_float
        if ((name) == (u"inner_string_list")):
            return (self).inner_string_list
        return None
    def _setField(self, name, value):
        """Reflective write of a named field; unknown names are ignored."""
        if ((name) == (u"inner_int")):
            (self).inner_int = value
        if ((name) == (u"inner_string")):
            (self).inner_string = value
        if ((name) == (u"inner_float")):
            (self).inner_float = value
        if ((name) == (u"inner_string_list")):
            (self).inner_string_list = value
# Bind generated reflection metadata (from marshalling_md) onto Inner.
Inner.m_Inner_ref = marshalling_md.Root.m_Inner_md
Inner.quark_List_quark_String__ref = marshalling_md.Root.quark_List_quark_String__md
class Outer(object):
    """Generated value object: scalar fields plus a list of Inner values."""

    def _init(self):
        # All fields start unset; setup() populates them.
        self.outer_int = None
        self.outer_string = None
        self.outer_float = None
        self.outer_inner_list = None

    def __init__(self):
        self._init()

    def setup(self, i):
        """Deterministically populate every field from seed ``i``; returns self."""
        as_float = float(i)
        self.outer_int = i * 10
        self.outer_string = u"str of " + _toString(i)
        self.outer_float = 314000.15 + as_float
        self.outer_inner_list = _List([
            Inner().setup(i),
            Inner().setup(i + 42),
            Inner().setup(i * 42),
        ])
        return self

    def _getClass(self):
        # Fully qualified Quark type name.
        return u"m.Outer"

    def _getField(self, name):
        """Reflective read of a named field; unknown names return None."""
        if name in (u"outer_int", u"outer_string",
                    u"outer_float", u"outer_inner_list"):
            return getattr(self, name)
        return None

    def _setField(self, name, value):
        """Reflective write of a named field; unknown names are ignored."""
        if name in (u"outer_int", u"outer_string",
                    u"outer_float", u"outer_inner_list"):
            setattr(self, name, value)
# Bind generated reflection metadata (from marshalling_md) onto Outer.
Outer.m_Outer_ref = marshalling_md.Root.m_Outer_md
Outer.quark_List_m_Inner__ref = marshalling_md.Root.quark_List_m_Inner__md
def test_marshalling():
    """Round-trip an Outer through JSON and print fields for comparison."""
    source = Outer().setup(101)
    json_text = quark.toJSON(source, quark.reflect.Class.get(_getClass(source))).toString()
    _println(json_text)
    # Decode into a fresh instance and dump a few fields so the test
    # harness can diff them against the expected output.
    decoded = Outer()
    quark.fromJSON(quark.reflect.Class.get(_getClass(decoded)), decoded, _JSONObject.parse(json_text))
    _println(_toString(decoded.outer_int))
    _println(decoded.outer_string)
    _println(repr(decoded.outer_float))
    _println(repr(decoded.outer_inner_list[1].inner_float))
|
google/strabo | client/python/setup.py | Python | apache-2.0 | 1,044 | 0.002874 | #
# Copyright 2015 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from setuptools import setup
# Package metadata for the Strabo client library.
setup(name='strabo',
      version='0.1',
      description='Client library for Strabo GIS library.',
      url='http://github.com/cairn-labs/strabo',
      author='Sam Brotherton',
      author_email='sbrother@gmail.com',
      # Fixed: was 'MIT', contradicting the Apache License 2.0 header
      # at the top of this file.
      license='Apache-2.0',
      packages=['strabo'],
      install_requires=[
          'requests',
      ],
      zip_safe=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.