| repo_name (string, length 5–100) | path (string, length 4–231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, length 0–8.16k) | middle (string, length 3–512) | suffix (string, length 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
y-higuchi/ramcloud | scripts/transportgraph.py | Python | isc | 2,912 | 0.00103 | #!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for graphing latency of different transports.
"""
from __future__ import division, print_function
from common import *
import subprocess
from collections import defaultdict
from transportbench import *
import sys
uncached = True
if len(sys.argv) > 1 and sys.argv[1] == '-c':
print('Running cached')
uncached = False
numObjects = 10000
objectSizes = [128, 256, 262144, 512, 1024, 32768, 4096, 8192, 131072,
16384, 65536, 2048, 524288, 1048576]
transports = [
'infrc',
'fast+infud',
'unreliable+infud',
'fast+infeth',
'unreliable+infeth',
'tcp',
'fast+udp',
'unreliable+udp'
]
fields = ('latency', 'throughput')
def writeResultsToFiles(results):
for field in fields:
fileName = '%s/run/transport_%s.data' % (top_path, field)
dat = open(fileName, 'w', 1)
for transport in transports:
print('# %s' % transport , file=dat)
for objectSize in sorted(objectSizes):
r = results[field][transport][objectSize]
if r == 0:
continue
print(objectSize, r, file=dat)
print(file=dat)
print(file=dat)
results = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for objectSize in objectSizes:
for transport in transports:
print('Running', transport, 'with',
numObjects, objectSize, 'byte objects')
stats = transportBench(numObjects, objectSize, transport, uncached)
run = stats['run']
runTime = stats['ns'] / 1e9
readSize = stats['size']
readCount = stats['count']
mbPerSecond = (readSize * readCount) / (runTime * 2**20)
usPerObject = runTime * 1000000.0 / numObjects
print('Run', run, transport,
'%.2f MB/s %.2f us/read' % (mbPerSecond, usPerObject))
results['latency'][transport][objectSize] = usPerObject
results['throughput'][transport][objectSize] = mbPerSecond
writeResultsToFiles(results)
|
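# Illustrative sketch (not part of the original script): writeResultsToFiles above emits
# gnuplot-style blocks -- a "# transport" header line, "objectSize value" pairs, and a blank
# line between transports. A minimal reader for that format, assuming exactly that layout:
def read_transport_data(path):
    results, current = {}, None
    with open(path) as handle:
        for line in handle:
            line = line.strip()
            if line.startswith('#'):
                current = line[1:].strip()
                results[current] = []
            elif line and current is not None:
                size, value = line.split()
                results[current].append((int(size), float(value)))
    return results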
algorithm-ninja/territoriali-backend | terry/handlers/base_handler.py | Python | mpl-2.0 | 9,492 | 0.002739 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 2017-2019 - Edoardo Morassutto <edoardo.morassutto@gmail.com>
# Copyright 2017 - Luca Versari <veluca93@gmail.com>
# Copyright 2018 - William Di Luigi <williamdiluigi@gmail.com>
import json
from datetime import datetime
from werkzeug.exceptions import HTTPException, BadRequest
from werkzeug.wrappers import Response
from ..handler_params import HandlerParams
from ..config import Config
from ..database import Database
from ..logger import Logger
class BaseHandler:
@staticmethod
def raise_exc(cls, code, message):
"""
Raise an HTTPException with a code and a message sent in a json like
{
"code": code
"message": message
}
:param cls: HTTPException of the error, for example NotFound, BadRequest, NotAuthorized
:param code: A brief message for the exception, like MISSING_PARAMETER
:param message: A longer description of the error
:return: Nothing, raise the provided exception with the correct response
"""
response = Response()
response.mimetype = "application/json"
response.status_code = cls.code
response.data = json.dumps({
"code": code,
"message": message
})
Logger.warning(cls.__name__.upper(), code + ": " + message)
raise cls(response=response)
def handle(self, endpoint, route_args, request):
"""
Handle a request in the derived handler. The request is routed to the correct method using *endpoint*
:param endpoint: A string with the name of the class method to call with (route_args, request) as parameters,
this method should return a Response or call self.raise_exc. *NOTE*: the method MUST be implemented in the
derived class
:param route_args: The route parameters, the parameters extracted from the matching route in the URL
:param request: The Request object, request.args contains the query parameters of the request
:return: Return a Response if the request is successful, an HTTPException if an error occurred
"""
try:
data = BaseHandler._call(self.__getattribute__(endpoint), route_args, request)
response = Response()
if data is not None:
response.code = 200
response.mimetype = "application/json"
response.data = json.dumps(data)
else:
response.code = 204
return response
except HTTPException as e:
return e
def parse_body(self, request):
"""
Parse the body part of the request in JSON
:param request: The request to be parsed
:return: A dict with the content of the body
"""
return request.form
@staticmethod
def get_end_time(user_extra_time):
"""
Compute the end time for a user
:param user_extra_time: Extra time specific for the user in seconds
:return: The timestamp at which the contest will be finished for this user
"""
start = Database.get_meta("start_time", type=int)
if start is None:
return None
contest_duration = Database.get_meta("contest_duration", type=int, default=0)
contest_extra_time = Database.get_meta("extra_time", type=int, default=0)
if user_extra_time is None:
user_extra_time = 0
return start + contest_duration + contest_extra_time + user_extra_time
@staticmethod
def get_window_end_time(user_extra_time, start_delay):
"""
Compute the end time for a window started after `start_delay` and with `extra_time` delay for the user.
Note that this time may exceed the contest end time, additional checks are required.
:param user_extra_time: Extra time specific for the user in seconds
:param start_delay: The time (in seconds) after the start of the contest of when the window started
:return: The timestamp at which the window ends. If the contest has no window None is returned.
"""
if start_delay is None:
return None
start = Database.get_meta("start_time", type=int)
if start is None:
return None
window_duration = Database.get_meta("window_duration", None, type=int)
if window_duration is None:
return None
if user_extra_time is None:
user_extra_time = 0
return start + user_extra_time + start_delay + window_duration
@staticmethod
def format_dates(dct, fields=["date"]):
"""
Given a dict, format all the *fields* fields from int to iso format. The original dict is modified
:param dct: dict to format
:param fields: list of the names of the fields to format
:return: The modified dict
"""
for k, v in dct.items():
if isinstance(v, dict):
dct[k] = BaseHandler.format_dates(v, fields)
elif isinstance(v, list):
for item in v:
BaseHandler.format_dates(item, fields)
elif k in fields and v is not None:
dct[k] = datetime.fromtimestamp(v).isoformat()
return dct
@staticmethod
def _call(method, route_args, request):
"""
This function is MAGIC!
It takes a method, reads its parameters and automagically fetches their values from the request. Type-annotation
is also supported for simple type validation.
The values are fetched, in order, from:
- route_args
- request.form
- general_attrs
- default values
If a parameter is required but not sent, a BadRequest (MISSING_PARAMETERS) error is thrown; if a parameter cannot
be converted to the annotated type, a BadRequest (FORMAT_ERROR) is thrown.
:param method: Method to be called
:param route_args: Arguments of the route
:param request: Request object
:return: The return value of method
"""
kwargs = {}
params = HandlerParams.get_handler_params(method)
general_attrs = {
'_request': request,
'_route_args': route_args,
'_file': {
"content": BaseHandler._get_file_content(request),
"name": BaseHandler._get_file_name(request)
},
'_ip': BaseHandler.get_ip(request)
}
missing_parameters = []
for name, data in params.items():
if name in route_args and name[0] != "_":
kwargs[name] = route_args[name]
elif name in request.form and name[0] != "_":
kwargs[name] = request.form[name]
elif name in general_attrs:
kwargs[name] = general_attrs[name]
elif name == "file" and general_attrs["_file"]["name"] is not None:
kwargs[name] = general_attrs["_file"]
elif data["required"]:
missing_parameters.append(name)
if len(missing_parameters) > 0:
BaseHandler.raise_exc(BadRequest, "MISSING_PARAMETERS",
"The missing parameters are: " + ", ".join(missing_parameters))
for key, value in kwargs.items():
type = params[key]["type"]
if type is None: continue
try:
kwargs[key] = type(value)
except ValueError:
BaseHandler.raise_exc(BadRequest, "FORMAT_ERROR",
"The parameter %s cannot be converted to %s" % (key, type.__name__))
Logger.debug(
"HTTP",
"Received request from %s for endpoint %s%s" %
(
general_attrs['_ip'],
method.__name__,
", with parameters " + ", ".join(
"=".join((kv[0], str(kv[1]))) f |
DBrianKimmel/PyHouse | Project/src/Modules/House/Family/Upb/upb_data.py | Python | mit | 787 | 0 | """
@name: Modules/families/UPB/UPB_data.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2014-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Aug 6, 2014
@summary: This module is for communicating with UPB controllers.
"""
__updated__ = '2020-02-17'
# Import system type stuff
# Import PyMh files
from Modules.House.Lighting.Lights.lights import LightControlInformation
class UPBData(LightControlInformation):
"""
Locally held data about each of the UPB PIM controllers we find.
This is known only within the UPB family package.
"""
def __init__(self):
super(UPBData, self).__init__()
self.UPBAddress = 0
self.UPBPassword = 0
self.UPBNetworkID = 0
# ## END DBK
|
DirectXMan12/nova-hacking | nova/virt/powervm/common.py | Python | apache-2.0 | 9,118 | 0.000658 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import ftplib
import os
import uuid
import paramiko
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.virt.powervm import constants
from nova.virt.powervm import exception
LOG = logging.getLogger(__name__)
class Connection(object):
def __init__(self, host, username, password, port=22, keyfile=None):
self.host = host
self.username = username
self.password = password
self.port = port
self.keyfile = keyfile
def ssh_connect(connection):
"""Method to connect to remote system using ssh protocol.
:param connection: a Connection object.
:returns: paramiko.SSHClient -- an active ssh connection.
:raises: PowerVMConnectionFailed
"""
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(connection.host,
username=connection.username,
password=connection.password,
port=connection.port,
key_filename=connection.keyfile,
timeout=constants.POWERVM_CONNECTION_TIMEOUT)
LOG.debug("SSH connection with %s established successfully." %
connection.host)
# send TCP keepalive packets every 20 seconds
ssh.get_transport().set_keepalive(20)
return ssh
except Exception:
LOG.exception(_('Connection error connecting PowerVM manager'))
raise exception.PowerVMConnectionFailed()
def check_connection(ssh, connection):
"""
Checks the SSH connection to see if the transport is valid.
If the connection is dead, a new connection is created and returned.
:param ssh: an existing paramiko.SSHClient connection.
:param connection: a Connection object.
:returns: paramiko.SSHClient -- an active ssh connection.
:raises: PowerVMConnectionFailed -- if the ssh connection fails.
"""
# if the ssh client is not set or the transport is dead, re-connect
if (ssh is None or
ssh.get_transport() is None or
not ssh.get_transport().is_active()):
LOG.debug("Connection to host %s will be established." %
connection.host)
ssh = ssh_connect(connection)
return ssh
def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
"""Method to execute remote command as root.
:param connection: an active paramiko.SSHClient connection.
:param command: string containing the command to run.
:returns: Tuple -- a tuple of (stdout, stderr)
:raises: processutils.ProcessExecutionError
"""
LOG.debug(_('Running cmd (SSH-as-root): %s') % cmd)
chan = ssh_connection._transport.open_session()
# This command is required to be executed
# in order to become root.
chan.exec_command('ioscli oem_setup_env')
bufsize = -1
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
# We run the command and then call 'exit' to exit from
# super user environment.
stdin.write('%s\n%s\n' % (cmd, 'exit'))
stdin.flush()
exit_status = chan.recv_exit_status()
# Lets handle the error just like processutils.ssh_execute does.
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
# TODO(mikal): I know this is weird, but it needs to be consistent
# with processutils.execute. I will move this method to oslo in
# a later commit.
raise processutils.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=''.join(cmd))
return (stdout, stderr)
def ftp_put_command(connection, local_path, remote_dir):
"""Method to transfer a file via ftp.
:param connection: a Connection object.
:param local_path: path to the local file
:param remote_dir: path to remote destination
:raises: PowerVMFileTransferFailed
"""
try:
ftp = ftplib.FTP(host=connection.host,
user=connection.username,
passwd=connection.password)
ftp.cwd(remote_dir)
name = os.path.split(local_path)[1]
f = open(local_path, "rb")
ftp.storbinary("STOR " + name, f)
f.close()
ftp.close()
except Exception:
LOG.error(_('File transfer to PowerVM manager failed'))
raise exception.PowerVMFTPTransferFailed(ftp_cmd='PUT',
source_path=local_path, dest_path=remote_dir)
def ftp_get_command(connection, remote_path, local_path):
"""Retrieve a file via FTP
:param connection: a Connection object.
:param remote_path: path to the remote file
:param local_path: path to local destination
:raises: PowerVMFileTransferFailed
"""
try:
ftp = ftplib.FTP(host=connection.host,
user=connection.username,
passwd=connection.password)
ftp.cwd(os.path.dirname(remote_path))
name = os.path.basename(remote_path)
LOG.debug(_("ftp GET %(remote_path)s to: %(local_path)s") % locals())
with open(local_path, 'w') as ftpfile:
ftpcmd = 'RETR %s' % name
ftp.retrbinary(ftpcmd, ftpfile.write)
ftp.close()
except Exception:
LOG.error(_("File transfer from PowerVM manager failed"))
raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',
source_path=remote_path, dest_path=local_path)
def aix_path_join(path_one, path_two):
"""Ensures file path is built correctly for remote UNIX system
:param path_one: string of the first file path
:param path_two: string of the second file path
:returns: a uniform path constructed from both strings
"""
if path_one.endswith('/'):
path_one = path_one.rstrip('/')
if path_two.startswith('/'):
path_two = path_two.lstrip('/')
final_path = path_one + '/' + path_two
return final_path
@contextlib.contextmanager
def vios_to_vios_auth(source, dest, conn_info):
"""Context allowing for SSH between VIOS partitions
This context will build an SSH key on the source host, put the key
into the authorized_keys on the destination host, and make the
private key file name available within the context.
The key files and key inserted into authorized_keys will be
removed when the context exits.
:param source: source IP or DNS name
:param dest: destination IP or DNS name
:param conn_info: dictionary object with SSH connection
information for both hosts
"""
KEY_BASE_NAME = "os-%s" % uuid.uuid4().hex
keypair_uuid = uuid.uuid4()
src_conn_obj = ssh_connect(conn_info)
dest_conn_info = Connection(dest, conn_info.username,
conn_info.password)
dest_conn_obj = ssh_connect(dest_conn_info)
def run_command(conn_obj, cmd):
stdout, stderr = processutils.ssh_execute(conn_obj, cmd)
return stdout.strip().splitlines()
def build_keypair_on_source():
mkkey = ('ssh-keygen -f %s -N "" -C %s' %
(KEY_BASE_NAME, keypair_uuid.hex))
ssh_command_as_root(src_conn_obj, mkkey)
chown_ |
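# Illustrative sketch (not part of the original module): typical use of the helpers above.
# The host, credentials and VIOS command are placeholders, not values from the original code.
conn_info = Connection('vios.example.com', 'padmin', 'secret')
ssh = ssh_connect(conn_info)
stdout, stderr = ssh_command_as_root(ssh, 'lsmap -all')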
userzimmermann/zetup.py | zetup/commands/test.py | Python | lgpl-3.0 | 1,403 | 0 | # zetup.py
#
# Zimmermann's Python package setup.
#
# Copyright (C) 2014-2015 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# zetup.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# zetup.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with zetup.py. If not, see <http://www.gnu.org/licenses/>.
"""zetup.commands.test
Defines ``zetup test`` command.
.. moduleauthor:: Stefan Zimmermann <zimmermann.code@gmail.com>
"""
import os
from zetup.process import call
from zetup.zetup import Zetup
from zetup.commands.dev import dev
@Zetup.command()
def test(zfg, args=None):
"""Run configured ``test commands``.
"""
dev(zfg) # first (re)install project in develop mode
for command in zfg.TEST_COMMANDS:
print("zetup: Running %s" % repr(command))
status = call(command, shell=True, env=os.environ)
if status: # ==> error
return status
|
khchine5/lino-welfare | lino_welfare/modlib/aids/mixins.py | Python | agpl-3.0 | 13,679 | 0.001755 | # -*- coding: UTF-8 -*-
# Copyright 2014-2017 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""
Model mixins for `lino_welfare.modlib.aids`.
.. autosummary::
"""
from __future__ import unicode_literals
from builtins import str
import logging
logger = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy as pgettext
from lino.api import dd, rt
from lino import mixins
from etgen.html import E, tostring
from lino.utils.ranges import encompass
from lino.modlib.checkdata.choicelists import Checker
from lino.modlib.users.mixins import UserAuthored
from lino_xl.lib.contacts.mixins import ContactRelated
from lino_xl.lib.excerpts.mixins import Certifiable
from lino.mixins.periods import rangefmt
from .choicelists import ConfirmationStates
from .roles import AidsStaff
def e2text(v):
return tostring(v)
# if isinstance(v, types.GeneratorType):
# return "".join([e2text(x) for x in v])
# if E.iselement(v):
# return tostring(v)
# return unicode(v)
class SignConfirmation(dd.Action):
"""Sign this database object.
This is available if signer is either empty or equals the
requesting user. Except for system managers who can sign as
somebody else by manually setting the signer field before running
this action.
"""
label = pgettext("aids", "Sign")
show_in_workflow = True
show_in_bbar = False
# icon_name = 'flag_green'
required_states = "requested"
help_text = _("You sign this confirmation, making most "
"fields read-only.")
def get_action_permission(self, ar, obj, state):
user = ar.get_user()
if obj.signer_id and obj.signer != user \
and not user.user_type.has_required_roles([AidsStaff]):
return False
return super(SignConfirmation,
self).get_action_permission(ar, obj, state)
def run_from_ui(self, ar, **kw):
obj = ar.selected_rows[0]
def ok(ar):
if not obj.signer_id:
obj.signer = ar.get_user()
obj.state = ConfirmationStates.confirmed
obj.save()
ar.set_response(refresh=True)
d = dict(text=obj.confirmation_text())
d.update(client=e2text(obj.client.get_full_name(nominative=True)))
msg = _("You confirm that %(client)s %(text)s") % d
ar.confirm(ok, msg, _("Are you sure?"))
class RevokeConfirmation(dd.Action):
label = pgettext("aids", "Revoke")
show_in_workflow = True
show_in_bbar = False
# icon_name = 'flag_green'
required_states = "confirmed"
help_text = _("You revoke your signatore from this confirmation.")
def get_action_permission(self, ar, obj, state):
user = ar.get_user()
if obj.signer != user and not user.user_type.has_required_roles([AidsStaff]):
return False
return super(RevokeConfirmation,
self).get_action_permission(ar, obj, state)
def run_from_ui(self, ar, **kw):
obj = ar.selected_rows[0]
def ok(ar):
# obj.signer = None
obj.state = ConfirmationStates.requested
obj.save()
ar.set_response(refresh=True)
d = dict(text=obj.confirmation_text())
d.update(client=e2text(obj.client.get_full_name(nominative=True)))
msg = _("You revoke your confirmation that %(client)s %(text)s") % d
ar.confirm(ok, msg, _("Are you sure?"))
class Confirmable(mixins.DateRange):
"""Base class for both :class:`Granting` and :class:`Confirmation`.
.. attribute:: signer
The agent who has signed or is expected to sign this item.
.. attribute:: state
The confirmation state of this object. Pointer to
:class:`ConfirmationStates`.
"""
class Meta:
abstract = True
manager_roles_required = dd.login_required()
workflow_state_field = 'state'
signer = dd.ForeignKey(
settings.SITE.user_model,
verbose_name=pgettext("aids", "Signer"),
blank=True, null=True,
related_name="%(app_label)s_%(class)s_set_by_signer",
)
state = ConfirmationStates.field(
default=ConfirmationStates.as_callable('requested'))
sign = SignConfirmation()
revoke = RevokeConfirmation()
@classmethod
def on_analyze(cls, site):
cls.CONFIRMED_FIELDS = dd.fields_list(
cls,
cls.get_confirmable_fields())
super(Confirmable, cls).on_analyze(site)
@classmethod
def get_confirmable_fields(cls):
return ''
@classmethod
def setup_parameters(cls, fields):
fields.update(signer=dd.ForeignKey(
settings.SITE.user_model,
verbose_name=pgettext("aids", "Signer"),
blank=True, null=True))
fields.update(state=ConfirmationStates.field(blank=True))
super(Confirmable, cls).setup_parameters(fields)
@classmethod
def get_simple_parameters(cls):
for p in super(Confirmable, cls).get_simple_parameters():
yield p
yield 'signer'
yield 'state'
def full_clean(self):
super(Confirmable, self).full_clean()
if self.signer is None and self.state == ConfirmationStates.confirmed:
self.state = ConfirmationStates.requested
# raise ValidationError(_("Cannot confirm without signer!"))
def get_row_permission(self, ar, state, ba):
"""A signed confirmation cannot be modified, even not by a privileged
user.
"""
if not super(Confirmable, self).get_row_permission(ar, state, ba):
return False
if self.state == ConfirmationStates.confirmed \
and self.signer is not None \
and self.signer != ar.get_user():
return ba.action.readonly
return True
def disabled_fields(self, ar):
if self.state != ConfirmationStates.requested:
return self.CONFIRMED_FIELDS
return super(Confirmable, self).disabled_fields(ar)
def get_printable_context(self, ar=None, **kw):
kw.update(when=self.get_period_text())
return super(Confirmable, self).get_printable_context(ar, **kw)
def confirmation_text(self):
kw = dict()
kw.update(when=self.get_period_text())
at = self.get_aid_type()
if at:
kw.update(what=str(at))
else:
kw.update(what=str(self))
return _("receives %(what)s %(when)s.") % kw
def confirmation_address(self):
at = self.get_aid_type()
if at and at.address_type:
addr = self.client.get_address_by_type(at)
else:
addr = self.client.get_primary_address()
if addr is not None:
return addr.living_at_text()
def get_excerpt_title(self):
at = self.get_aid_type()
if at:
return at.get_excerpt_title()
return str(self)
@dd.python_2_unicode_compatible
class Confirmation(
Confirmable, UserAuthored, ContactRelated,
mixins.Created, Certifiable):
"""Base class for all aid confirmations.
Subclassed by :class:`SimpleConfirmation
<lino_welfare.modlib.aids.models.SimpleConfirmation>`,
:class:`IncomeConfirmation
<lino_welfare.modlib.aids.models.IncomeConfirmation>` and
:class:`Refund |
atilag/qiskit-sdk-py | qiskit/extensions/standard/gatestools.py | Python | apache-2.0 | 1,248 | 0 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Gates tools."""
from qiskit import InstructionSet
from qiskit import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
def attach_gate(element, quantum_register, gate, gate_class):
"""Attach a gate."""
if isinstance(quantum_register, QuantumRegister):
gs = InstructionSet()
for _ in range(quantum_register.size):
gs.add(gate)
return gs
element._check_qubit(quantum_register)
return element._attach(gate_class)
|
sfstpala/png.py | png.py | Python | mit | 2,602 | 0.001922 |
# Copyright (C) 2011 by Stefano Palazzo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import zlib
import struct
import subprocess
def make(image: '[[[r, g, b, a, ], ], ]') -> bytes:
'''
Create PNG image from RGBA data
Expects a list of lines of pixels of R, G, B, and Alpha values.
I.e: [
[ [0, 0, 0, 0], [0, 0, 0, 0], ],
[ [0, 0, 0, 0], [0, 0, 0, 0], ],
]
'''
def cr_png(buf, width, height):
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) + chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
raw_data = (b"".join(b'\x00' + buf[span:span + (width * 4)]
for span in range((height - 1) * (width * 4), -1, - (width * 4))))
return b"".join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height,
8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
def make_buffer(image):
def bufgen(nested):
for i in nested[::-1]:
for j in i:
for k in j if len(j) == 4 else list(j) + [255]:
yield k
height, width = len(image), len(image[0])
return bytes(bufgen(image)), width, height
return cr_png(*make_buffer(image))
def show(png):
open("/tmp/test.png", "wb").write(png)
subprocess.getoutput("xdg-open /tmp/test.png")
subprocess.getoutput("rm /tmp/test.png")
|
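# Illustrative sketch (not part of the original module): building a 2x2 image with make().
# The nested-list layout follows the docstring; three-value pixels get an implicit alpha of 255.
image = [
    [[255, 0, 0, 255], [0, 255, 0]],      # top row: red, green
    [[0, 0, 255], [255, 255, 255, 255]],  # bottom row: blue, white
]
with open("example.png", "wb") as handle:
    handle.write(make(image))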
caser789/xuejiao-blog | app/main/__init__.py | Python | mit | 219 | 0.013699 | from flask import Blueprint
main = Blueprint('main', __name__)
from . import errors, views
from ..models import Permission
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
|
croscon/fleaker | fleaker/marshmallow/json_schema.py | Python | bsd-3-clause | 7,584 | 0 | # ~*~ coding: utf-8 ~*~
"""Module that contains a Marshmallow schema that generate JSON schemas.
JSON Schemas can be a pain to write by hand. For example, the product
requirements change, thus your schema changes. If you are maintaining your
schemas by hand, you have to go through all of them and update them, or, even
worse, you just don't maintain them. With this class, you should never need to
hand write a JSON Schema again. Just pass your schema to it and it'll generate
it for you.
Example:
This module is super easy to use. All you need to do is pass a schema or
a Python path to a schema and this library will do the rest for you!
.. code-block:: python
# This is the schema we want to generate the schema for.
class UserSchema(Schema):
first_name = fields.String(**STR_REQUIRED)
last_name = fields.String(**STR_REQUIRED)
phone = PhoneNumberField(**REQUIRED)
company_id = ForeignKeyField(**REQUIRED)
joined = PendulumField(format='iso', **REQUIRED)
last_login = ArrowField(allow_none=True, format='iso')
class Meta(object):
# This will dictate the filename that this schema will be
# dumped to. If not provided, the filename will be
# UserSchema.json
json_schema_filename = 'user.json'
# You can dump the schema to a file in a folder
json_schema = FleakerJSONSchema.write_schema_to_file(
# This library doesn't care if the schema has been initialized
UserSchema,
# The folder to write this schema to
folder='docs/raml/schemas',
# The context can control certain things about how the schema will
# be dumped.
context={'dump_schema': True}
)
# Now, you can find the dumped schema in docs/raml/schemas/user.json
# You also have the end result stored in the json_schema variable
# If you'd like for fine grained control over the filename or want to
# use the file object further, a file pointer can be passed to the
# creation method.
with open('user_schema.json', 'w') as fp:
FleakerJSONSchema.write_schema_to_file(UserSchema, file_pointer=fp)
# Maybe you just want the schema in dict form. Super easy.
json_schema = FleakerJSONSchema.generate_json_schema(
# For all creation methods in this module can be loaded either by
# the instance/class of the schema or by passing a Python path to
# it, like so.
'app.schemata.user.UserSchema'
)
"""
import decimal
import json
import os.path
from inspect import isclass
from importlib import import_module
from sys import stdout
from marshmallow import Schema
from marshmallow_jsonschema import JSONSchema
from marshmallow_jsonschema.base import TYPE_MAP
from fleaker._compat import string_types
from fleaker.constants import DEFAULT_DICT, MISSING
# Update the built in TYPE_MAP to match our style better
TYPE_MAP.update({
int: {
'type': 'integer',
},
float: {
'type': 'number',
},
decimal.Decimal: {
'type': 'number',
},
})
class FleakerJSONSchema(JSONSchema):
"""Marshmallow schema that can be used to generate JSON schemas."""
@classmethod
def generate_json_schema(cls, schema, context=DEFAULT_DICT):
"""Generate a JSON Schema from a Marshmallow schema.
Args:
schema (marshmallow.Schema|str): The Marshmallow schema, or the
Python path to one, to create the JSON schema for.
Keyword Args:
context (dict, optional): The Marshmallow context to be pushed to
the schema when generating the JSON Schema.
Returns:
dict: The JSON schema in dictionary form.
"""
schema = cls._get_schema(schema)
# Generate the JSON Schema
return cls(context=context).dump(schema).data
@classmethod
def write_schema_to_file(cls, schema, file_pointer=stdout,
folder=MISSING, context=DEFAULT_DICT):
"""Given a Marshmallow schema, create a JSON Schema for it.
Args:
schema (marshmallow.Schema|str): The Marshmallow schema, or the
Python path to one, to create the JSON schema for.
Keyword Args:
file_pointer (file, optional): The pointer to the file to write
this schema to. If not provided, the schema will be dumped to
``sys.stdout``.
folder (str, optional): The folder in which to save the JSON
schema. The name of the schema file can be optionally
controlled my the schema's ``Meta.json_schema_filename``. If
that attribute is not set, the class's name will be used for
the filename. If writing the schema to a specific file is
desired, please pass in a ``file_pointer``.
context (dict, optional): The Marshmallow context to be pushed to
the schema when it generates the JSON Schema.
Returns:
dict: The JSON schema in dictionary form.
"""
schema = cls._get_schema(schema)
json_schema = cls.generate_json_schema(schema, context=context)
if folder:
schema_filename = getattr(
schema.Meta,
'json_schema_filename',
'.'.join([schema.__class__.__name__, 'json'])
)
json_path = os.path.join(folder, schema_filename)
file_pointer = open(json_path, 'w')
json.dump(json_schema, file_pointer, indent=2)
return json_schema
@classmethod
def _get_schema(cls, schema):
"""Method that will fetch a Marshmallow schema flexibly.
Args:
schema (marshmallow.Schema|str): Either the schema class, an
instance of a schema, or a Python path to a schema.
Returns:
marshmallow.Schema: The desired schema.
Raises:
TypeError: This is raised if the provided object isn't
a Marshmallow schema.
"""
if isinstance(schema, string_types):
schema = cls._get_object_from_python_path(schema)
if isclass(schema):
schema = schema()
if not isinstance(schema, Schema):
raise TypeError("The schema must be a path to a Marshmallow "
"schema or a Marshmallow schema.")
return schema
@staticmethod
def _get_object_from_python_path(python_path):
"""Method that will fetch a Marshmallow schema from a path to it.
Args:
python_path (str): The string path to the Marshmallow schema.
Returns:
marshmallow.Schema: The schema matching the provided path.
Raises:
TypeError: This is raised if the specified object isn't
a Marshmallow schema.
"""
# Dissect the path
python_path = python_path.split('.')
module_path = python_path[:-1]
object_class = python_path[-1]
if isinstance(module_path, list):
module_path = '.'.join(module_path)
# Grab the object
module = import_module(module_path)
schema = getattr(module, object_class)
if isclass(schema):
schema = schema()
return schema
|
stonebig/keras | keras/layers/convolutional.py | Python | mit | 5,924 | 0.011648 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from .. import activations, initializations
from ..utils.theano_utils import shared_zeros
from ..layers.core import Layer
class Convolution1D(Layer):
def __init__(self, nb_filter, stack_size, filter_length,
init='uniform', activation='linear', weights=None,
border_mode='valid', subsample_length=1,
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
nb_row = 1
nb_col = filter_length
self.nb_filter = nb_filter
self.stack_size = stack_size
self.filter_length = filter_length
self.subsample_length = subsample_length
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = (1, subsample_length)
self.border_mode = border_mode
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
conv_out = theano.tensor.nnet.conv.conv2d(X, self.W,
border_mode=self.border_mode, subsample=self.subsample)
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
def get_config(self):
return {"name":self.__class__.__name__,
"nb_filter":self.nb_filter,
"stack_size":self.stack_size,
"filter_length":self.filter_length,
"init":self.init.__name__,
"activation":self.activation.__name__,
"border_mode":self.border_mode,
"subsample_length":self.subsample_length}
class MaxPooling1D(Layer):
def __init__(self, pool_length=2, ignore_border=True):
self.pool_length = pool_length
self.poolsize = (1, pool_length)
self.ignore_border = ignore_border
self.input = T.tensor4()
self.params = []
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"pool_length":self.pool_length,
"ignore_border":self.ignore_border}
class Convolution2D(Layer):
def __init__(self, nb_filter, stack_size, nb_row, nb_col,
init='glorot_uniform', activation='linear', weights=None,
border_mode='valid', subsample=(1, 1),
W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
super(Convolution2D,self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = subsample
self.border_mode = border_mode
self.nb_filter = nb_filter
self.stack_size = stack_size
self.nb_row = nb_row
self.nb_col = nb_col
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
if W_regularizer:
W_regularizer.set_param(self.W)
self.regularizers.append(W_regularizer)
if b_regularizer:
b_regularizer.set_param(self.b)
self.regularizers.append(b_regularizer)
if activity_regularizer:
activity_regularizer.set_layer(self)
self.regularizers.append(activity_regularizer)
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
conv_out = theano.tensor.nnet.conv.conv2d(X, self.W,
border_mode=self.border_mode, subsample=self.subsample)
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
def get_config(self):
return {"name":self.__class__.__name__,
"nb_filter":self.nb_filter,
"stack_size":self.stack_size,
"nb_row":self.nb_row,
"nb_col":self.nb_col,
"init":self.init.__name__,
"activation":self.activation.__name__,
"border_mode":self.border_mode,
"subsample":self.subsample}
class MaxPooling2D(Layer):
def __init__(self, poolsize=(2, 2), ignore_border=True):
super(MaxPooling2D,self).__init__()
self.input = T.tensor4()
self.poolsize = poolsize
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"poolsize":self.poolsize,
"ignore_border":self.ignore_border}
# class ZeroPadding2D(Layer): TODO
# class Convolution3D: TODO
# class MaxPooling3D: TODO
|
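# Illustrative sketch (not part of the original module): instantiating the layers above as a
# smoke test. Assumes this era of Keras with a working Theano install; only classes defined
# in this file are used.
conv = Convolution2D(nb_filter=32, stack_size=3, nb_row=3, nb_col=3,
                     activation='relu', border_mode='valid')
pool = MaxPooling2D(poolsize=(2, 2))
print(conv.get_config())
print(pool.get_config())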
andhit-r/odoo-saas-tools | saas_portal/models/saas_portal.py | Python | lgpl-3.0 | 18,733 | 0.00315 | # -*- coding: utf-8 -*-
import openerp
from openerp import models, fields, api, SUPERUSER_ID, exceptions
from openerp.addons.saas_utils import connector, database
from openerp import http
from openerp.tools import config, scan_languages
from openerp.tools.translate import _
from openerp.addons.base.res.res_partner import _tz_get
import time
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from oauthlib import common as oauthlib_common
import urllib2
import simplejson
import werkzeug
import requests
import random
import logging
_logger = logging.getLogger(__name__)
class SaasPortalServer(models.Model):
_name = 'saas_portal.server'
_description = 'SaaS Server'
_rec_name = 'name'
_inherit = ['mail.thread']
_inherits = {'oauth.application': 'oauth_application_id'}
name = fields.Char('Database name')
oauth_application_id = fields.Many2one('oauth.application', 'OAuth Application', required=True, ondelete='cascade')
sequence = fields.Integer('Sequence')
active = fields.Boolean('Active', default=True)
request_scheme = fields.Selection([('http', 'http'), ('https', 'https')], 'Scheme', default='http', required=True)
verify_ssl = fields.Boolean('Verify SSL', default=True, help="verify SSL certificates for HTTPS requests, just like a web browser")
request_port = fields.Integer('Request Port', default=80)
client_ids = fields.One2many('saas_portal.client', 'server_id', string='Clients')
@api.model
def create(self, vals):
self = super(SaasPortalServer, self).create(vals)
self.create_access_token(self.oauth_application_id.id)
return self
@api.model
def create_access_token(self, oauth_application_id):
expires = datetime.now() + timedelta(seconds=60*60)
vals = {
'user_id': self.env.user.id,
'scope': 'userinfo',
'expires': expires.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'token': oauthlib_common.generate_token(),
'application_id': oauth_application_id
}
return self.env['oauth.access_token'].create(vals)
@api.one
def _request_params(self, path='/web', scheme=None, port=None, state={}, scope=None, client_id=None):
scheme = scheme or self.request_scheme
port = port or self.request_port
scope = scope or ['userinfo', 'force_login', 'trial', 'skiptheuse']
scope = ' '.join(scope)
client_id = client_id or self.env['saas_portal.client'].generate_client_id()
params = {
'scope': scope,
'state': simplejson.dumps(state),
'redirect_uri': '{scheme}://{saas_server}:{port}{path}'.format(scheme=scheme, port=port, saas_server=self.name, path=path),
'response_type': 'token',
'client_id': client_id,
}
return params
@api.one
def _request(self, **kwargs):
params = self._request_params(**kwargs)[0]
url = '/oauth2/auth?%s' % werkzeug.url_encode(params)
return url
@api.one
def _request_server(self, path=None, scheme=None, port=None, **kwargs):
scheme = scheme or self.request_scheme
port = port or self.request_port
params = self._request_params(**kwargs)[0]
access_token = self.env['oauth.access_token'].sudo().search([('application_id', '=', self.oauth_application_id.id)], order='id DESC', limit=1)
access_token = access_token[0].token
params.update({
'token_type': 'Bearer',
'access_token': access_token,
'expires_in': 3600,
})
url = '{scheme}://{saas_server}:{port}{path}?{params}'.format(scheme=scheme, saas_server=self.name, port=port, path=path, params=werkzeug.url_encode(params))
return url
@api.multi
def action_redirect_to_server(self):
r = self[0]
url = '{scheme}://{saas_server}:{port}{path}'.format(scheme=r.request_scheme, saas_server=r.name, port=r.request_port, path='/web')
return {
'type': 'ir.actions.act_url',
'target': 'new',
'name': 'Redirection',
'url': url
}
@api.model
def action_sync_server_all(self):
self.search([]).action_sync_server()
@api.one
def action_sync_server(self):
state = {
'd': self.name,
'client_id': self.client_id,
}
url = self._request_server(path='/saas_server/sync_server', state=state, client_id=self.client_id)[0]
res = requests.get(url, verify=(self.request_scheme == 'https' and self.verify_ssl))
if res.ok != True:
msg = """Status Code - %s
Reason - %s
URL - %s
""" % (res.status_code, res.reason, res.url)
raise Warning(msg)
data = simplejson.loads(res.text)
for r in data:
r['server_id'] = self.id
client = self.env['saas_portal.client'].search([
('client_id', '=', r.get('client_id')),
])
if not client:
database = self.env['saas_portal.database'].search([('client_id', '=', r.get('client_id'))])
if database:
database.write(r)
continue
client = self.env['saas_portal.client'].create(r)
else:
client.write(r)
return None
@api.model
def get_saas_server(self):
saas_server_list = self.env['saas_portal.server'].sudo().search([])
return saas_server_list[random.randint(0, len(saas_server_list) - 1)]
class SaasPortalPlan(models.Model):
_name = 'saas_portal.plan'
name = fields.Char('Plan', required=True)
summary = fields.Char('Summary')
template_id = fields.Many2one('saas_portal.database', 'Template')
demo = fields.Boolean('Install Demo Data')
def _get_default_lang(self):
return self.env.lang
def _default_tz(self):
return self.env.user.tz
lang = fields.Selection(scan_languages(), 'Language', default=_get_default_lang)
tz = fields.Selection(_tz_get, 'TimeZone', default=_default_tz)
sequence = fields.Integer('Sequence')
state = fields.Selection([('draft', 'Draft'), ('confirmed', 'Confirmed')],
'State', compute='_get_state', store=True)
expiration = fields.Integer('Expiration (hours)', help='time to delete database. Use for demo')
required_addons_ids = fields.Many2many('ir.module.module',
relation='plan_required_addons_rel',
column1='plan_id', column2='module_id',
string='Required Addons')
optional_addons_ids = fields.Many2many('ir.module.module',
relation='plan_optional_addons_rel',
column1='plan_id', column2='module_id',
string='Optional Addons')
_order = 'sequence'
dbname_template = fields.Char('DB Names', help='Template for db name. Use %i for numbering. Ignore if you use manually created db names', placeholder='crm-%i.odoo.com')
server_id = fields.Many2one('saas_portal.server', string='SaaS Server',
help='User this saas server or choose random')
website_description = fields.Text('Website description')
logo = fields.Binary('Logo')
@api.one
@api.depends('template_id.state')
def _get_state(self):
if self.template_id.state == 'template':
self.state = 'confirmed'
else:
self.state = 'draft'
@api.one
def _new_database_vals(self, vals):
if self.expiration:
now = datetime.now()
delta = timedelta(hours=self.expiration)
vals['expiration_datetime'] = (now + delta).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return vals
@api.one
def create_new_database(self, dbname=None, client_id=None, partner_id=None):
server = self.server_id
if not server:
|
softcert/vsroom | vsroom/common/sanitizers/tallinnairport.py | Python | mit | 1,903 | 0.008408 | import time
from vsroom.common import sanitizer
from vsroom.common import timeconversion
# sanitizer.Sanitizer is the base class for a simple sanitizer bot.
class TallinnAirportSanitizer(sanitizer.Sanitizer):
# .sanitize(event) is the hook method for sanitizing events. This
# is the only method you have to implement to create a basic
# normalizer, sanitizer, modifier or filter.
def sanitize(self, event):
# if only one key (id) -> clearing event. No use to
# add sanitized stuff
if len(event.keys()) < 2:
return [event]
# Modify and create events here.
utilization = event.value('utilization',None)
try:
uvalue = float(utilization)
except ValueError, e:
pass
else:
event.clear('status')
event.clear('problem')
if uvalue >99:
event.add('status','0')
elif uvalue > 93:
event.add('problem', '10')
event.add('status', '10')
else:
event.add('problem', '50')
event.add('status', '50')
event.add("sector","transport")
event.add("organization", "TLL")
event.add("type", "utilization")
event.add("asset","TLL")
event.add("event type", "utilization")
isotime = timeconversion.seconds2iso(time.time())
description = "%s - %s - utilization: %s%% - %s" % \
(isotime,
event.value('organization',''),
utilization,
event.value('source','-'))
event.add("description", description)
# Return a list of events here. The list can contain 0-n events.
return [event]
if __name__ == "__main__":
# Execute the sanitizer bot based on the command line options.
TallinnAirportSanitizer.from_command_line().execute()
|
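# For reference (not part of the original module), the utilization thresholds applied in
# sanitize() above map as follows:
#   utilization > 99         -> status 0
#   93 < utilization <= 99   -> status 10, problem 10
#   utilization <= 93        -> status 50, problem 50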
AdvancedClimateSystems/python-modbus | tests/system/validators.py | Python | mpl-2.0 | 2,302 | 0 | import struct
def validate_transaction_id(request_mbap, response):
""" Check if Transaction id in request and response is equal. """
assert struct.unpack('>H', request_mbap[:2]) == \
struct.unpack('>H', response[:2])
def validate_protocol_id(request_mbap, response):
""" Check if Protocol id in request and response is e | qual. """
assert struct.unpack('>H', request_mbap[2:4]) == \
struct.unpack('>H', response[2:4])
def validate_length(response):
""" Check if Length field contains actual length of response. """
assert struct.unpack('>H', response[4:6])[0] == len(response[6:])
def validate_unit_id(request_mbap, response):
""" Check if Unit id in request and response is equal. """
assert struct.unpack('>B', request_mbap[6:7]) == \
struct.unpack('>B', response[6:7])
def validate_response_mbap(request_mbap, response):
""" Validate if fields in response MBAP contain correct values. """
validate_transaction_id(request_mbap, response)
validate_protocol_id(request_mbap, response)
validate_length(response)
validate_unit_id(request_mbap, response)
def validate_function_code(request, response):
""" Validate if Function code in request and response equal. """
assert struct.unpack('>B', request[7:8])[0] == \
struct.unpack('>B', response[7:8])[0]
def validate_single_bit_value_byte_count(request, response):
""" Check of byte count field contains actual byte count and if byte count
matches with the amount of requests quantity.
"""
byte_count = struct.unpack('>B', response[8:9])[0]
quantity = struct.unpack('>H', request[-2:])[0]
expected_byte_count = quantity // 8
if quantity % 8 != 0:
expected_byte_count = (quantity // 8) + 1
assert byte_count == len(response[9:])
assert byte_count == expected_byte_count
def validate_multi_bit_value_byte_count(request, response):
""" Check of byte count field contains actual byte count and if byte count
matches with the amount of requests quantity.
"""
byte_count = struct.unpack('>B', response[8:9])[0]
quantity = struct.unpack('>H', request[-2:])[0]
expected_byte_count = quantity * 2
assert byte_count == len(response[9:])
assert byte_count == expected_byte_count
|
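# Illustrative sketch (not part of the original module): a self-contained check of the
# validators above. The frames are fabricated -- a read-coils request for 10 coils
# (transaction 1, unit 1) and a matching reply carrying two coil-status bytes.
request = struct.pack('>HHHBBHH', 1, 0, 6, 1, 1, 0, 10)            # MBAP + function 1 PDU
response = struct.pack('>HHHBBBBB', 1, 0, 5, 1, 1, 2, 0x03, 0x00)  # byte count 2, 2 data bytes
validate_response_mbap(request[:7], response)
validate_function_code(request, response)
validate_single_bit_value_byte_count(request, response)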
dherbst/python-icontact | icontact/client.py | Python | apache-2.0 | 23,268 | 0.004255 | # Copyright 2008 Online Agility (www.onlineagility.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from django.utils import simplejson
except ImportError:
import simplejson
import httplib
import urllib
import urllib2
import urlparse
import logging
from datetime import tzinfo, timedelta
# python 2.5+ has ElementTree included in it's core
try:
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
except ImportError:
from elementtree import ElementTree
from elementtree.ElementTree import Element, SubElement
from dateutil.parser import parse
def json_to_obj(json):
if isinstance(json, list):
json = [json_to_obj(x) for x in json]
if not isinstance(json, dict):
return json
class Object(object):
def __repr__(self):
return 'icontact.client.Object(%s)' % repr(self.__dict__)
o = Object()
for k in json:
o.__dict__[k] = json_to_obj(json[k])
return o
class ExcessiveRetriesException(Exception):
"""
A standard exception that represents a potentially transient fault
where an an iContact API client fails to perform an operation more
than `self.max_retry_count` times.
"""
pass
class IContactServerError(Exception):
def __init__(self, http_status, errors):
self.http_status = http_status
self.errors = errors
def __str__(self):
return '%s: %s' % (self.http_status, '\n'.join(self.errors))
class IContactClient(object):
"""Perform operations on the iContact API."""
ICONTACT_API_URL = 'https://app.icontact.com/icp/'
ICONTACT_SANDBOX_API_URL = 'https://app.sandbox.icontact.com/icp/'
NAMESPACE = 'http://www.w3.org/1999/xlink'
def __init__(self, api_key, username, password, auth_handler=None,
max_retry_count=5, account_id=None, client_folder_id=None, url=ICONTACT_API_URL):
"""
- api_key: the API Key assigned for the OA iContact client
- username: the iContact web site login username
- password:
This is the password registered for the API client, also known
as the "API Application Password". It is *not* the standard
web site login password.
- max_retry_count: (Optional) Retry limit for logins or
rate-limited operations.
- auth_handler: (Optional) An object that implements two callback
methods that this client will invoke when it generates, or
requires, authentication credentials. The authentication handler
object can be used to easily share credentials among multiple
IContactClient instances.
The authentication handler object must implement credential
getter and setter methods::
get_credentials() => (token,sequence)
set_credentials(token,sequence)
"""
self.api_key = api_key
self.api_version = "2.2"
self.username = username
self.password = password
self.auth_handler = auth_handler
self.log = logging.getLogger('icontact')
self.max_retry_count = max_retry_count
self.account_id = account_id
self.client_folder_id = client_folder_id
# Track number of retries we have performed
self.retry_count = 0
self.url = url
def _get_account_id(self):
self.account_id = self.account().accountId
return self.account_id
def _get_client_folder_id(self):
self.client_folder_id = self.clientfolder(self.account_id).clientFolderId
return self.client_folder_id
def _do_request(self, call_path, parameters={}, method='get', type='json'):
"""
Performs an API request and returns the resultant json object.
If type='xml' is passed in, returns XML document as an
xml.etree.ElementTree node. An Exception is thrown if the operation
results in an error response from iContact, or if there is no
authentication information available (ie login has not been called)
This method does all the hard work for API operations: building the
URL path; adding auth headers; sending the request to iContact;
evaluating the response; and parsing the respones to an XML node.
"""
# Check whether this method call was a retry that exceeds the retry limit
if self.retry_count > self.max_retry_count:
raise ExcessiveRetriesException("Exceeded maximum retry count (%d)" % self.max_retry_count)
params = dict(parameters)
data = None
if method.lower() == 'get' and len(params) > 0:
url = "%s%s?%s" % (self.url, call_path, urllib.urlencode(params))
else:
url = "%s%s" % (self.url, call_path)
data = simplejson.dumps(params)
self.log.debug(u"Invoking API method %s with URL: %s" % (method, url))
if type == 'xml':
type_header = 'text/xml'
else:
type_header = 'application/json'
headers = {'Accept':type_header,
'Content-Type':type_header,
'Api-Version':self.api_version,
'Api-AppId':self.api_key,
'Api-Username':self.username,
'API-Password':self.password }
# TODO: try request for urllib2.HTTPError for 503 to do rate limit retry
if method.lower() != 'get':
# Perform a PUT request
self.log.debug(u'%s Request %s body: %s' % (method, url, data))
scheme, host, path, params, query, fragment = urlparse.urlparse(url)
conn = httplib.HTTPSConnection(host, 443)
conn.request(method.upper(), path , data, headers)
response = conn.getresponse()
self.log.debug("response.status=%s msg=%s headers=%s" %
(response.status, response.msg, response.getheaders(),))
response_status = response.status
else:
# Perform a GET request
req = urllib2.Request(url, None, headers)
self.log.debug("GET headers=%s url=%s" % (req.headers,url))
response = urllib2.urlopen(req)
response_status = response.code
if type == 'xml':
result = ElementTree.fromstring(response.read())
self.log.debug(u'Response body:\n%s' % (ElementTree.tostring(result),))
else: |
# type is json
jsondata = response.read()
self.log.debug(u"json response=\n%s" % (jsondata,))
result = simplejson.loads(jsondata)
result = json_to_obj(result)
if response_status >= 400:
raise IContactServerError(response_status, result.errors)
# Reset retry count to 0 since we have a successful response
self.retry_count = 0
return result
def _get_query_string(self, params={}):
if params:
query_string = '?' + '&'.join([k+'='+urllib.quote(str(v)) for (k,v) in params.items()])
else:
query_string = ''
return query_string
def _parse_stats(self, node):
"""
Parses statistics information from a 'stats' XML node that will
be present in an iContact API response to the
message_delivery_details and message_stats methods. The parsed
information is returned as a dictionary of dictionaries.
"""
def summary_to_dict(stats_node):
if stats_node == None:
return None
summary = dict(
count=int(stats_node.get('count') or '0'),
percent=float( |
dts-ait/qgis-edge-bundling | summarize.py | Python | gpl-2.0 | 4,463 | 0.015685 | ##Edge bundling=group
##input_layer=vector
##cluster_field=field input_layer
##weight_field=field input_layer
##use_weight_field=boolean true
##max_distance=number 0.008
##collapsed_lines=output vector
from qgis.core import *
from qgis.gui import *
import qgis.utils
from PyQt4.QtCore import *
import processing
from processing.tools.vector import VectorWriter
from datetime import datetime
class Edge(QgsFeature):
def __init__(self, feature, weight=1):
super(Edge,self).__init__(feature)
if use_weight_field:
self.weight = float(self.attributes()[weight_index]) #sel | f[weight_field]
else:
self.weight = weight
self.agg_weight = self.weight
def increase_weight(self,value=1):
self.agg_weight += value
def get_weight(self):
return self.weight
def get_agg_weight(self):
return self.agg_weight
class EdgeCluster():
def __init__(self,edges):
self.edges | = edges
self.index = QgsSpatialIndex()
for e in self.edges:
self.index.insertFeature(e)
self.allfeatures = {edge.id(): edge for (edge) in self.edges}
def get_size(self):
return len(self.edges)
def collapse_lines(self):
ids_to_delete = []
for edge1 in self.edges:
geom1 = edge1.geometry()
            # get other edges in the vicinity
tolerance = min(max_distance,geom1.length()/2)
ids = self.index.intersects(edge1.geometry().buffer(tolerance,4).boundingBox())
for id in ids:
edge2 = self.allfeatures[id]
if edge1.id()>edge2.id():
geom2 = edge2.geometry()
d0 = geom1.vertexAt(0).distance(geom2.vertexAt(0))
d1 = geom1.vertexAt(1).distance(geom2.vertexAt(1))
distance = d0 + d1
if d0 <= (tolerance/2) and d1 <= (tolerance/2):
edge1.increase_weight(edge2.get_weight())
edge2.increase_weight(edge1.get_weight())
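                        # In other words: when both endpoints of edge1 lie within
                        # tolerance/2 of the corresponding endpoints of edge2, the
                        # two segments are treated as duplicates and each absorbs
                        # the other's base weight; the aggregated value is later
                        # written out as the MERGED_N attribute.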
return ids_to_delete
t_start = datetime.now()
print '{0}: Collapsing lines'.format(t_start)
layer = processing.getObject(input_layer)
crs = layer.crs()
provider = layer.dataProvider()
fields = provider.fields()
fields.append(QgsField('SEG_ID',QVariant.Int))
fields.append(QgsField('MERGED_N', QVariant.Double))
writer = processing.VectorWriter(collapsed_lines, None, fields, QGis.WKBLineString, crs)
features = list(processing.features(layer))
weight_index = provider.fieldNameIndex(weight_field)
# get all labels from the input features
labels = []
for feat in features:
labels.append(int(feat[cluster_field]))
# one cluster per label
clusters = []
for l in range(0,max(labels)+1):
clusters.append(list())
# populate clusters
vl = QgsVectorLayer("LineString", "line_segments", "memory")
pr = vl.dataProvider()
pr.addAttributes(fields)
vl.updateFields()
feature_id = 0
#all_segments = []
for i,label in enumerate(labels):
attrs = features[i].attributes()
polyline = features[i].geometry().asPolyline()
fet = QgsFeature()
for j in range(0,len(polyline)-1):
g = QgsGeometry.fromPolyline([polyline[j],polyline[j+1]])
fet.setGeometry(g)
fet.setAttributes(attrs+[j])
fet.setFeatureId(feature_id)
feature_id += 1
edge = Edge(fet)
#all_segments.append(edge)
if label >= 0:
clusters[label].append(edge)
else:
clusters.append(edge)
#pr.addFeatures(all_segments)
for i,cluster in enumerate(clusters):
clusters[i] = EdgeCluster(cluster)
# collapse lines
ids_to_delete = []
for i,cluster in enumerate(clusters):
print 'Cluster #{0} (size: {1})'.format(i,cluster.get_size())
ids_to_delete += cluster.collapse_lines()
# create output
for cluster in clusters:
for g,edge in enumerate(cluster.edges):
#if edge.id() not in ids_to_delete:
fet = QgsFeature()
fet.setGeometry( edge.geometry() )
attrs = edge.attributes()
attrs.append(int(edge.get_agg_weight()))
fet.setAttributes( attrs )
writer.addFeature( fet )
del writer
t_end = datetime.now()
print '{0}: Finished!'.format(t_end)
print 'Run time: {0}'.format(t_end - t_start) |
rodrigopitanga/django-smssync | smssync/__init__.py | Python | gpl-3.0 | 772 | 0 | # -*- coding: utf-8 -*-
#
# (C) 2016 Rodrigo Rodrigues da Silva <pitanga@members.fsf.org>
#
# This file is part of django- | smssync
#
# django-smssync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# d | jango-smssync is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-smssync. If not, see <http://www.gnu.org/licenses/>.
|
aammd/python-package-example | myPackage/__init__.py | Python | mit | 22 | 0.045455 | from . | import myModul | e |
rosecatherinek/TransNets | src/DatasetUtils/DataPairMgr.py | Python | gpl-3.0 | 1,772 | 0.02088 | '''
read the input data, parse to int list;
create mappings of (user,item) -> review int list
@author: roseck
@date Mar 15, 2017
'''
from __builtin__ import dict
import gzip
class DataPairMgr():
def _int_list(self,int_str):
'''utility fn for converting an int string to a list of int
'''
return [int(w) for w in int_str.split()]
def __init__(self, filename):
'''
filename: inits the UBRR data from the input file
'''
ub_map = dict()
ub_ratings = dict()
cnt = 0
#read the file
if filename.endswith('.gz'):
f = gzip.open(filename, 'r')
else:
f = open(filename, 'r')
for line in f:
vals = line.split("\t")
if len(vals) == 0:
continue
u = vals[0]
b = vals[1]
r = float(vals[2])
d = vals[3].strip()
ub_map[(u,b)] = self._int_list(d)
ub_ratings[(u,b)] = r
cnt += 1
self.user_item_map = ub_map
self.user_item_rating = ub_ratings
f.close()
print 'Data Pair Manager Initialized with ', cnt, ' reviews'
def get_int_review(self, user, item):
if (user,item) in self.user_item_map:
retur | n self.user_item_map[(user,item)]
else:
return [0]
def get_int_review_rating(self, user, item):
if (user,item) in self.user_item_map:
return self.user_item_map[(user,item)], self.user_item_rating[(user,item)]
else:
return [0], 3.0 #average rating
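# Hedged usage sketch (the file name below is hypothetical): each input line is
# expected to be "user<TAB>item<TAB>rating<TAB>space-separated word ids".
#
#   mgr = DataPairMgr('train_user_item_reviews.tsv.gz')
#   words, rating = mgr.get_int_review_rating('u123', 'i456')
#   # `words` is the review as a list of int word ids; unseen (user, item)
#   # pairs fall back to ([0], 3.0), i.e. an empty review with average rating.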
| |
michaelkuty/horizon-contrib | horizon_contrib/utils/__init__.py | Python | bsd-3-clause | 79 | 0 | from horiz | on_contrib.utils.dotdict import dotdict, | list_to_dotdict, to_dotdict
|
dimagi/commcare-hq | corehq/motech/fhir/tests/test_models.py | Python | bsd-3-clause | 21,406 | 0.00028 | import doctest
from contextlib import contextmanager
from django.core.exceptions import ValidationError
from django.db import IntegrityError, transaction
from django.db.models import ProtectedError
from django.test import TestCase
from nose.tools import assert_in
from corehq.apps.data_dictionary.models import CaseProperty, CaseType
from corehq.apps.users.models import CommCareUser
from corehq.motech.const import IMPORT_FREQUENCY_DAILY
from corehq.motech.exceptions import ConfigurationError
from corehq.motech.fhir import models
from corehq.motech.models import ConnectionSettings
from corehq.motech.value_source import CaseProperty as CasePropertyValueSource
from corehq.motech.value_source import ValueSource
from ..const import (
FHIR_VERSION_4_0_1,
OWNER_TYPE_GROUP,
OWNER_TYPE_LOCATION,
OWNER_TYPE_USER,
)
from ..models import (
FHIRImportConfig,
FHIRImportResourceProperty,
FHIRImportResourceType,
FHIRResourceProperty,
FHIRResourceType,
ResourceTypeRelationship,
)
DOMAIN = 'test-domain'
class TestCaseWithConnectionSettings(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.conn = ConnectionSettings.objects.create(
domain=DOMAIN,
name='Test ConnectionSettings',
url='https://example.com/api/',
)
@classmethod
def tearDownClass(cls):
cls.conn.delete()
super().tearDownClass()
class TestFHIRImportConfig(TestCaseWithConnectionSettings):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = CommCareUser(
domain=DOMAIN,
username=f'bob@{DOMAIN}.commcarehq.org',
)
cls.user.save()
@classmethod
def tearDownClass(cls):
cls.user.delete(DOMAIN, deleted_by=None)
super().tearDownClass()
def test_connection_settings_null(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
with self.assertRaises(IntegrityError), \
transaction.atomic():
import_config.save()
def test_connection_settings_protected(self):
import_config = FHIRImportConfig.objects.create(
domain=DOMAIN,
connection_settings=self.conn,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
self.addCleanup(import_config.delete)
with self.assertRaises(ProtectedError):
self.conn.delete()
def test_fhir_version_good(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
fhir_version=FHIR_VERSION_4_0_1,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
import_config.full_clean()
def test_fhir_version_bad(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
fhir_version='1.0.2',
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
def test_frequency_good(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
frequency=IMPORT_FREQUENCY_DAILY,
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
import_config.full_clean()
def test_frequency_bad(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
frequency='annually',
owner_id=self.user.user_id,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
def test_owner_id_missing(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ValidationError):
import_config.full_clean()
def test_owner_id_too_long(self):
uuid = '4d4e6255-2139-49e0-98e9-9418e83a4944'
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id=uuid + 'X',
owner_type=OWNER_TYPE_USER,
)
try:
import_config.full_clean()
except ValidationError as err:
errors = err.message_dict['owner_id']
self.assertEqual(
errors,
['Ensure this value has at most 36 characters (it has 37).'],
)
class TestFHIRImportConfigGetOwner(TestCaseWithConnectionSettings):
def test_owner_type_missing(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0b',
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_owner_type_bad(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0b',
owner_type='0rgunit',
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_user_does_not_exist(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0b',
owner_type=OWNER_TYPE_USER,
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_group_does_not_exist(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='the-clan-mcb0b',
owner_type=OWNER_TYPE_GROUP,
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
def test_location_does_not_exist(self):
import_config = FHIRImportConfig(
domain=DOMAIN,
connection_settings=self.conn,
owner_id='b0bton',
owner_type=OWNER_TYPE_LOCATION,
)
with self.assertRaises(ConfigurationError):
import_config.get_owner()
class TestCaseWithReferral(TestCaseWithConnectionSettings):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.import_config = FHIRImportConfig.objects.create(
domain=DOMAIN,
connection_settings=cls.conn,
owner_id='b0b',
)
cls.referral = CaseType.objects.create(
domain=DOMAIN,
name='referral',
)
@classmethod
def tearDownClass(cls):
cls.referral.delete()
cls.import_config.delete()
super().tearDownClass()
class TestFHIRImportResourceType(TestCaseWithReferral): |
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mother = CaseType.objects.create(
domain=DOMAIN,
name='mother',
)
@classmethod
def tearDownClass(cls):
cls.mother.delete()
super().tearDownClass()
def test_search_params_empty(self):
service_request = FHIRImportResourceType.objects.create(
| import_config=self.import_config,
name='ServiceRequest',
case_type=self.referral,
)
self.assertEqual(service_request.search_params, {})
def test_related_resource_types(self):
service_request = FHIRImportResourceType.objects.create(
import_config=self.import_config,
name='ServiceRequest',
case_type=self.referral,
)
patient = FHIRImportResourceType.objects.create(
import_config=self.import_config,
name='Patient',
case_type=self.mother,
)
ResourceTypeRelationship. |
darrylcousins/django-autocomplete | django_autocomplete/meta.py | Python | apache-2.0 | 1,979 | 0.002021 | # -*- coding: utf-8 -*-
class AutocompleteMeta:
"""
Simple meta class to allow the model to define aspects of the autocomplete.
:var name: used for the named url
:var path: the path to autocomplete view
    :var follow_fks: whether ForeignKey fields should be followed when searching.
:var fields: list of fields, if empty then all searchable fields are used
:var permissions: bool, string or iter
* if ``permissions`` ``False`` (default) no authentication is checked.
* if ``permissions`` ``True`` then request.user must be authenticated.
* if ``permissions`` ``string`` then request.user must have the permission defined by ``string``.
        * if ``permissions`` ``iter`` then request.user must have all the permissions defined in the ``iter``.
See :class:`django_autocomplete.views.AutocompleteView` for more clarification.
For example as a simple object:
>>> from django_autocomplete.meta import AutocompleteMeta
>>> class TestModel(object):
... autocomplete = AutocompleteMeta(
... name='silly',
... path='api/filt | er/silly',
... )
The model autocomplete configures the model for use:
>>> m = TestModel()
>>> m.autocomplete
<django_autocomplete.meta.AutocompleteMeta object at 0x...>
>>> m.autocomplete.path
'api/filter/silly'
>>> m.autocomplete.name
'silly'
>>> m.autocomplete.follow_fks
True
>>> m.autocomplete.fields
[]
"""
name = ''
path = ''
fields = []
permissions = None
follow_fks = True
def __init__(self, autocomplete=None, **kwargs):
if autocomplete:
autocomplete_attrs = autocomplete.__dict__
else:
autocomplete_attrs = kwargs
for attr in self.__class__.__dict__:
if attr in autocomplete_attrs:
self.__dict__[attr] = autocomplete_attrs[attr]
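# A further hedged sketch (the permission strings are invented for
# illustration): restricting the autocomplete endpoint to users who hold
# every listed Django permission.
#
#   autocomplete = AutocompleteMeta(
#       name='towns',
#       path='api/filter/towns',
#       permissions=['myapp.view_town', 'myapp.search_town'],
#   )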
|
eduNEXT/edunext-platform | import_shims/lms/discussion/rest_api/tests/test_pagination.py | Python | agpl-3.0 | 446 | 0.008969 | """Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-s | uppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('discussion.rest_api.tests.test_pagination', 'lms.djangoapps.discussion.rest_ | api.tests.test_pagination')
from lms.djangoapps.discussion.rest_api.tests.test_pagination import *
|
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/klustaviewa/views/tests/utils.py | Python | gpl-3.0 | 5,313 | 0.005835 | """Utils for unit tests for views package."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import threading
import time
from qtools import QtGui, QtCore
from qtools import show_window
import mock_data as md
from klustaviewa.stats.correlograms import NCORRBINS_DEFAULT, CORRBIN_DEFAULT
from klustaviewa import USERPREF
import klustaviewa.views as v
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def assert_fun(statement):
assert statement
def get_data():
"""Return a dictionary with data variables, after the fixture setup
has been called."""
l = md.LOADER
# Get full data sets.
clusters_selected = [4, 2, 10]
l.select(clusters=clusters_selected)
data = dict(
clusters_selected=clusters_selected,
features=l.get_features(),
features_background=l.get_features_background(),
# features_full=l.get_features('all'),
masks=l.get_masks(),
waveforms=l.get_waveforms(),
clusters=l.get_clusters(),
cluster_colors=l.get_cluster_colors(),
cluster_colors_full=l.get_cluster_colors('all'),
cluster_groups=l.get_cluster_groups('all'),
group_colors=l.get_group_colors('all'),
group_names=l.get_group_names('all'),
cluster_sizes=l.get_cluster_sizes('all'),
# channel_names=l.get_channel_names(),
# channel_colors=l.self.kwa(),
# channel_groups=l.get_channel_groups(),
#
# channel_group_colors=l.get_channel_group_colors(),
# channel_group_names=l.get_channel_group_names(),
spiketimes=l.get_spiketimes(),
geometrical_positions=l.get_probe(),
freq=l.freq,
nchannels=l.nchannels,
nsamples=l.nsamples,
fetdim=l.fetdim,
nextrafet=l.nextrafet,
ncorrbins=NCORRBINS_DEFAULT, #l.ncorrbins,
duration=NCORRBINS_DEFAULT * CORRBIN_DEFAULT, #l.get_duration(),
)
return data
# -----------------------------------------------------------------------------
# View functions
# -----------------------------------------------------------------------------
def show_view(view_class, **kwargs):
operators = kwargs.pop('operators', None)
# Display a view.
class TestWindow(QtGui.QMainWindow):
operatorStarted = QtCore.pyqtSignal(int)
def __init__(self):
super(TestWindow, self).__init__()
self.setFocusPolicy(QtCore.Qt.WheelFocus)
self.setMouseTracking(True)
self.setWindowTitle("KlustaViewa")
self.view = view_class(self, getfocus=False)
self.view.set_data(**kwargs)
self.setCentralWidget(self.view)
self.move(100, 100)
self.show()
# Start "operator" asynchronously in the main thread.
if operators:
self.operator_list = operators
self.operatorStarted.connect(self.operator)
self._thread = threading.Thread(target=self._run_operator)
self._thread.start()
def _run_operator(self):
for i in xrange(len(self.operator_list)):
# Call asynchronously operation #i, after a given delay.
if type(self.operator_list[i]) == tuple:
dt = self.operator_list[i][1]
else:
# Default delay.
dt = USERPREF['test_operator_delay'] or .1
time.sleep(dt)
self.operatorStarted.emit(i)
def operator(self, i):
# Execute operation #i.
if type(self.operator_list[i]) == tuple:
fun = self.operator_list[i][0]
else:
fun = self.operator_list[i]
fun(self)
def keyPressEvent(self, e):
super(TestWindow, self).keyPressEvent(e)
self.view.keyPressEvent(e)
if e.key() == | QtCore.Qt.Key_Q:
self.close()
def keyReleaseEvent(self, e):
super(TestWindow, self).keyReleaseEvent(e)
self.view.keyReleaseEvent(e)
def closeEvent(self, e):
if operators:
self._thread.join()
return super(TestWindow, self).closeEvent(e)
window = show_window(TestWindow)
return window
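# Hedged usage sketch: each entry in `operators` is either a callable that
# receives the window, or a (callable, delay_in_seconds) tuple; the callables
# run one after another on the Qt main thread via the operatorStarted signal.
# `SomeViewClass` and `view_kwargs` below are placeholders for a real view
# class and its set_data() keyword arguments.
#
#   window = show_view(SomeViewClass,
#                      operators=[(lambda win: win.close(), 1.0)],
#                      **view_kwargs)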
# def show_waveformview(loader, cl | usters, **kwargs):
# loader.select(clusters=clusters)
# data = vd.get_waveformview_data(loader)
# kwargs.update(data)
# show_view(v.WaveformView, **kwargs)
# def show_featureview(loader, clusters, **kwargs):
# loader.select(clusters=clusters)
# data = vd.get_featureview_data(loader)
# kwargs.update(data)
# show_view(v.FeatureView, **kwargs)
|
SanketDG/networkx | networkx/algorithms/shortest_paths/weighted.py | Python | bsd-3-clause | 33,546 | 0.000865 | # -*- coding: utf-8 -*-
"""
Shortest path algorithms for weighted graphs.
"""
__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>',
'Loïc Séguin-C. <loicseguin@gmail.com>',
'Dan Schult <dschult@colgate.edu>',
                            'Niels van Adrichem <n.l.m.vanadrichem@tudelft.nl>'])
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['dijkstra_path',
'dijkstra_path_length',
'bidirectional_dijkstra',
'single_source_dijkstra',
'single_source_dijkstra_path',
'single_source_dijkstra_path_length',
'all_pairs_dijkstra_path',
'all_pairs_dijkstra_path_length',
'dijkstra_predecessor_and_distance',
'bellman_ford',
'negative_edge_cycle',
'goldberg_radzik',
'johnson']
from collections import deque
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.utils import generate_unique_node
def dijkstra_path(G, source, target, weight='weight'):
"""Returns the shortest path from source to target in a weighted graph G.
Parameters
----------
G : NetworkX graph
source : node
Starting node
target : node
Ending node
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
path : list
List of nodes in a shortest path.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.dijkstra_path(G,0,4))
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
bidirectional_dijkstra()
"""
(length, path) = single_source_dijkstra(G, source, target=target,
weight=weight)
try:
return path[target]
except KeyError:
raise nx.NetworkXNoPath(
"node %s not reachable from %s" % (source, target))
def dijkstra_path_length(G, source, target, weight='weight'):
"""Returns the shortest path length from source to target
in a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
length : number
Shortest path length.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.dijkstra_path_length(G,0,4))
4
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
bidirectional_dijkstra()
"""
if source == target:
return 0
if G.is_multigraph():
get_weight = lambda u, v, data: min(
eattr.get(weight, 1) for eattr in data.values())
else:
get_weight = lambda u, v, data: data.get(weight, 1)
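    # The effective weight of an edge (u, v) is its `weight` attribute
    # (defaulting to 1); for multigraphs the minimum over all parallel
    # (u, v) edges is used, e.g. with edge data {'weight': 3} and
    # {'weight': 1} Dijkstra sees a single edge of weight 1.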
length = _dijkstra(G, source, get_weight, target=target)
try:
return length[target]
except KeyError:
raise nx.NetworkXNoPath(
"node %s not reachable from %s" % (source, target))
def single_source_dijkstra_path(G, source, cutoff=None, weight='weight'):
"""Compute shortest path between source and all other reachable
nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
paths : dictionary
Dictionary of shortest path lengths keyed by target.
Examples
--------
>>> G=nx.path_graph(5)
>>> path=nx.single_source_dijkstra_path(G,0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
single_source_dijkstra()
"""
( | length, path) = single_source_dijkstra(
G, source, cutoff=cutoff, weight=weight)
return path
def single_source_dijkstra_path_length(G, source, cutoff=None,
weight='weight'):
"""Compute the shortest path length between source and all other
reachable nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
weight: string, optional (default='weight' | )
Edge data key corresponding to the edge weight.
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
length : iterator
(target, shortest path length) iterator
Examples
--------
>>> G = nx.path_graph(5)
>>> length = dict(nx.single_source_dijkstra_path_length(G, 0))
>>> length[4]
4
>>> print(length)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
single_source_dijkstra()
"""
if G.is_multigraph():
get_weight = lambda u, v, data: min(
eattr.get(weight, 1) for eattr in data.values())
else:
get_weight = lambda u, v, data: data.get(weight, 1)
return iter(_dijkstra(G, source, get_weight, cutoff=cutoff).items())
def single_source_dijkstra(G, source, target=None, cutoff=None, weight='weight'):
"""Compute shortest paths and lengths in a weighted graph G.
Uses Dijkstra's algorithm for shortest paths.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
target : node label, optional
Ending node for path
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance,path : dictionaries
Returns a tuple of two dictionaries keyed by node.
The first dictionary stores distance from the source.
The second stores the path from the source to that node.
Examples
--------
>>> G=nx.path_graph(5)
>>> length,path=nx.single_source_dijkstra(G,0)
>>> print(length[4])
4
>>> print(length)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
Based on the Python cookbook recipe (119466) at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
See Also
--------
single_source_dijkstra_path()
single_source_dijkstra_path_length()
"""
if source == target:
return ({source: 0}, {source: [source]})
if G.is_multigraph():
get_weight = lambda u, v, data: min(
eattr.get(weight, 1) for eattr in data.values())
else:
get_weight = lambda u, v, data: data.get(weight, 1)
paths = {source: [source]} # dictionary of paths
return (_dijkstra(G, source, get_weight, paths=paths, cutoff=cutoff,
target=target), paths)
def _dijkstra(G, source, get_weight, pred=None, paths=None, cutoff=None,
target=None):
"""Uses Dijkstra's algorithm to find shortest weighte |
BenMotz/cubetoolkit | toolkit/members/migrations/0006_auto_20180223_2138.py | Python | agpl-3.0 | 506 | 0.001976 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-2 | 3 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('members', '0005_trainingrecordtype'),
]
operations = [
migrations.AlterField(
model_name='volunteer',
name='portrait',
field=models.ImageField(blank=True, max_length=256, null=True, upload_to='volunteers'),
| ),
]
|
coders-creed/botathon | src/info/fetch_info.py | Python | mit | 2,889 | 0.023884 | # -*- coding: utf-8 -*-
# @Author: karthik
# @Date: 2016-12-10 21:40:07
# @Last Modified by: chandan
# @Last Modified time: 2016-12-11 12:55:27
from models.portfolio import Portfolio
from models.company import Company
from models.position import Position
import tenjin
from tenjin.helpers import *
import wikipedia
import matplotlib.pyplot as plt
from data_helpers import *
from stock_data import *
import BeautifulSoup as bs
import urllib2
import re
from datetime import date as dt
engine = tenjin.Engine(path=['templates'])
# info fetch handler
def send_info_handler(bot, update, args):
args = list(parse_args(args))
if len(args) == 0 or "portfolio" in [arg.lower() for arg in args] :
send_portfolio_info(bot, update)
else:
info_companies = get_companies(args)
send_companies_info(bot, update, info_companies)
# get portfolio function
def send_portfolio_info(bot, update):
print "Userid: %d requested portfolio information" %(update.message.chat_id)
context = {
'positions': Portfolio.instance.positions,
'wallet_value': Portfolio.instance.wallet_value,
}
html_str = engine.render('portfolio_info.pyhtml', context)
bot.sendMessage(parse_mode="HTML", chat_id=update.message.chat_id, text=html_str)
# get companies information
def send_companies_info(bot, update, companies):
print "Userid: requested information for following companies %s" %','.join([c.name for c in companies])
for company in companies:
context = {
'company': company,
| 'current_price': get_current_price(company),
'description': wikipedia.summary(company.name.split()[0], sentences=2)
}
wiki_page = wikipedia.page(company.name.split()[0])
html_page = urllib2.urlopen(wiki_page.url)
soup = bs.BeautifulSoup(html_page)
img_url = 'http:' + soup.find('td', { "class" : "logo" }).find('img')['src']
bot.sendPhoto(chat_id=update.message.chat_id, photo=img_url)
html_str = engine.render('company_template.pyhtml', context)
bot.sendMe | ssage(parse_mode="HTML", chat_id=update.message.chat_id, text=html_str)
symbols = [c.symbol for c in companies]
if len(symbols) >= 2:
symbol_string = ", ".join(symbols[:-1]) + " and " + symbols[-1]
else:
symbol_string = symbols[0]
last_n_days = 10
if len(companies) < 4:
create_graph(companies, last_n_days)
history_text = '''
Here's the price history for {} for the last {} days
'''.format(symbol_string, last_n_days)
bot.sendMessage(chat_id=update.message.chat_id, text=history_text)
bot.sendPhoto(chat_id=update.message.chat_id, photo=open("plots/temp.png",'rb'))
def create_graph(companies, timedel):
fig, ax = plt.subplots()
for company in companies:
dates, lookback_prices = get_lookback_prices(company, timedel)
# dates = [i.strftime('%d/%m') for i in dates]
h = ax.plot(dates, lookback_prices, label=company.symbol)
ax.legend()
plt.xticks(rotation=45)
plt.savefig('plots/temp.png')
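# Hedged usage sketch (the company names are illustrative): plot the last 10
# days of prices for the selected companies and write the figure to
# plots/temp.png.
#
#   create_graph(get_companies(['AAPL', 'MSFT']), 10)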
|
saltstack/salt | tests/unit/modules/nxos/nxos_n5k.py | Python | apache-2.0 | 5,857 | 0.004098 | """
:codeauthor: Thomas Stoner <tmstoner@cisco.com>
"""
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.unit.modules.nxos.nxos_platform import NXOSPla | tform
class N5KPlatform(NXOSPlatform):
"""Cisco Systems N5K Platform Unit Test Object"""
chassis = "cisco Nexus 5672UP 16G-FC Chassis"
# Captured output from: show install all impact kickstart <kimage> system <image>
| show_install_all_impact = """
Verifying image bootflash:/$KIMAGE for boot variable "kickstart".
[####################] 100% -- SUCCESS
Verifying image bootflash:/$IMAGE for boot variable "system".
[####################] 100% -- SUCCESS
Verifying image type.
[####################] 100% -- SUCCESS
Extracting "system" version from image bootflash:/$IMAGE.
[####################] 100% -- SUCCESS
Extracting "kickstart" version from image bootflash:/$KIMAGE.
[####################] 100% -- SUCCESS
Extracting "bios" version from image bootflash:/$IMAGE.
[####################] 100% -- SUCCESS
Performing module support checks.
[####################] 100% -- SUCCESS
Compatibility check is done:
Module bootable Impact Install-type Reason
------ -------- -------------- ------------ ------
0 yes disruptive reset ISSD is not supported and switch will reset with ascii configuration
1 yes disruptive reset ISSD is not supported and switch will reset with ascii configuration
2 yes disruptive reset ISSD is not supported and switch will reset with ascii configuration
Images will be upgraded according to following table:
Module Image Running-Version New-Version Upg-Required
------ ---------------- ---------------------- ---------------------- ------------
0 system $CVER $NVER $REQ
0 kickstart $CVER $NVER $REQ
0 bios v0.1.9(03/09/2016) v0.1.6(12/03/2015) no
0 power-seq SF-uC:37, SF-FPGA:35 SF-uC:37, SF-FPGA:35 no
0 iofpga v0.0.0.39 v0.0.0.39 no
1 iofpga v0.0.0.18 v0.0.0.18 no
2 iofpga v0.0.0.18 v0.0.0.18 no
Warning : ISSD is not supported and switch will reset with ASCII configuration.
All incompatible configuration will be lost in the target release.
Please also refer the downgrade procedure documentation of the release for more details.
"""
# Captured output from: install all kickstart <kimage> system <image> '''
install_all_disruptive_success = """
Verifying image bootflash:/$KIMAGE for boot variable "kickstart".
[####################] 100% -- SUCCESS
Verifying image bootflash:/$IMAGE for boot variable "system".
[####################] 100% -- SUCCESS
Verifying image type.
[####################] 100% -- SUCCESS
Extracting "system" version from image bootflash:/$IMAGE.
[####################] 100% -- SUCCESS
Extracting "kickstart" version from image bootflash:/$KIMAGE.
[####################] 100% -- SUCCESS
Extracting "bios" version from image bootflash:/$IMAGE.
[####################] 100% -- SUCCESS
Performing module support checks.
[####################] 100% -- SUCCESS
Compatibility check is done:
Module bootable Impact Install-type Reason
------ -------- -------------- ------------ ------
0 yes disruptive reset ISSD is not supported and switch will reset with ascii configuration
1 yes disruptive reset ISSD is not supported and switch will reset with ascii configuration
2 yes disruptive reset ISSD is not supported and switch will reset with ascii configuration
Images will be upgraded according to following table:
Module Image Running-Version New-Version Upg-Required
------ ---------------- ---------------------- ---------------------- ------------
0 system $CVER $NVER $REQ
0 kickstart $CKVER $NKVER $KREQ
0 bios v0.1.9(03/09/2016) v0.1.6(12/03/2015) no
0 power-seq SF-uC:37, SF-FPGA:35 SF-uC:37, SF-FPGA:35 no
0 iofpga v0.0.0.39 v0.0.0.39 no
1 iofpga v0.0.0.18 v0.0.0.18 no
2 iofpga v0.0.0.18 v0.0.0.18 no
Warning : ISSD is not supported and switch will reset with ASCII configuration.
All incompatible configuration will be lost in the target release.
Please also refer the downgrade procedure documentation of the release for more details.
Install is in progress, please wait.
Performing runtime checks.
[####################] 100% -- SUCCESS
Setting boot variables.
[####################] 100% -- SUCCESS
Performing configuration copy.
[####################] 100% -- SUCCESS
Converting startup config.
[####################] 100% -- SUCCESS
Finishing the upgrade, switch will reboot in 10 seconds.
"""
|
talkoopaiva/talkoohakemisto-api | talkoohakemisto/migrations/versions/597b5983e01c_create_voluntary_work_type_table.py | Python | mit | 546 | 0.003663 | """Create `voluntary_work_type` table
Revision ID: 597b5983e01c
Revises: 2e20f | 23a8ffd
Create Date: 2014-02-08 17:14:35.812815
"""
# revision identifiers, used by Alembic.
revision = '597b5983e01c'
down_revision = '2e20f23a8ffd'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'voluntary_work_type',
sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
sa.Column('name', sa.Unicode(50), nu | llable=False),
)
def downgrade():
op.drop_table('voluntary_work_type')
|
pp-mo/iris | docs/iris/example_code/General/orca_projection.py | Python | lgpl-3.0 | 1,647 | 0 | """
Tri-Polar Grid Projected Plotting
=================================
This example demonstrates cell plots of data on the semi-structured ORCA2 model
grid.
First, the data is projected into the PlateCarree coordinate reference system.
Second four pcolormesh plots are created from this projected dataset,
using different projections for the output image.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
im | port iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load data
filepath = iris.sample_data_path("orca2_votemper.nc")
cube = iris.load_cube(filepath)
| # Choose plot projections
projections = {}
projections["Mollweide"] = ccrs.Mollweide()
projections["PlateCarree"] = ccrs.PlateCarree()
projections["NorthPolarStereo"] = ccrs.NorthPolarStereo()
projections["Orthographic"] = ccrs.Orthographic(
central_longitude=-90, central_latitude=45
)
pcarree = projections["PlateCarree"]
# Transform cube to target projection
new_cube, extent = iris.analysis.cartography.project(
cube, pcarree, nx=400, ny=200
)
# Plot data in each projection
for name in sorted(projections):
fig = plt.figure()
fig.suptitle("ORCA2 Data Projected to {}".format(name))
# Set up axes and title
ax = plt.subplot(projection=projections[name])
# Set limits
ax.set_global()
# plot with Iris quickplot pcolormesh
qplt.pcolormesh(new_cube)
# Draw coastlines
ax.coastlines()
iplt.show()
if __name__ == "__main__":
main()
|
mtury/scapy | scapy/supersocket.py | Python | gpl-2.0 | 11,654 | 0 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
SuperSocket.
"""
from __future__ import absolute_import
from select import select, error as select_error
import errno
import os
import socket
import struct
import time
from scapy.config import conf
from scapy.consts import LINUX, DARWIN, WINDOWS
from scapy.data import MTU, ETH_P_IP
from scapy.compat import raw, bytes_encode
from scapy.error import warning, log_runtime
import scapy.modules.six as six
import scapy.packet
from scapy.utils import PcapReader, tcpdump
class _SuperSocket_metaclass(type):
def __repr__(self):
if self.desc is not None:
return "<%s: %s>" % (self.__name__, self.desc)
else:
return "<%s>" % self.__name__
class SuperSocket(six.with_metaclass(_SuperSocket_metaclass)):
desc = None
closed = 0
nonblocking_socket = False
read_allowed_exceptions = ()
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): # noqa: E501
self.ins = socket.socket(family, type, proto)
self.outs = self.ins
self.promisc = None
def send(self, x):
sx = raw(x)
try:
x.sent_time = time.time()
except AttributeError:
pass
return self.outs.send(sx)
def recv_raw(self, x=MTU):
"""Returns a tuple containing (cls, pkt_data, time)"""
return conf.raw_layer, self.ins.recv(x), None
def recv(self, x=MTU):
cls, val, ts = self.recv_raw(x)
if not val or not cls:
return
try:
pkt = cls(val)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
from scapy.sendrecv import debug
debug.crashed_on = (cls, val)
raise
pkt = conf.raw_layer(val)
if ts:
pkt.time = ts
return pkt
def fileno(self):
return self.ins.fileno()
def close(self):
if self.closed:
return
self.closed = True
if getattr(self, "outs", None):
if getattr(self, "ins", None) != self.outs:
if WINDOWS or self.outs.fileno() != -1:
self.outs.close()
if getattr(self, "ins", None):
if WINDOWS or self.ins.fileno() != -1:
self.ins.close()
def sr(self, *args, **kargs):
from scapy import sendrecv
return sendrecv.sndrcv(self, *args, **kargs)
def sr1(self, *args, **kargs):
from scapy import sendrecv
a, b = sendrecv.sndrcv(self, *args, **kargs)
if len(a) > 0:
return a[0][1]
else:
return None
def sniff(self, *args, **kargs):
from scapy import sendrecv
return sendrecv.sniff(opened_socket=self, *args, **kargs)
def tshark(self, *args, **kargs):
from scapy import sendrecv
return sendrecv.tshark(opened_socket=self, *args, **kargs)
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
"""This function is called during sendrecv() routine to select
the available sockets.
:param sockets: an array of sockets that need to be selected
:returns: an array of sockets that were selected and
        the function to be called next to get the packets (e.g. recv)
"""
try:
inp, _, _ = select(sockets, [], [], remain)
except (IOError, select_error) as exc:
# select.error has no .errno attribute
if exc.args[0] != errno.EINTR:
raise
return inp, None
def __del__(self):
"""Close the socket"""
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Close the socket"""
self.close()
class L3RawSocket(SuperSocket):
desc = "Layer 3 using Raw sockets (PF_INET/SOCK_RAW)"
def __init__(self, type=ETH_P_IP, filter=None, iface=None, promisc=None, nofilter=0): # noqa: E501
self.outs = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW) # noqa: E501
self.outs.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) # noqa: E501
if iface is not None:
self.ins.bind((iface, type))
def recv(self, x=MTU):
pkt, sa_ll = self.ins.recvfrom(x)
if sa_ll[2] == socket.PACKET_OUTGOING:
return None
if sa_ll[3] in conf.l2types:
cls = conf.l2types[sa_ll[3]]
lvl = 2
elif sa_ll[1] in conf.l3types:
cls = conf.l3types[sa_ll[1]]
lvl = 3
else:
cls = conf.default_l2
warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s", sa_ll[0], sa_ll[1], sa_ll[3], cls.name) # noqa: E501
| lvl = 3
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
if lvl == 2:
pkt = pkt.payload
if pkt is not None:
from scapy.arch import get_last_packet_timestamp
pkt.time = get_last_packet_timestamp(self.ins)
return pkt
def send(self, x):
try:
| sx = raw(x)
x.sent_time = time.time()
self.outs.sendto(sx, (x.dst, 0))
except socket.error as msg:
log_runtime.error(msg)
class SimpleSocket(SuperSocket):
desc = "wrapper around a classic socket"
def __init__(self, sock):
self.ins = sock
self.outs = sock
class StreamSocket(SimpleSocket):
desc = "transforms a stream socket into a layer 2"
nonblocking_socket = True
def __init__(self, sock, basecls=None):
if basecls is None:
basecls = conf.raw_layer
SimpleSocket.__init__(self, sock)
self.basecls = basecls
def recv(self, x=MTU):
pkt = self.ins.recv(x, socket.MSG_PEEK)
x = len(pkt)
if x == 0:
return None
pkt = self.basecls(pkt)
pad = pkt.getlayer(conf.padding_layer)
if pad is not None and pad.underlayer is not None:
del(pad.underlayer.payload)
from scapy.packet import NoPayload
while pad is not None and not isinstance(pad, NoPayload):
x -= len(pad.load)
pad = pad.payload
self.ins.recv(x)
return pkt
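# Hedged usage sketch: wrap an already-connected TCP socket so that send() and
# recv() speak in packets (the peer address below is a placeholder):
#
#   import socket
#   s = socket.socket()
#   s.connect(("198.51.100.7", 9999))
#   ss = StreamSocket(s)              # basecls defaults to conf.raw_layer (Raw)
#   ss.send(conf.raw_layer(b"ping"))
#   answer = ss.recv()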
class SSLStreamSocket(StreamSocket):
desc = "similar usage than StreamSocket but specialized for handling SSL-wrapped sockets" # noqa: E501
def __init__(self, sock, basecls=None):
self._buf = b""
super(SSLStreamSocket, self).__init__(sock, basecls)
# 65535, the default value of x is the maximum length of a TLS record
def recv(self, x=65535):
pkt = None
if self._buf != b"":
try:
pkt = self.basecls(self._buf)
except Exception:
# We assume that the exception is generated by a buffer underflow # noqa: E501
pass
if not pkt:
buf = self.ins.recv(x)
if len(buf) == 0:
raise socket.error((100, "Underlying stream socket tore down"))
self._buf += buf
x = len(self._buf)
pkt = self.basecls(self._buf)
pad = pkt.getlayer(conf.padding_layer)
if pad is not None and pad.underlayer is not None:
del(pad.underlayer.payload)
while pad is not None and not isinstance(pad, scapy.packet.NoPayload):
x -= len(pad.load)
pad = pad.payload
self._buf = self._buf[x:]
return pkt
class L2ListenTcpdump(SuperSocket):
desc = "read packets at layer 2 using tcpdump"
def __init__(self, iface=None, promisc=None, filter=None, nofilter=False,
prog= |
eranimo/historia | historia/world/biome.py | Python | mit | 2,771 | 0.000361 | from historia.enums.dict_enum import DictEnum
class Biome(DictEnum):
__exports__ = ['title', 'color', 'id']
arctic = {
'id': 1,
'title': 'Arctic',
'color': (224, 224, 224),
'fertility': 1,
'can_farm': False,
'has_forest': False,
'base_favorability': 10
}
tundra = {
'id': 2,
'title': 'Tundra',
'color': (114, 153, 128),
'fertility': 15,
'can_farm': False,
'has_forest': False,
'base_favorability': -30
}
alpine_tundra = {
'id': 3,
'title': 'Alpine Tundra',
'color': (97, 130, 106),
'fertility': 10,
'can_farm': False,
'has_forest': False,
'base_favorability': -20
}
desert = {
'id': 4,
'title': 'Desert',
'color': (237, 217, 135),
'fertility': 5,
'can_farm': False,
'has_forest': False,
'base_favorability': -10
}
shrubland = {
'id': 5,
'title': 'Shrubland',
'color': (194, 210, 136),
'fertility': 20,
'can_farm': True,
'has_forest': False,
'base_favorability': 20
}
savanna = {
'id': 6,
'title': 'Savanna',
'color': (219, 230, 158),
'fertility': 80,
'can_farm': True,
'has_forest': False,
'base_favorability': 30
}
grasslands = {
'id': 7,
'title': 'Grasslands',
'color': (166, 223, 106),
'fertility': 150,
'can_farm': True,
'has_forest': False,
'base_favorability': 50
}
boreal_forest = {
'id': 8,
'title': 'Boreal Forest',
'color': (28, 94, 74),
'fertility': 30,
'can_farm': True,
'has_forest': True,
'base_favorability': -10
}
temperate_forest = {
'id': 9,
'title': 'Temp | erate Forest',
'color': (76, 192, 0),
'fertility': 100,
'can_farm': True,
'has_forest': True,
'base_favorability': 50
}
temperate_rainforest = {
'id': 10,
'titl | e': 'Temperate Rainforest',
'color': (89, 129, 89),
'fertility': 100,
'can_farm': True,
'has_forest': True,
'base_favorability': -10
}
tropical_forest = {
'id': 11,
'title': 'Tropical Forest',
'color': (96, 122, 34),
'fertility': 70,
'can_farm': True,
'has_forest': True,
'base_favorability': 5
}
tropical_rainforest = {
'id': 12,
'title': 'Tropical Rainforest',
'color': (0, 70, 0),
'fertility': 60,
'can_farm': True,
'has_forest': True,
'base_favorability': 0
}
|
bjornaa/ladim | examples/line/plot_holoviews.py | Python | mit | 1,481 | 0.000675 | # Use holoviews to plot the particle distribution at given time
from pathlib import Path
import numpy as np
import xarray as xr
import holoviews as hv
from postladim import ParticleFile
hv.extension("bokeh")
# --- Settings ---
tstep = 40 # Time step to show |
# Output file (and type)
output_file = "line_hv.png"
#output_file = "line_hv.html"
scale = 5 # Figure size factor
# --- Data files ---
ladim_dir = Path("../../")
grid_file = ladim_dir / "examples/data/ocean_avg_0014.nc"
particle_file = ladim_dir / "examples/line/line.nc"
# --- Read particle data ---
pf = ParticleFile(particle_file)
X, Y = pf.position(tstep)
|
# --- Background bathymetry data ---
# Read bathymetry and land mask
with xr.open_dataset(grid_file) as A:
H = A.h
M = A.mask_rho
jmax, imax = M.shape
H = H.where(M > 0) # Mask out land
M = M.where(M < 1) # Mask out sea
# --- Holoviews elements ---
# Land image
land = hv.Image(data=M, kdims=["xi_rho", "eta_rho"], group="Land")
# Bathymetry image
topo = hv.Image(data=-np.log10(H), kdims=["xi_rho", "eta_rho"], group="Topo")
# Particle distribution
spread = hv.Scatter(data=(X, Y))
# Overlay
h = topo * land * spread
# --- Plot options ---
h.opts(frame_width=scale * imax, frame_height=scale * jmax)
h.opts("Scatter", color="red")
h.opts("Image.Topo", cmap="blues_r", alpha=0.7)
h.opts("Image.Land", cmap=["#80B040"])
# --- Save output ---
if output_file.endswith("png"):
h.opts(toolbar=None)
hv.save(h, filename=output_file)
|
jhermann/neutrino-wand | src/neutrino_wand/cli.py | Python | apache-2.0 | 1,396 | 0.002869 | # -*- coding: utf-8 -*-
""" Command | Line Interface.
"""
# Copyright © 2014 Jürgen Hermann
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, e | ither express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, with_statement
import sys
import logging
from neutrino_wand import pkg_info
class NeutrinoWand(object):
""" The main `wand` command line application.
"""
log = logging.getLogger(__name__)
def __init__(self):
"""Set up main command."""
project = pkg_info()
#super(NeutrinoWand, self).__init__(
# description=project["description"],
# version='0.1', # TODO: need to get version at runtime
#)
def run(argv=None):
"""Main CLI entry point."""
cli = NeutrinoWand()
#return cli.run(sys.argv[1:] if argv is None else argv)
return 1 # Not implemented
if __name__ == "__main__":
# When started via "python -m"
sys.exit(run())
|
faylau/oVirt3.3WebAPITest | src/TestData/Profile/ITC090402_UpdateProfile_DupName.py | Python | apache-2.0 | 1,804 | 0.003898 | #encoding:utf-8
__authors__ = ['"Wei Keke" <keke.wei@cs2c.com.cn>']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1        2014/10/09    Initial version                                 Wei Keke
#---------------------------------------------------------------------------------
'''
from TestAPIs.DataCenterAPIs import DataCenterAPIs
from TestData.Profile import ITC09_SetUp as ModuleData
'''
@note: PreData
'''
nw_name = 'network_ITC09'
dc_name = ModuleData.dc_name
dc_id = DataCenterAPIs().getDataCenterIdByName(dc_name)
nw_info = '''
<network>
<name>%s</name>
<data_center id= "%s"/>
</network>
''' %(nw_name, dc_id)
profile_name1 = 'p001'
profile_name2 = 'p002'
profile_info1 = '''
<vnic_profile>
<name>p001</name>
<description>shelled</description>
<network id="%s"/>
</vnic_profile>
'''
profile_info2 = '''
<vnic_profile>
<name>p002</name>
<description>s | helled</description>
<network id="%s"/>
</vnic_profile>
'''
'''
@note: TestData
'''
update_info = '''
<vnic_profile>
<name>p002</name>
<description>shelled</description>
| <port_mirroring>true</port_mirroring>
</vnic_profile>
'''
'''
@note: ExpectedData
'''
expected_status_code = 409
expected_info = '''
<fault>
<reason>Operation Failed</reason>
<detail>[Cannot edit VM network interface profile. The VM network interface profile's name is already used by an existing profile for the same network.
-Please choose a different name.]</detail>
</fault>
'''
|
trunca/enigma2 | skin.py | Python | gpl-2.0 | 39,906 | 0.034506 | from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config, ConfigYesNo, ConfigSelection, ConfigNothing
from Components.Converter.Converter import Converter
from Components.Sources.Source import Source, ObsoleteSource
from Components.SystemInfo import SystemInfo
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_SKIN_IMAGE, SCOPE_FONTS, SCOPE_ACTIVE_SKIN, SCOPE_ACTIVE_LCDSKIN, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
from boxbranding import getBoxType
config.vfd = ConfigSubsection()
config.vfd.show = ConfigSelection([("skin_text.xml", _("Channel Name")), ("skin_text_clock.xml", _("Clock"))], "skin_text.xml")
if not os.path.exists("/usr/share/enigma2/skin_text.xml"):
config.vfd.show = ConfigNothing()
colorNames = {}
colorNamesHuman = {}
fonts = {
"Body": ("Regular", 18, 22, 16),
"ChoiceList": ("Regular", 20, 24, 18),
}
parameters = {}
constant_widgets = {}
variables = {}
DEFAULT_SKIN = "OPD-Blue-Line/skin.xml"
DEFAULT_DISPLAY_SKIN = "skin_display.xml"
def dump(x, i=0):
print " " * i + str(x)
try:
for n in x.childNodes:
dump(n, i + 1)
except:
None
class SkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "[Skin] {%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)
class DisplaySkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "[Skin] {%s}: %s. Please contact the skin's author!" % (config.skin.display_skin.value, self.msg)
dom_skins = [ ]
def addSkin(name, scope = SCOPE_SKIN):
if name is None or not len(name):
print "[SKIN ERROR] attempt to add a skin without filename"
return False
filename = resolveFilename(scope, name)
if fileExists(filename):
mpath = os.path.dirname(filename) + "/"
try:
file = open(filename, 'r')
dom_skins.append((mpath, xml.etree.cElementTree.parse(file).getroot()))
except:
print "[SKIN ERROR] error in %s" % filename
return False
else:
return True
return False
def get_modular_files(name, scope = SCOPE_SKIN):
dirname = resolveFilename(scope, name + 'mySkin/')
file_list = []
if fileExists(dirname):
skin_files = (os.listdir(dirname))
if len(skin_files):
for f in skin_files:
if f.startswith('skin_') and f.endswith('.xml'):
file_list.append(("mySkin/" + f))
file_list = sorted(file_list, key=str.lower)
return file_list
def skin_user_skinname():
name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml"
filename = resolveFilename(SCOPE_CONFIG, name)
if fileExists(filename):
return name
return None
config.skin = ConfigSubsection()
config.skin.primary_skin = ConfigText(default = DEFAULT_SKIN)
if SystemInfo["FrontpanelDisplay"] or SystemInfo["LcdDisplay"] or SystemInfo["OledDisplay"] or SystemInfo["FBLCDDisplay"]:
config.skin.display_skin = ConfigText(default = "skin_display.xml")
else:
config.skin.display_skin = ConfigText(default = "skin_display_text.xml")
def skinExists(skin = False):
if not skin or not isinstance(skin, skin):
skin = config.skin.primary_skin.value
skin = resolveFilename(SCOPE_SKIN, skin)
if not fileExists(skin):
if fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
config.skin.primary_skin.value = DEFAULT_SKIN
else:
config.skin.primary_skin.value = "skin.xml"
config.skin.primary_skin.save()
skinExists()
def getSkinPath():
primary_skin_path = config.skin.primary_skin.value.replace('skin.xml', '')
if not primary_skin_path.endswith('/'):
primary_skin_path = primary_skin_path + '/'
return primary_skin_path
primary_skin_path = getSkinPath()
profile("LoadSkin")
res = None
name = skin_user_skinname()
if name:
res = addSkin(name, SCOPE_CONFIG)
if not name or not res:
addSkin('skin_user.xml', SCOPE_CONFIG)
addSkin('skin_box.xml')
addSkin('skin_second_infobar.xml')
display_skin_id = 1
if SystemInfo["FrontpanelDisplay"] or SystemInfo["LcdDisplay"] or SystemInfo["OledDisplay"] or SystemInfo["FBLCDDisplay"]:
if fileExists('/usr/share/enigma2/display/skin_display.xml'):
if fileExists(resolveFilename(SCOPE_CONFIG, config.skin.display_skin.value)):
addSkin(config.skin.display_skin.value, SCOPE_CONFIG)
else:
addSkin('display/' + config.skin.display_skin.value)
if addSkin('skin_display.xml'):
display_skin_id = 2
try:
addSkin(config.vfd.show.value)
except:
addSkin('skin_text.xml')
addSkin('skin_subtitles.xml')
try:
addSkin(primary_skin_path + 'skin_user_colors.xml', SCOPE_SKIN)
print "[SKIN] loading user defined colors for skin", (primary_skin_path + 'skin_user_colors.xml')
except (SkinError, IOError, AssertionError), err:
print "[SKIN] not loading user defined colors for skin"
try:
addSkin(primary_skin_path + 'skin_us | er_header.xml', SCOPE_SKIN)
print "[SKIN] loading user defined header file for skin", (primary_skin_path + 'skin_user_header.xml')
except (SkinError, IOError, AssertionError), err:
print "[SKIN] not loading user defined header file for skin"
def load_modular_files():
modular_files = get_modular_files(primary_skin_path, SCOPE_SKIN)
if len(modular_files):
for f in modular_files:
try:
addSkin(primary_skin_path + f, SCOPE_SKIN)
print "[SKIN] loading modular skin file : ", (primary_skin_p | ath + f)
except (SkinError, IOError, AssertionError), err:
print "[SKIN] failed to load modular skin file : ", err
load_modular_files()
try:
if not addSkin(config.skin.primary_skin.value):
raise SkinError, "primary skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_SKIN
if config.skin.primary_skin.value == skin:
skin = 'skin.xml'
print "defaulting to standard skin...", skin
config.skin.primary_skin.value = skin
addSkin(skin)
del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")
def parseCoordinate(s, e, size=0, font=None):
s = s.strip()
if s == "center":
if not size:
val = 0
else:
val = (e - size)/2
elif s == '*':
return None
else:
if s[0] == 'e':
val = e
s = s[1:]
elif s[0] == 'c':
val = e/2
s = s[1:]
else:
val = 0
if s:
if s[-1] == '%':
val += e * int(s[:-1]) / 100
elif s[-1] == 'w':
val += fonts[font][3] * int(s[:-1])
elif s[-1] == 'h':
val += fonts[font][2] * int(s[:-1])
else:
val += int(s)
if val < 0:
val = 0
return val
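# Illustrative sketch, not part of the original skin module: how the coordinate
# strings accepted by parseCoordinate() above behave for an extent of 1280
# pixels and a widget size of 200 (expected results worked out by hand, not
# taken from the source).
def _parseCoordinate_examples():
    # "center" -> (1280 - 200) / 2 = 540
    # "e-100"  -> 1280 - 100      = 1180  (offset from the end)
    # "c+10"   -> 1280 / 2 + 10   = 650   (offset from the centre)
    # "25%"    -> 1280 * 25 / 100 = 320
    # "*"      -> None (value left undefined)
    return [parseCoordinate(s, 1280, 200) for s in ("center", "e-100", "c+10", "25%", "*")]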
def getParentSize(object, desktop):
size = eSize()
if object:
parent = object.getParent()
if parent and parent.size().isEmpty():
parent = parent.getParent()
if parent:
size = parent.size()
elif desktop:
size = desktop.size()
return size
def parsePosition(s, scale, object = None, desktop = None, size = None):
if s in variables:
s = variables[s]
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width(), size and size.width())
yval = parseCoordinate(y, parentsize.height(), size and size.height())
return ePoint(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseSize(s, scale, object = None, desktop = None):
if s in variables:
s = variables[s]
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width())
yval = parseCoordinate(y, parentsize.height())
return eSize(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseFont(s, scale):
try:
f = fonts[s]
name = f[0]
size = f[1]
except:
name, size = s.split(';')
return gFont(name, int(size) * scale[0][0] / scale[0][1])
def parseColor(s):
if s[0] != '#':
try:
return colorNames[s]
except:
raise SkinError("color '%s' must be #a |
riccardomarotti/slidown | tests/test_config.py | Python | gpl-3.0 | 1,706 | 0.002345 | # -*- coding: utf-8 -*-
import os
import tempfile
from slidown import config
def test_load_not_existing_config():
with tempfile.TemporaryDirectory() as temp_dir:
configuration_file = os.path.join(temp_dir, 'config.json')
import appdirs
appdirs.user_config_dir = lambda any_appname: temp_dir
config_hash = config.load()
assert open(configuration_file).read() == "{}"
assert config_hash == {}
def test_load_existing_config():
with tempfile.TemporaryDirectory() as temp_dir:
configuration_file = os.path.join(temp_dir, 'config.json')
open(configuration_file, 'w+').write('{"a json": "config"}')
import appdirs
appdirs.user_config_dir = lambda any_appname: temp_dir
config_hash = config.load()
assert open(configuration_file).read() == '{"a json": "config"}'
assert config_hash == {'a json' | : 'config'}
def test_save():
config_hash = {
'hash': 'of configuration',
'with': 'some',
'interesting': 'values'
}
with tempfile.TemporaryDirectory() as temp_dir:
configuration_file = os.path.join(temp_dir, 'config.json')
import appdirs
appdirs.user_config_dir = lambda any_appname: temp_dir
config.save(config_hash)
assert config.load() == config_hash
def test_not_exis | ting_config_directory():
temp_dir_name = tempfile.TemporaryDirectory().name
configuration_file = os.path.join(temp_dir_name, 'config.json')
import appdirs
appdirs.user_config_dir = lambda any_appname: temp_dir_name
config_hash = config.load()
assert open(configuration_file).read() == "{}"
assert config_hash == {}
|
inspirehep/invenio | modules/bibcheck/lib/plugins/rename_subfield.py | Python | gpl-2.0 | 1,062 | 0.013183 | # -*- coding: utf-8 -*-
##
## Thi | s file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any l | ater version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibcheck plugin to move (rename) a subfield"""
def check_record(record, source_field, new_code):
""" Changes the code of a subfield to new_code """
import invenio.bibcheck_plugins.rename_subfield_filter as p
p.check_record(record, source_field, new_code)
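# Illustrative sketch (assumed rule configuration, not part of Invenio itself):
# a bibcheck rule would typically call this plugin with a MARC field spec and
# the new subfield code, e.g.
#   check_record(record, source_field="8564_u", new_code="q")
# which renames subfield $u of tag 8564 to $q via the filter module above.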
|
CMPUT410W15T02/CMPUT410W15-project | social_distribution/authors/migrations/0002_auto_20150301_2125.py | Python | gpl-2.0 | 857 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('authors', '0001_initial'),
]
operations = [
migrations.AddField(
model_name= | 'profile',
name='displayname',
field=models.CharField(max_length=128, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='profile',
name='host',
field=mo | dels.CharField(max_length=32, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='profile',
name='uuid',
field=models.CharField(default=uuid.uuid1, max_length=32),
preserve_default=True,
),
]
|
theblacklion/project-service | scripts/mysql-dump-splitter.py | Python | mit | 5,954 | 0.000168 | #!/usr/bin/env python3
#
# Usage: mysql-dump-splitter.py dump.sql.gz
# Usage: mysql-dump-splitter.py dump.sql.bz2
#
# @author Oktay Acikalin <oktay.acikalin@gmail.com>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)
import sys
import re
import gzip
import bz2
# HEADER_REGEX = r''
SECTION_INDICATOR = r'\n--\n-- (?P<description>[^\n]+)\n--\n'
SECTION_DATABASE = r'Current Database: `(?P<name>[^`]+)`'
SECTION_TABLE_STRUCTURE = r'Table structure for table `(?P<name>[^`]+)`'
SECTION_TABLE_DATA = r'Dumping data for table `(?P<name>[^`]+)`'
SECTION_TEMP_VIEW = r'Temporary table structure for view `(?P<name>[^`]+)`'
SECTION_FINAL_VIEW = r'Final view structure for view `(?P<name | >[^`]+)`'
# SECTION_INDICATOR_FOOTER = r'UNLOCK TABLES;\n\/\*\!'
SECTION_TYPE_HEADER = 1
SECTION_TYPE_DATABASE = 2
SECTION_TYPE_TABLE_STRUCTURE = 3
SECTION_TYPE_TABLE_DATA = 4
SECTION_TYPE_TEMP_VI | EW = 5
SECTION_TYPE_FINAL_VIEW = 6
SECTION_TYPE_FOOTER = 90
SECTION_TYPE_UNKNOWN = 99
def write_data_to_disk(file, section_type, section_name, data, excludes):
stype = (
'database' if section_type == SECTION_TYPE_DATABASE else
'structure' if section_type == SECTION_TYPE_TABLE_STRUCTURE else
'temp_view' if section_type == SECTION_TYPE_TEMP_VIEW else
'final_view' if section_type == SECTION_TYPE_FINAL_VIEW else
'data' if section_type != SECTION_TYPE_UNKNOWN else
None
)
# Just skip unknown sections completely.
if not stype:
return file
filename = '%s.%s.sql.gz' % (section_name, stype)
if section_name in excludes and stype == 'data':
# Make sure that we don't write to files we want to skip.
if file is None or (type(file) is str and file != filename) or \
(type(file) is not str and file.name != filename):
file = filename
print('Skipping file: %s' % filename)
return file
if file is None or type(file) is str or file.name != filename:
print('Writing file: %s' % filename)
file = gzip.open(filename, 'w')
file.write(data)
return file
def main(source_file, excludes):
regex_indicator = re.compile(SECTION_INDICATOR, re.I | re.M)
regex_database = re.compile(SECTION_DATABASE, re.I | re.M)
regex_table_structure = re.compile(SECTION_TABLE_STRUCTURE, re.I | re.M)
regex_table_data = re.compile(SECTION_TABLE_DATA, re.I | re.M)
regex_temp_view = re.compile(SECTION_TEMP_VIEW, re.I | re.M)
regex_final_view = re.compile(SECTION_FINAL_VIEW, re.I | re.M)
# regex_footer = re.compile(SECTION_INDICATOR_FOOTER, re.I | re.M)
cur_section_type = SECTION_TYPE_HEADER
cur_section_name = 'header'
cur_file = None
if source_file.endswith('.sql.gz'):
file = gzip.open(source_file, 'rb')
elif source_file.endswith('.sql.bz2'):
file = bz2.open(source_file, 'rb')
elif source_file.endswith('.sql'):
file = open(source_file, 'rb')
else:
raise Exception('Unknown file type. Filename can have the following endings: .sql, .sql.gz, .sql.bz2')
buffer = ''
last_lines = dict()
footer = []
for data in file:
line = str(data, 'UTF-8', 'replace')
if footer:
match = regex_indicator.search(buffer + line)
if not match:
buffer += line
footer.append(data)
continue
else:
footer = []
try:
last_lines[0] = last_lines[1]
last_lines[1] = line
except KeyError:
last_lines[0] = ''
last_lines[1] = line
if last_lines[0] == 'UNLOCK TABLES;\n' and line.startswith('/*!'):
footer.append(data)
continue
if not line.startswith('--') and not line.strip() == '' and \
cur_section_type:
cur_file = write_data_to_disk(
cur_file, cur_section_type, cur_section_name, data, excludes)
buffer = ''
buffer += line
match = regex_indicator.search(buffer)
if match:
buffer = ''
result = match.groupdict()
description = result['description']
database_result = regex_database.search(description)
structure_result = regex_table_structure.search(description)
data_result = regex_table_data.search(description)
temp_view_result = regex_temp_view.search(description)
final_view_result = regex_final_view.search(description)
if database_result:
cur_section_name = database_result.group('name')
cur_section_type = SECTION_TYPE_DATABASE
elif structure_result:
cur_section_name = structure_result.group('name')
cur_section_type = SECTION_TYPE_TABLE_STRUCTURE
elif data_result:
cur_section_name = data_result.group('name')
cur_section_type = SECTION_TYPE_TABLE_DATA
elif temp_view_result:
cur_section_name = temp_view_result.group('name')
cur_section_type = SECTION_TYPE_TEMP_VIEW
elif final_view_result:
cur_section_name = final_view_result.group('name')
cur_section_type = SECTION_TYPE_FINAL_VIEW
else:
# raise Exception('Unknown section type: %s' % description)
print('Unknown section type: %s' % description)
cur_section_name = None
cur_section_type = SECTION_TYPE_UNKNOWN
cur_section_type = SECTION_TYPE_FOOTER
cur_section_name = 'footer'
for data in footer:
cur_file = write_data_to_disk(
cur_file, cur_section_type, cur_section_name, data, excludes)
if __name__ == '__main__':
try:
excludes = sys.argv[2].split(',')
except IndexError:
excludes = []
main(sys.argv[1], excludes)
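# Illustrative invocation (assumed, extending the usage examples at the top of
# this file; the table names are placeholders):
#   python mysql-dump-splitter.py dump.sql.gz logs,sessions
# splits the dump into per-section files such as users.structure.sql.gz and
# users.data.sql.gz, while the data sections of the `logs` and `sessions`
# tables are skipped via the optional comma-separated excludes argument.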
|
ronekko/chainer | tests/chainer_tests/training_tests/extensions_tests/test_evaluator.py | Python | mit | 8,187 | 0 | import unittest
import numpy
import chainer
from chainer import dataset
from chainer import iterators
from chainer import testing
from chainer.training import extensions
class DummyModel(chainer.Chain):
def __init__(self, test):
super(DummyModel, self).__init__()
self.args = []
self.test = test
def forward(self, x):
self.args.append(x)
chainer.report({'loss': x.sum()}, self)
class DummyModelTwoArgs(chainer.Chain):
def __init__(self, test):
super(DummyModelTwoArgs, self).__init__()
self.args = []
self.test = test
def forward(self, x, y):
self.args.append((x, y))
chainer.report({'loss': x.sum() + y.sum()}, self)
class DummyIterator(dataset.Iterator):
def __init__(self, return_values):
self.iterator = iter(return_values)
self.finalized = False
def __next__(self):
return next(self.iterator)
def finalize(self):
self.finalized = True
class DummyConverter(object):
def __init__(self, return_values):
self.args = []
self.iterator = iter(return_values)
def __call__(self, batch, device):
self.args.append({'batch': batch, 'device': device})
return next(self.iterator)
class TestEvaluator(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.batches = [
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModel(self)
self.evaluator = extensions.Evaluator(
self.iterator, self.target, converter=self.converter)
self.expect_mean = numpy.mean([numpy.sum(x) for x in self.batches])
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
mean = self.evaluator.evaluate()
# No observation is reported to the current reporter. Instead the
# evaluator collect results in order to calculate their mean.
self.assertEqual(len(reporter.observation), 0)
# The converter gets results of the iterator.
self.assertEqual(len(self.converter.args), len(self.data))
for i in range(len(self.data)):
numpy.testing.assert_array_equal(
self.converter.args[i]['batch'], self.data[i])
self.assertIsNone(self.converter.args[i]['device'])
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i], self.batches[i])
self.assertAlmostEqual(mean['target/loss'], self.expect_mean, places=4)
self.evaluator.finalize()
self.assertTrue(self.iterator.finalized)
def test_call(self):
mean = self.evaluator()
# 'main' is used by default
self.assertAlmostEqual(mean['main/loss'], self.expect_mean, places=4)
def test_evaluator_name(self):
self.evaluator.name = 'eval'
mean = self.evaluator()
# name is used as a prefix
self.assertAlmostEqual(
mean['eval/main/loss'], self.expect_mean, places=4)
def test_current_report(self):
reporter = chainer.Reporter()
with reporter:
mean = self.evaluator()
# The result is reported to the current reporter.
self.assertEqual(reporter.observation, mean)
class TestEvaluatorTupleData(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.batches = [
(numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'))
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModelTwoArgs(self)
self.evaluator = extensions.Evaluator(
self.iterator, self.target, converter=self.converter, device=1)
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
mean = self.evaluator.evaluate()
# The converter gets results of the iterator and the device number.
self.assertEqual(len(self.converter.args), len(self.data))
for i in range(len(self.data)):
numpy.testing.assert_array_equal(
self.converter.args[i]['batch'], self.data[i])
self.assertEqual(self.converter.args[i]['device'], 1)
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i], self.batches[i])
expect_mean = numpy.mean([numpy.sum(x) for x in self.batches])
self.assertAlmostEqual(mean['target/loss'], expect_mean, places=4)
class TestEvaluatorDictData(unittest.TestCase):
def setUp(self):
self.data = range(2)
self.batches = [
{'x': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
'y': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')}
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModelTwoArgs(self)
self.evaluator = extensions.Evaluator(
self.iterator, self.target, converter=self.converter)
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
mean = self.evaluator.evaluate()
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i][0], self.batches[i]['x'])
numpy.testing.assert_array_equal(
self.target.args[i][1], self.batches[i]['y'])
expect_mean = numpy.mean(
[numpy.sum(x['x']) + numpy.sum(x['y']) for x in self.batches])
self.assertAlmostEqual(mean['target/loss'], expect_mean, places=4)
class TestEvaluatorWithEvalFunc(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.batches = [
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModel(self)
self.evaluator = extensions.Evaluator(
self.iterator, {}, converter=self.converter,
eval_func=self.target)
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
self.evaluator.evaluate()
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i], self.batches[i])
@testing.parameterize(*testing.product({
'repeat': [True, False],
'iterator_class': [iterators.SerialIterator,
| iterators.MultiprocessIterator,
iterators.MultithreadIterator]
}))
class TestEvaluatorRepeat(unittest.TestCase):
def test_user_warning(self):
dataset = numpy.ones((4, 6))
iterator = self.iterator_class(dataset, 2, repeat=self.repeat)
| if self.repeat:
with testing.assert_warns(UserWarning):
extensions.Evaluator(iterator, {})
testing.run_module(__name__, __f |
tyarkoni/transitions | tests/test_codestyle.py | Python | mit | 710 | 0 | import unittest
import pycodestyle
from os.path import exists
class TestCodeFormat(unittest.TestCase | ):
def test_conformance(self):
"""Test that we conform to PEP-8."""
style = pycodestyle.StyleGuide(quiet=False, ignore=['E501', 'W605'])
if exists('transitions'): # when run from root directory (e.g. tox)
style.input_dir('transitions')
style.input_dir('tests')
else: # when run from te | st directory (e.g. pycharm)
style.input_dir('../transitions')
style.input_dir('.')
result = style.check_files()
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
|
cirosantilli/sulley | sulley/primitives.py | Python | gpl-2.0 | 32,873 | 0.00721 | import random
import struct
########################################################################################################################
class base_primitive (object):
'''
The primitive base class implements common functionality shared across most primitives.
'''
def __init__ (self):
self.fuzz_complete = False # this flag is raised when the mutations are exhausted.
self.fuzz_library = [] # library of static fuzz heuristics to cycle through.
self.fuzzable = True # flag controlling whether or not the given primitive is to | be fuzzed.
self.mutant_index = 0 # current mutation index into the fuzz library.
self.original_value = None # | original value of primitive.
self.rendered = "" # rendered value of primitive.
self.value = None # current value of primitive.
def exhaust (self):
'''
Exhaust the possible mutations for this primitive.
@rtype: Integer
@return: The number of mutations to reach exhaustion
'''
num = self.num_mutations() - self.mutant_index
self.fuzz_complete = True
self.mutant_index = self.num_mutations()
self.value = self.original_value
return num
def mutate (self):
'''
Mutate the primitive by stepping through the fuzz library, return False on completion.
@rtype: Boolean
@return: True on success, False otherwise.
'''
# if we've run out of mutations, raise the completion flag.
if self.mutant_index == self.num_mutations():
self.fuzz_complete = True
# if fuzzing was disabled or complete, and mutate() is called, ensure the original value is restored.
if not self.fuzzable or self.fuzz_complete:
self.value = self.original_value
return False
# update the current value from the fuzz library.
self.value = self.fuzz_library[self.mutant_index]
# increment the mutation count.
self.mutant_index += 1
return True
def num_mutations (self):
'''
Calculate and return the total number of mutations for this individual primitive.
@rtype: Integer
@return: Number of mutated forms this primitive can take
'''
return len(self.fuzz_library)
def render (self):
'''
Nothing fancy on render, simply return the value.
'''
self.rendered = self.value
return self.rendered
def reset (self):
'''
Reset this primitive to the starting mutation state.
'''
self.fuzz_complete = False
self.mutant_index = 0
self.value = self.original_value
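# Illustrative sketch, not part of the original Sulley module: how a driver
# typically walks a primitive through its entire fuzz library using the API
# defined above ("primitive" stands for an instance of any subclass below).
def _iterate_mutations(primitive):
    rendered = []
    while primitive.mutate():        # returns False once the library is exhausted
        rendered.append(primitive.render())
    primitive.reset()                # restore the original value for reuse
    return rendered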
########################################################################################################################
class delim (base_primitive):
def __init__ (self, value, fuzzable=True, name=None):
'''
Represent a delimiter such as :,\r,\n, ,=,>,< etc... Mutations include repetition, substitution and exclusion.
@type value: Character
@param value: Original value
@type fuzzable: Boolean
@param fuzzable: (Optional, def=True) Enable/disable fuzzing of this primitive
@type name: String
@param name: (Optional, def=None) Specifying a name gives you direct access to a primitive
'''
self.value = self.original_value = value
self.fuzzable = fuzzable
self.name = name
self.s_type = "delim" # for ease of object identification
self.rendered = "" # rendered value
self.fuzz_complete = False # flag if this primitive has been completely fuzzed
self.fuzz_library = [] # library of fuzz heuristics
self.mutant_index = 0 # current mutation number
#
# build the library of fuzz heuristics.
#
# if the default delim is not blank, repeat it a bunch of times.
if self.value:
self.fuzz_library.append(self.value * 2)
self.fuzz_library.append(self.value * 5)
self.fuzz_library.append(self.value * 10)
self.fuzz_library.append(self.value * 25)
self.fuzz_library.append(self.value * 100)
self.fuzz_library.append(self.value * 500)
self.fuzz_library.append(self.value * 1000)
# try omitting the delimiter.
self.fuzz_library.append("")
# if the delimiter is a space, try throwing out some tabs.
if self.value == " ":
self.fuzz_library.append("\t")
self.fuzz_library.append("\t" * 2)
self.fuzz_library.append("\t" * 100)
# toss in some other common delimiters:
self.fuzz_library.append(" ")
self.fuzz_library.append("\t")
self.fuzz_library.append("\t " * 100)
self.fuzz_library.append("\t\r\n" * 100)
self.fuzz_library.append("!")
self.fuzz_library.append("@")
self.fuzz_library.append("#")
self.fuzz_library.append("$")
self.fuzz_library.append("%")
self.fuzz_library.append("^")
self.fuzz_library.append("&")
self.fuzz_library.append("*")
self.fuzz_library.append("(")
self.fuzz_library.append(")")
self.fuzz_library.append("-")
self.fuzz_library.append("_")
self.fuzz_library.append("+")
self.fuzz_library.append("=")
self.fuzz_library.append(":")
self.fuzz_library.append(": " * 100)
self.fuzz_library.append(":7" * 100)
self.fuzz_library.append(";")
self.fuzz_library.append("'")
self.fuzz_library.append("\"")
self.fuzz_library.append("/")
self.fuzz_library.append("\\")
self.fuzz_library.append("?")
self.fuzz_library.append("<")
self.fuzz_library.append(">")
self.fuzz_library.append(".")
self.fuzz_library.append(",")
self.fuzz_library.append("\r")
self.fuzz_library.append("\n")
self.fuzz_library.append("\r\n" * 64)
self.fuzz_library.append("\r\n" * 128)
self.fuzz_library.append("\r\n" * 512)
########################################################################################################################
class group (base_primitive):
def __init__ (self, name, values):
'''
This primitive represents a list of static values, stepping through each one on mutation. You can tie a block
to a group primitive to specify that the block should cycle through all possible mutations for *each* value
within the group. The group primitive is useful for example for representing a list of valid opcodes.
@type name: String
@param name: Name of group
@type values: List or raw data
@param values: List of possible raw values this group can take.
'''
self.name = name
self.values = values
self.fuzzable = True
self.s_type = "group"
self.value = self.values[0]
self.original_value = self.values[0]
self.rendered = ""
self.fuzz_complete = False
self.mutant_index = 0
# sanity check that values list only contains strings (or raw data)
if self.values != []:
for val in self.values:
assert type(val) is str, "Value list may only contain strings or raw data"
def mutate (self):
'''
Move to the next item in the values list.
@rtype: False
@return: False
'''
if self.mutant_index == self.num_mutations():
self.fuzz_complete = True
# if fuzzing was disabled or complete, and mutate() is called, ensure the original value is restored.
if not self.fuzzable or self.fuzz_complete:
self.value = self.values[0]
return False
# step through the value list.
self.value = se |
lvh/txeasymail | txeasymail/interface.py | Python | isc | 541 | 0.001848 | """
Interface definition.
"""
from zope import interface
class IMailer(interface.Interface):
"""
An ob | ject that sends e-mail.
"""
def send(sender, recipient, content):
"""
S | ends the content to the recipient as the sender.
"""
class ITemplate(interface.Interface):
"""
An e-mail template.
"""
def evaluate(context):
"""
Evaluates a template given a context.
Returns a pair of headers and parts that can be used to build a MIME
message.
"""
|
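# Illustrative sketch, not part of this module: a minimal in-memory mailer that
# provides IMailer, e.g. for unit tests. The class name and behaviour are
# assumptions, not an API offered by txeasymail.
@interface.implementer(IMailer)
class _RecordingMailer(object):
    def __init__(self):
        self.sent = []

    def send(self, sender, recipient, content):
        """Record the message instead of delivering it."""
        self.sent.append((sender, recipient, content))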
llvmpy/llvmpy | llvm/tests/test_exact.py | Python | bsd-3-clause | 1,076 | 0.003717 | import unittest
from llvm.core import (Module, Type, Builder)
from .support import TestCase, tests
class TestExact(TestCase):
def make_module(self):
mod = Module.new('asdfa')
fnty = Type.function(Type.void(), [Type.int()] * 2)
func = mod.add_function(fnty, 'foo')
bldr = Builder.new(func.append_basic_block(''))
return mod, func, bldr
def has_exact(self, inst, op):
self.assertTrue(('%s exact' % op) in str(inst), "exact flag does not work")
def _test_template(self, opf, opname):
mod, func, bldr = self.make_module()
a, b = func.args
self.has_exact(opf(bldr, a, b, exact=True), opna | me)
def test_udiv_exact(self):
self._test_template(Builder.udiv, 'udiv')
def test_sdiv_exact(self):
self._test_template(Builder.sdiv, 'sdiv')
def test_lshr_exact(self):
self._test_template(Builder.lshr, 'lshr')
def test_ashr_exact(self):
self._test_template(Builder.ashr, | 'ashr')
tests.append(TestExact)
if __name__ == '__main__':
unittest.main()
|
dchaplinsky/pep.org.ua | pepdb/tasks/management/commands/load_companies.py | Python | mit | 11,229 | 0.001812 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from random import randrange
import requests
import os.path
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
import logging
from io import TextIOWrapper, open
from unicodecsv import DictReader
from itertools import islice
from zipfile import ZipFile
from cStringIO import StringIO
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
from elasticsearch_dsl import Index
from elasticsearch_dsl.connections import connections
from elasticsearch.helpers import bulk
from dateutil.parser import parse
from tasks.elastic_models import EDRPOU
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("reader")
class EDRImportException(Exception):
pass
class EDR_Reader(object):
"""
Simple reader class which allows to iterate over Zipped/not Zipped XML/CSV file
"""
def __init__(self, in_file, timestamp, revision, file_type="zip"):
"""
Initializes EDR_Reader class
:param in_file: file object (zipped or not)
:type in_file: StringIO or file handler
:param timestamp: date of export of the file
:type timestamp: datetime
:param revision: revision of the dump
:type revision: string
:param file_type: type of the file (usually extension)
:type file_type: string
"""
self.file = in_file
self.file_type = file_type
self.timestamp = timestamp
self.revision = revision
def iter_docs(self):
"""
Reads input file record by record.
:returns: iterator over company records from registry
:rtype: collections.Iterable[dict]
"""
if self.file_type == "zip":
with ZipFile(self.file) as zip_arch:
for fname in zip_arch.namelist():
try:
dec_fname = unicode(fname)
except UnicodeDecodeError:
dec_fname = fname.decode("cp866")
if "uo" in dec_fname.lower() or "юо" in dec_fname.lower():
logger.info("Reading {} file from archive {}".format(dec_fname, self.file))
if dec_fname.lower().endswith(".xml"):
with zip_arch.open(fname, 'r') as fp_raw:
for l in self._iter_xml(fp_raw):
yield EDRPOU(**l).to_dict(True)
if dec_fname.lower().endswith(".csv"):
with zip_arch.open(fname, 'r') as fp_raw:
for l in self._iter_csv(fp_raw):
yield EDRPOU(**l).to_dict(True)
elif self.file_type == "xml":
for l in self._iter_xml(self.file):
yield EDRPOU(**l).to_dict(True)
elif self.file_type == "csv":
for l in self._iter_csv(self.file):
yield EDRPOU(**l).to_dict(True)
def _iter_xml(self, fp_raw):
"""
Regex magic is required t | o
| cover records that were incorrectly exported or are incomplete, thus
making the whole XML file invalid (happens sometimes)
"""
with TextIOWrapper(fp_raw, encoding="cp1251") as fp:
mapping = {
'NAME': 'name',
'SHORT_NAME': 'short_name',
'EDRPOU': 'edrpou',
'ADDRESS': 'location',
'BOSS': 'head',
'KVED': 'company_profile',
'STAN': 'status',
'FOUNDERS': 'founders',
"Найменування": 'name',
"Скорочена_назва": 'short_name',
"Код_ЄДРПОУ": 'edrpou',
"Місцезнаходження": 'location',
"ПІБ_керівника": 'head',
"Основний_вид_діяльності": 'company_profile',
"Стан": 'status',
"C0": ""
}
content = fp.read()
if "RECORD" in content[:1000]:
regex = '<RECORD>.*?</RECORD>'
else:
regex = '<ROW>.*?</ROW>'
for i, chunk in enumerate(re.finditer(regex, content, flags=re.S | re.U)):
company = {}
founders_list = []
try:
# Fucking ET!
etree = ET.fromstring(chunk.group(0).replace("Місцезнаходження", "ADDRESS").encode("utf-8"))
except ParseError:
logger.error('Cannot parse record #{}, {}'.format(i, chunk))
continue
for el in etree.getchildren():
if el.tag == 'EDRPOU' and el.text and el.text.lstrip('0'):
company[mapping[el.tag]] = int(el.text)
elif el.tag == 'FOUNDERS':
for founder in el.getchildren():
founders_list.append(founder.text)
else:
if el.tag in mapping:
company[mapping[el.tag]] = el.text
company[mapping['FOUNDERS']] = founders_list
company["last_update"] = self.timestamp
company["file_revision"] = self.revision
if i and i % 50000 == 0:
logger.warning('Read {} companies from XML feed'.format(i))
yield company
def _iter_csv(self, fp_raw):
r = DictReader(fp_raw, delimiter=str(";"), encoding="cp1251")
mapping = {
"Найменування": 'name',
"Скорочена назва": 'short_name',
"Код ЄДРПОУ": 'edrpou',
"Місцезнаходження": 'location',
"ПІБ керівника": 'head',
"Основний вид діяльності": 'company_profile',
"Стан": 'status',
}
for i, chunk in enumerate(r):
company = {}
for k, v in chunk.items():
if k.strip():
if mapping[k] == "edrpou" and v:
company[mapping[k]] = int(v)
else:
company[mapping[k]] = v
company['founders'] = []
company["last_update"] = self.timestamp
company["file_revision"] = self.revision
if i and i % 50000 == 0:
logger.warning('Read {} companies from CSV feed'.format(i))
yield company
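# Illustrative sketch (assumed usage, not part of the original management
# command): counting the companies in a downloaded dump. The file name and
# revision string are placeholders.
def _count_companies_example(path="edr_dump.zip"):
    with open(path, "rb") as fp:
        reader = EDR_Reader(fp, timezone.now(), "rev-placeholder", file_type="zip")
        return sum(1 for _ in reader.iter_docs())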
class Command(BaseCommand):
help = ('Loads XML with data from registry of companies of Ukraine into '
'elastic index for further matching with companies in DB')
def add_arguments(self, parser):
parser.add_argument(
'--revision',
help='EDR dump revision to retrieve (leave empty to retrieve latest)',
)
parser.add_argument(
'--guid',
default="06bbccbd-e19c-40d5-9e18-447b110c0b4c",
help='Dataset to retrieve',
)
parser.add_argument(
'--filename',
help='Filename of the dump to load file manually',
)
parser.add_argument(
'--dump_date',
help='Date of dump, obtained manually, day first',
)
def handle(self, *args, **options):
self.proxies = {}
if hasattr(settings, "PROXY"):
self.proxies["http"] = settings.PROXY
self.proxies["https"] = settings.PROXY
GUID = options["guid"]
fp = None
if not options["revision"]:
latest = EDRPOU.search().aggs.metric("max_last_update", "max", field="last_update")[:1].execute()
if latest:
update_after = latest[0].last_update
self.stdout.write("Only loading dumps after {}".format(update_after))
else:
raise EDRImportException("Current index is empty, please run manual import. For fuck sake")
if not options["filename"]:
data_url = None
timestamp = None
revision = None
try:
respons |
luanfonceca/econet | econet/accounts/migrations/0001_initial.py | Python | mit | 5,920 | 0.007264 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'User'
db.create_table(u'accounts_user', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.Boole | anField')(default=False)),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=254)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, null=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=254, null=True)),
('date_joined', self.gf('django.db.models.fields.DateTime | Field')(default=datetime.datetime.now)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_colector', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'accounts', ['User'])
# Adding M2M table for field groups on 'User'
m2m_table_name = db.shorten_name(u'accounts_user_groups')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('user', models.ForeignKey(orm[u'accounts.user'], null=False)),
('group', models.ForeignKey(orm[u'auth.group'], null=False))
))
db.create_unique(m2m_table_name, ['user_id', 'group_id'])
# Adding M2M table for field user_permissions on 'User'
m2m_table_name = db.shorten_name(u'accounts_user_user_permissions')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('user', models.ForeignKey(orm[u'accounts.user'], null=False)),
('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
))
db.create_unique(m2m_table_name, ['user_id', 'permission_id'])
def backwards(self, orm):
# Deleting model 'User'
db.delete_table(u'accounts_user')
# Removing M2M table for field groups on 'User'
db.delete_table(db.shorten_name(u'accounts_user_groups'))
# Removing M2M table for field user_permissions on 'User'
db.delete_table(db.shorten_name(u'accounts_user_user_permissions'))
models = {
u'accounts.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_colector': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] |
iwob/evoplotter | app/cdgp2/cdgp_processor.py | Python | mit | 20,853 | 0.005946 | from evoplotter import utils
from evoplotter import printer
from evoplotter import reporting
from evoplotter.dims import *
import numpy
def load_correct_props(folders, name = ""):
props0 = utils.load_properties_dirs(folders, exts=[".txt.cdgp"])
all_logs = utils.load_properties_dirs(folders, exts=[".txt.cdgp", ".txt"])
print("\n****** Loading props: " + name)
def is_correct(p):
return "status" in p and (p["status"] == "completed" or p["status"] == "initialized") and\
"result.best.eval" in p and "benchmark" in p
def is_obsolete(p):
return False #p["searchAlgorithm"].endswith("SteadyState") and\
#"info.steadyStateWithZeroInitialTests" not in p
# Printing names of files which finished with error status or are incomplete.
props_errors = [p for p in props0 if not is_correct(p)]
if len(props_errors) > 0:
print("Files with error status:")
for p in props_errors:
if "thisFileName" in p:
print(p["thisFileName"])
else:
print("'thisFileName' not specified! Printing content instead: " + str(p))
# Filtering props so only correct ones are left
props = [p for p in props0 if is_correct(p) and not is_obsolete(p)]
print("Loaded: {0} correct property files, {1} incorrect; All log files: {2}".format(len(props), len(props_errors), len(all_logs)))
return props
def produce_status_matrix(grid, props):
text = "["
for config in grid:
numRuns = len(config.filter_props(props))
benchmark = config.stored_values["benchmark"]
method = config.stored_values["method"]
sa = config.stored_values["searchAlgorithm"]
text += "('{0}', '{1}', '{2}', {3}), ".format(benchmark, method, sa, numRuns)
return text + "]"
def p_GP(p):
return p["searchAlgorithm"] == "GP"
def p_GPSteadyState(p):
return p["searchAlgorithm"] == "GPSteadyState"
def p_Lexicase(p):
return p["searchAlgorithm"] == "Lexicase"
def p_LexicaseSteadyState(p):
return p["searchAlgorithm"] == "LexicaseSteadyState"
def p_method0(p):
return p["method"] == "CDGP"
def p_method1(p):
return p["method"] == "CDGPcons"
def p_method2(p):
return p["method"] == "GPR"
def p_Generational(p):
return p["searchAlgorithm"] == "Lexicase" or p["searchAlgorithm"] == "GP"
def p_SteadyState(p):
return p["searchAlgorithm"] == "LexicaseSteadyState" or p["searchAlgorithm"] == "GPSteadyState"
def p_sel_lexicase(p):
return p["searchAlgorithm"] == "LexicaseSteadyState" or p["searchAlgorithm"] == "Lexicase"
def p_sel_tourn(p):
return p["searchAlgorithm"] == "GPSteadyState" or p["searchAlgorithm"] == "GP"
d1 = "benchmarks/LIA/cdgp_gecco17/other/"
d2 = "benchmarks/LIA/cdgp_gecco17/"
benchmarks_simple_names = {d1 + "ArithmeticSeries3.sl": "IsSeries3",
d1 + "ArithmeticSeries4.sl": "IsSeries4",
d1 + "CountPositive2.sl": "CountPos2",
d1 + "CountPositive3.sl": "CountPos3",
d1 + "Median3.sl": "Median3",
d1 + "Range3.sl": "Range3",
d1 + "SortedAscending4.sl": "IsSorted4",
d2 + "fg_array_search_2.sl": "Search2",
d2 + "fg_array_search_4.sl": "Search4",
d2 + "fg_array_sum_2_15.sl": "Sum2",
d2 + "fg_array_sum_4_15.sl": "Sum4",
d2 + "fg_max2.sl": "Max2",
d2 + "fg_max4.sl": " | Max4"}
dim_method = | Dim([Config("CDGP non-conservative", p_method0, method="CDGP"),
Config("CDGP conservative", p_method1, method="CDGPcons"),
Config("GPR", p_method2, method="GPR")])
dim_sa = Dim([Config("GP", p_GP, searchAlgorithm="GP"),
Config("GPSS", p_GPSteadyState, searchAlgorithm="GPSteadyState"),
Config("Lex", p_Lexicase, searchAlgorithm="Lexicase"),
Config("LexSS", p_LexicaseSteadyState, searchAlgorithm="LexicaseSteadyState")])
dim_ea_type = Dim([Config("Gener.", p_Generational),
Config("SteadySt.", p_SteadyState)])
dim_sel = Dim([Config("$Tour$", p_sel_tourn),
Config("$Lex$", p_sel_lexicase)])
dim_sa_ss = Dim([
Config("GPSS", p_GPSteadyState, searchAlgorithm="GPSteadyState"),
Config("LexSS", p_LexicaseSteadyState, searchAlgorithm="LexicaseSteadyState")])
# dim_sa = Dim([Config("$CDGP$", p_GP),
# Config("$CDGP^{ss}$", p_GPSteadyState),
# Config("$CDGP_{lex}$", p_Lexicase),
# Config("$CDGP_{lex}^{ss}$", p_LexicaseSteadyState)])
def is_optimal_solution(p):
return "result.best.isOptimal" in p and p["result.best.isOptimal"] == "true"
def get_num_optimal(props):
props2 = [p for p in props if is_optimal_solution(p)]
return len(props2)
def get_num_computed(filtered):
return len(filtered)
def fun_successRates_full(filtered):
if len(filtered) == 0:
return "-"
num_opt = get_num_optimal(filtered)
return "{0}/{1}".format(str(num_opt), str(len(filtered)))
def fun_successRates(filtered):
if len(filtered) == 0:
return "-"
num_opt = get_num_optimal(filtered)
sr = float(num_opt) / float(len(filtered))
return "{0}".format("%0.2f" % sr)
def get_stats_size(props):
vals = [float(p["result.best.size"]) for p in props]
if len(vals) == 0:
return "-"#-1.0, -1.0
else:
return "%0.1f" % numpy.mean(vals)#, numpy.std(vals)
def get_stats_maxSolverTime(props):
if len(props) == 0:
return "-"
times = []
for p in props:
timesMap = p["cdgp.solverAllTimesCountMap"]
parts = timesMap.split(", ")[-1].split(",")
times.append(float(parts[0].replace("(", "")))
return max(times)
def get_stats_avgSolverTime(props):
if len(props) == 0:
return "-"
sum = 0.0
sumWeights = 0.0
for p in props:
timesMap = p["cdgp.solverAllTimesCountMap"]
pairs = timesMap.split(", ")
if len(pairs) == 0:
continue
for x in pairs:
time = float(x.split(",")[0].replace("(", ""))
weight = float(x.split(",")[1].replace(")", ""))
sum += time * weight
sumWeights += weight
if sumWeights == 0.0:
return "%0.3f" % 0.0
else:
return "%0.3f" % (sum / sumWeights)
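# Illustrative note (assumed log format, not taken from the experiment files):
# "cdgp.solverAllTimesCountMap" is expected to hold comma-separated
# "(time,count)" pairs such as "(0.001,500), (0.52,3)", so the weighted average
# computed above would be (0.001*500 + 0.52*3) / 503, roughly 0.004.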
def get_avgSolverTotalCalls(props):
if len(props) == 0:
return "-"
vals = [float(p["cdgp.solverTotalCalls"]) for p in props]
return "%d" % round(numpy.mean(vals))
def get_numSolverCallsOverXs(props):
if len(props) == 0:
return "-"
TRESHOLD = 0.5
sum = 0
for p in props:
timesMap = p["cdgp.solverAllTimesCountMap"]
pairs = timesMap.split(", ")
if len(pairs) == 0:
continue
for x in pairs:
time = float(x.split(",")[0].replace("(", ""))
if time > TRESHOLD:
# print("Name of file: " + p["thisFileName"])
weight = int(x.split(",")[1].replace(")", ""))
sum += weight
return sum
def get_avg_totalTests(props):
vals = [float(p["cdgp.totalTests"]) for p in props]
if len(vals) == 0:
return "-" # -1.0, -1.0
else:
x = numpy.mean(vals)
if x < 1e-5:
x = 0.0
return "%0.1f" % x
def get_avg_fitness(props):
vals = []
for p in props:
if "result.best.passedTestsRatio" in p:
ratio = float(p["result.best.passedTestsRatio"])
vals.append(ratio)
else:
raise Exception("Information about fitness is unavailable!")
if len(vals) == 0:
return "-" # -1.0, -1.0
else:
return "%0.2f" % numpy.mean(vals) # , numpy.std(vals)
def get_avg_runtime_helper(vals):
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
return "%0.1f" % numpy.mean(vals) # , numpy.std(vals)
def get_avg_runtimeOnlySuccessful(props):
if len(props) == 0:
return "-"
else:
vals = [float(p["result.totalTimeSyste |
tejal29/pants | src/python/pants/backend/jvm/tasks/eclipse_gen.py | Python | apache-2.0 | 6,935 | 0.00721 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.tasks.ide_gen import IdeGen
from pants.base.build_environment import get_buildroot
from pants.base.generator import Generator, TemplateData
from pants.util.dirutil import safe_delete, safe_mkdir, safe_open
_TEMPLATE_BASEDIR = os.path.join('templates', 'eclipse')
_VERSIONS = {
'3.5': '3.7', # 3.5-3.7 are .project/.classpath compatible
'3.6': '3.7',
'3.7': '3.7',
}
_SETTINGS = (
'org.eclipse.core.resources.prefs',
'org.eclipse.jdt.ui.prefs',
)
class EclipseGen(IdeGen):
@classmethod
def register_options(cls, register):
super(EclipseGen, cls).register_options(register)
register('--version', choices=sorted(list(_VERSIONS.keys())), default='3.6',
help='The Eclipse version the project configuration should be generated for.')
def __init__(self, *args, **kwargs):
super(EclipseGen, self).__init__(*args, **kwargs)
version = _VERSIONS[self.get_options().version]
self.project_template = os.path.join(_TEMPLATE_BASEDIR, 'project-%s.mustache' % version)
self.classpath_template = os.path.join(_TEMPLATE_BASEDIR, 'classpath-%s.mustache' % version)
self.apt_template = os.path.join(_TEMPLATE_BASEDIR, 'factorypath-%s.mustache' % version)
self.pydev_template = os.path.join(_TEMPLATE_BASEDIR, 'pydevproject-%s.mustache' % version)
self.debug_template = os.path.join(_TEMPLATE_BASEDIR, 'debug-launcher-%s.mustache' % version)
self.coreprefs_template = os.path.join(_TEMPLATE_BASEDIR,
'org.eclipse.jdt.core.prefs-%s.mustache' % version)
self.project_filename = os.path.join(self.cwd, '.project')
self.classpath_filename = os.path.join(self.cwd, '.classpath')
self.apt_filename = os.path.join(self.cwd, '.factorypath')
self.pydev_filename = os.path.join(self.cwd, '.pydevproject')
self.coreprefs_filename = os.path.join(self.cwd, '.settings', 'org.eclipse.jdt.core.prefs')
def generate_project(self, project):
def linked_folder_id(source_set):
return source_set.source_base.replace(os.path.sep, '.')
def base_path(source_set):
return os.path.join(source_set.root_dir, source_set.source_base)
def create_source_base_template(source_set):
source_base = base_path(source_set)
return source_base, TemplateData(
id=linked_folder_id(source_set),
path=source_base
)
source_bases = dict(map(create_source_base_template, project.sources))
if project.has_python:
source_bases.update(map(create_source_base_template, project.py_sources))
source_bases.update(map(create_source_base_template, project.py_libs))
def create_source_template(base_id, includes=None, excludes=None):
return TemplateData(
base=base_id,
includes='|'.join(OrderedSet(includes)) if includes else None,
excludes='|'.join(OrderedSet(excludes)) if excludes else None,
)
def create_sourcepath(base_id, sources):
def normalize_path_pattern(path):
return '%s/' % path if not path.endswith('/') else path
includes = [normalize_path_pattern(src_set.path) for src_set in sources if src_set.path]
excludes = []
for source_set in sources:
excludes.extend(normalize_path_pattern(exclude) for exclude in source_set.excludes)
return create_source_template(base_id, includes, excludes)
pythonpaths = []
if project.has_python:
for source_set in project.py_sources:
pythonpaths.append(create_source_template(linked_folder_id(source_set)))
for source_set in project.py_libs:
lib_path = source_set.path if source_set.path.endswith('.egg') else '%s/' % source_set.path
pythonpaths.append(create_source_template(linked_folder_id(source_set),
includes=[lib_path]))
configured_project = TemplateData(
name=self.project_name,
java=TemplateData(
jdk=self.java_jdk,
language_level=('1.%d' % self.java_language_level)
),
python=project.has_python,
scala=project.has_scala and not project.skip_scala,
source_bases=source_bases.values(),
pythonpaths=pythonpaths,
debug_port=project.debug_port,
)
outdir = os.path.abspath(os.path.join(self.gen_project_workdir, 'bin'))
safe_mkdir(outdir)
source_sets = defaultdict(OrderedSet) # base_id -> source_set
for source_set in project.sources:
source_sets[linked_folder_id(source_set)].add(source_set)
sourcepaths = [create_sourcepath(base_id, sources) for base_id, sources in source_sets.items()]
libs = list(project.internal_jars)
libs.extend(project.external_jars)
configured_classpath = TemplateData(
sourcepaths=sourcepaths,
has_tests=project.has_tests,
libs=libs,
scala=project.has_scala,
# Eclipse insists the outdir | be a relative path unlike other paths
outdir=os.path.relpath(outdir, get_buildroot()),
)
def apply_template(output_path, template_relpath, **template_data):
with s | afe_open(output_path, 'w') as output:
Generator(pkgutil.get_data(__name__, template_relpath), **template_data).write(output)
apply_template(self.project_filename, self.project_template, project=configured_project)
apply_template(self.classpath_filename, self.classpath_template, classpath=configured_classpath)
apply_template(os.path.join(self.gen_project_workdir,
'Debug on port %d.launch' % project.debug_port),
self.debug_template, project=configured_project)
apply_template(self.coreprefs_filename, self.coreprefs_template, project=configured_project)
for resource in _SETTINGS:
with safe_open(os.path.join(self.cwd, '.settings', resource), 'w') as prefs:
prefs.write(pkgutil.get_data(__name__, os.path.join(_TEMPLATE_BASEDIR, resource)))
factorypath = TemplateData(
project_name=self.project_name,
# The easiest way to make sure eclipse sees all annotation processors is to put all libs on
# the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
jarpaths=libs
)
apply_template(self.apt_filename, self.apt_template, factorypath=factorypath)
if project.has_python:
apply_template(self.pydev_filename, self.pydev_template, project=configured_project)
else:
safe_delete(self.pydev_filename)
print('\nGenerated project at %s%s' % (self.gen_project_workdir, os.sep))
|
manahl/arctic | arctic/store/audit.py | Python | lgpl-2.1 | 6,512 | 0.005375 | """
Handle audited data changes.
"""
import logging
from functools import partial
from pymongo.errors import OperationFailure
from .versioned_item import VersionedItem, ChangedItem
from .._util import are_equals
from ..decorators import _get_host
from ..exceptions import NoDataFoundException, ConcurrentModificationException
logger = logging.getLogger(__name__)
class DataChange(object):
"""
Object representing incoming data change
"""
def __init__(self, date_range, new_data):
self.date_range = date_range
| self.new_data = new_data
class ArcticTransaction(object):
"""Use this context manager if you want to modify data in a version store while ensuring that no other writes
interfere with your own.
To use, base your modifications on the `base_ts` context manager field, then pass your newly created timeseries to the
context manager's `write` method to output the changes. The changes will only be written when the block
exits.
NB changes may be audited.
Exa | mple:
-------
with ArcticTransaction(Arctic('hostname')['some_library'], 'symbol') as mt:
ts_version_info = mt.base_ts
# do some processing, come up with a new ts for 'symbol' called new_symbol_ts, presumably based on ts_version_info.data
mt.write('symbol', new_symbol_ts, metadata=new_symbol_metadata)
The block will raise a ConcurrentModificationException if an inconsistency has been detected. You will have to
retry the whole block should that happen, as the assumption is that you need to base your changes on a different
starting timeseries.
"""
def __init__(self, version_store, symbol, user, log, modify_timeseries=None, audit=True,
*args, **kwargs):
"""
Parameters
----------
version_store: `VersionStore` Arctic Library
Needs to support write, read, list_versions, _delete_version; this is the underlying store that we'll
be securing for write
symbol: `str`
symbol name for the item that's being modified
user: `str`
user making the change
log: `str`
Log message for the change
modify_timeseries:
if given, it will check the assumption that this is the latest data available for symbol in version_store
Should this not be the case, a ConcurrentModificationException will be raised. Use this if you're
interacting with code that has already read the data and for some reason you cannot refactor the read-write
operation to be contained within this context manager
audit: `bool`
should we 'audit' the transaction. An audited write transaction is equivalent to a snapshot
before and after the data change - i.e. we won't prune versions of the data involved in an
audited transaction. This can be used to ensure that the history of certain data changes is
preserved indefinitely.
all other args:
Will be passed into the initial read
"""
self._version_store = version_store
self._symbol = symbol
self._user = user
self._log = log
self._audit = audit
logger.info("MT: {}@{}: [{}] {}: {}".format(_get_host(version_store).get('l'),
_get_host(version_store).get('mhost'),
user, log, symbol))
try:
self.base_ts = self._version_store.read(self._symbol, *args, **kwargs)
except NoDataFoundException:
versions = [x['version'] for x in self._version_store.list_versions(self._symbol, latest_only=True)]
versions.append(0)
self.base_ts = VersionedItem(symbol=self._symbol, library=None,
version=versions[0], metadata=None, data=None, host=None)
except OperationFailure:
# TODO: Current errors in mongo "Incorrect Number of Segments Returned"
# This workaround should be removed once underlying problem is resolved.
self.base_ts = self._version_store.read_metadata(symbol=self._symbol)
if modify_timeseries is not None and not are_equals(modify_timeseries, self.base_ts.data):
raise ConcurrentModificationException()
self._do_write = False
def change(self, symbol, data_changes, **kwargs):
"""
Change, and audit 'data' under the specified 'symbol' name to this library.
Parameters
----------
symbol: `str`
symbol name for the item
data_changes: `list DataChange`
list of DataChange objects
"""
pass
def write(self, symbol, data, prune_previous_version=True, metadata=None, **kwargs):
"""
Records a write request to be actioned on context exit. Takes exactly the same parameters as the regular
library write call.
"""
if data is not None:
# We only write data if existing data is None or the Timeseries data has changed or metadata has changed
if self.base_ts.data is None or not are_equals(data, self.base_ts.data) or metadata != self.base_ts.metadata:
self._do_write = True
self._write = partial(self._version_store.write, symbol, data, prune_previous_version=prune_previous_version,
metadata=metadata, **kwargs)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
if self._do_write:
written_ver = self._write()
versions = [x['version'] for x in self._version_store.list_versions(self._symbol)]
versions.append(0)
versions.reverse()
base_offset = versions.index(self.base_ts.version)
new_offset = versions.index(written_ver.version)
if len(versions[base_offset: new_offset + 1]) != 2:
self._version_store._delete_version(self._symbol, written_ver.version)
raise ConcurrentModificationException("Inconsistent Versions: {}: {}->{}".format(
self._symbol, self.base_ts.version, written_ver.version))
changed = ChangedItem(self._symbol, self.base_ts, written_ver, None)
if self._audit:
self._version_store._write_audit(self._user, self._log, changed)
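# Illustrative sketch (assumed usage, mirroring the class docstring above; the
# symbol, messages and metadata are placeholders):
#
# with ArcticTransaction(library, 'SYM', 'user', 'log msg',
#                        modify_timeseries=previously_read_ts) as mt:
#     mt.write('SYM', new_ts, metadata={'source': 'example'})
#
# Passing modify_timeseries makes the constructor raise
# ConcurrentModificationException if 'SYM' changed since previously_read_ts was read.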
|
nim65s/django-cineclub | setup.py | Python | gpl-3.0 | 1,266 | 0.00079 | import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), "README.md")) as readme:
README = readme.read()
with open(os.path.join(os.path.dirname(__file__), "requirements.in")) as requirements:
REQUIREMENTS = [
req.split("#egg=")[1] if "#egg=" in req else req
for req in requirements.readlines()
]
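# For illustration (hypothetical requirement lines, not the project's actual
# requirements.in): an entry such as
#   git+https://github.com/example/pkg#egg=example-pkg
# is reduced to "example-pkg" by the split above, while a plain line like
#   Django>=2.0
# passes through unchanged.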
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-cineclub",
version="3.0.0",
packages=["cine"],
install_requires=REQUIREMENTS,
include_package_data=True,
license="GPL License",
description="A Django app to manage a cineclub.",
long_description=README,
url="https://saurel.me/",
author="Guilhem Saurel",
autho | r_email="webmaster@saurel.me",
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: GPL License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Topic :: Internet :: WWW/HTTP",
"Topic :: I | nternet :: WWW/HTTP :: Dynamic Content",
],
)
|
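Editor's note: the list comprehension above turns each line of requirements.in into a name setuptools can install, keeping only the egg name when a line is a VCS URL. A small sketch of that transform with hypothetical input:

    # Hypothetical illustration of the '#egg=' extraction used in the setup.py above.
    reqs = ["Django>=2.0\n", "git+https://github.com/foo/bar#egg=bar\n"]
    names = [r.split("#egg=")[1] if "#egg=" in r else r for r in reqs]
    # names == ["Django>=2.0\n", "bar\n"]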
puhitaku/AutoChime | ntptime_kai.py | Python | mit | 1,216 | 0.002467 | # Copy of ntptime.py with millisecond-accurate sync
try:
import usocket as socket
except:
import socket
try:
import ustruct as struct
except:
import struct
# (date(2000, 1, 1) - date(1900, 1, 1)).days * 24*60*60
NTP_DELTA = 3155673600
MILLIS_PER_SECOND = 1000
host = "pool.ntp.org"
def time(ms_accuracy=False):
NTP_QUERY = bytearray(48)
NTP_QUERY[0] = 0x1b
addr = socket.getaddrinfo(host, 123)[0][-1]
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(1)
res = s.sendto(NTP_QUERY, addr)
msg = s.recv(48)
s.close()
sec = struct.unpack("!I", msg[40:44])[0] - NTP_DELTA
frac = struct.unpack("!I", msg[44:48])[0] * MILLIS_PER_SECOND >> 32
if ms_accuracy:
return sec, frac
return sec
# There's currently no timezone support in MicroPython, so
# utime.localtime() will return UTC time (as if it was .gmtime())
def settime(ms_accuracy=False):
import machine
import utime
if ms_accuracy:
t, ms = time(ms_accuracy=True)
else:
t, ms = time(ms_accuracy=False), 0
tm = utime.localtime(t)
tm = tm[0:3] + (0,) + tm[3:6] + (ms,)
r = machine.RTC()
r.datetime(tm)
print(r.datetime())
|
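Editor's note: a short usage sketch for the module above. It assumes the file is importable as `ntptime_kai` on a MicroPython board with working networking and a reachable NTP host; those assumptions are not part of the original file:

    # Hypothetical usage on a MicroPython board (requires an active network connection).
    import ntptime_kai
    sec, ms = ntptime_kai.time(ms_accuracy=True)   # seconds since 2000 plus millisecond fraction
    ntptime_kai.settime(ms_accuracy=True)          # set the RTC including the millisecond field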
jayceyxc/hue | desktop/core/ext-py/navoptapi-0.1.0/navoptapi/old_serialize.py | Python | apache-2.0 | 3,364 | 0.000595 | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # Python 2.6
import json
class Serializer(object):
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
# Don't serialize any parameter with a None value.
filtered_parameters = OrderedDict(
(k, v) for k, v in parameters.items() if v is not None)
serialized = {}
# serialized['method'] = operation_model.http['method']
# serialized['headers'] = {'Content-Type': 'application/json'}
# serialized['url_path'] = operation_model.http['requestUri']
serialized_body = OrderedDict()
if len(filtered_parameters) != 0:
self._serialize(serialized_body, filtered_parameters, None)
serialized['body'] = json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
return serialized
def _serialize(self, serialized, value, shape, key=None):
# serialize_method_name = '_serialize_type_%s' % shape.type_name
# method = getattr(self, serialize_method_name, self._default_serialize)
        self._default_serialize(serialized, value, shape, key)
def _serialize_type_object(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive call, so we
# need to add a new child dict as the value of the passed in dict.
# Below we will add all the structure members to the new serialized
# dictionary we just created.
serialized[key] = OrderedDict()
            serialized = serialized[key]
for member_key, member_value in value.items():
member_shape = shape.members[member_key]
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_array(self, serialized, value, shape, key):
array_obj = []
serialized[key] = array_obj
for array_item in value:
wrapper = {}
# JSON list serialization is the only case where we aren't setting
# a key on a dict. We handle this by using a __current__ key on a
# wrapper dict to serialize each list item before appending it to
# the serialized list.
self._serialize(wrapper, array_item, shape.member, "__current__")
array_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
if key:
serialized[key] = value
else:
for member_key, member_value in value.items():
serialized[member_key] = member_value
|
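Editor's note: with no shape information, `_serialize` falls through to `_default_serialize`, so `serialize_to_request` simply drops None-valued parameters and JSON-encodes the rest into the request body. A minimal sketch; the parameter names are assumptions and the `operation_model` argument is unused on this path, so None is passed:

    # Hypothetical round trip through the Serializer above.
    s = Serializer()
    out = s.serialize_to_request({'query': 'SELECT 1', 'engine': None}, None)
    # out['body'] == b'{"query": "SELECT 1"}'  (the None-valued parameter is filtered out)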
mediatum/mediatum | web/edit/modules/content.py | Python | gpl-3.0 | 5,793 | 0.002935 | """
mediatum - a multimedia content repository
Copyright (C) 2009 Arne Seifert <seiferta@in.tum.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
from core import db
from contenttypes import Data, Home, Collection, Collections
from core.systemtypes import Root
from web.edit.edit_common import showdir, shownav, showoperations, default_edit_nodes_per_page,\
edit_node_per_page_values, searchbox_navlist_height
from web.frontend.frame import render_edit_search_box
from utils.utils import dec_entry_log
from core.translation import translate, lang, t
from schema.schema import get_permitted_schemas
from web.edit.edit import get_ids_from_req
from web.edit.edit_common import get_searchparams, delete_g_nodes_entry
import urllib
import web.common.sort as _sort
q = db.query
def elemInList(elemlist, name):
for item in elemlist:
if item.getName() == name:
return True
return False
@dec_entry_log
def getContent(req, ids):
def getDatatypes(_req, _schemes):
_dtypes = []
        datatypes = Data.get_all_datatypes()
for scheme in _schemes:
for dtype in scheme.getDatatypes():
if dtype not in _dtypes:
for _t in datatypes:
# if _t.getName() == dtype and not elemInList(dtypes, _t.getName()):
                        _dtypes.append(_t)
_dtypes.sort(lambda x, y: cmp(translate(x.getLongName(), request=_req).lower(), translate(y.getLongName(), request=req).lower()))
        return _dtypes
def get_ids_from_query():
ids = get_ids_from_req(req)
return ",".join(ids)
node = q(Data).get(long(ids[0]))
if "action" in req.params:
if req.params.get('action') == "resort":
field = req.params.get('value', '').strip()
res = showdir(req, node, sortfield=field)
res = json.dumps({'state': 'ok', 'values': res}, ensure_ascii=False)
req.write(res)
return None
elif req.params.get('action') == "save": # save selection for collection
field = req.params.get('value')
if field.strip() == "" or field.strip() == "off":
if node.get('sortfield'):
node.removeAttribute('sortfield')
else:
node.set('sortfield', field)
nodes_per_page = req.params.get('nodes_per_page')
if nodes_per_page.strip() == "":
if node.get('nodes_per_page'):
node.removeAttribute('nodes_per_page')
else:
node.set('nodes_per_page', nodes_per_page)
req.write(json.dumps({'state': 'ok'}))
db.session.commit()
return None
if node.isContainer():
schemes = []
dtypes = []
item_count = []
items = showdir(req, node, item_count=item_count)
nav = shownav(req, node)
v = {"operations": showoperations(req, node), "items": items, "nav": nav}
if node.has_write_access():
schemes = get_permitted_schemas()
dtypes = getDatatypes(req, schemes)
if "globalsort" in req.params:
node.set("sortfield", req.params.get("globalsort"))
if req.params.get("sortfield", "") != "":
v['collection_sortfield'] = req.params.get("sortfield")
else:
v['collection_sortfield'] = node.get("sortfield")
if req.params.get("nodes_per_page"):
v['npp_field'] = req.params.get("nodes_per_page", default_edit_nodes_per_page)
else:
v['npp_field'] = node.get("nodes_per_page")
if not v['npp_field']:
v['npp_field'] = default_edit_nodes_per_page
search_html = render_edit_search_box(node, lang(req), req, edit=True)
searchmode = req.params.get("searchmode")
navigation_height = searchbox_navlist_height(req, item_count)
if not isinstance(node, (Root, Collections, Home)):
sortchoices = _sort.get_sort_choices(container=node,off="off",t_off=t(req, "off"),t_desc=t(req, "descending"))
else:
sortchoices = ()
count = item_count[0] if item_count[0] == item_count[1] else "%d from %d" % (item_count[0], item_count[1])
v['sortchoices'] = tuple(sortchoices)
v['types'] = dtypes
v['schemes'] = schemes
v['id'] = ids[0]
v['count'] = count
v['language'] = lang(req)
v['search'] = search_html
v['navigation_height'] = navigation_height
v['parent'] = node.id
v['query'] = req.query.replace('id=','src=')
searchparams = get_searchparams(req)
searchparams = {k: unicode(v).encode("utf8") for k, v in searchparams.items()}
v['searchparams'] = urllib.urlencode(searchparams)
v['get_ids_from_query'] = get_ids_from_query
v['edit_all_objects'] = t(lang(req), "edit_all_objects").format(item_count[1])
v['t'] = t
res = req.getTAL("web/edit/modules/content.html", v, macro="edit_content")
delete_g_nodes_entry(req)
return res
if hasattr(node, "editContentDefault"):
return node.editContentDefault(req)
return ""
|
ingted/crmsh | modules/xmlutil.py | Python | gpl-2.0 | 37,119 | 0.001051 | # Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# See COPYING for license information.
import os
import subprocess
from lxml import etree, doctestcompare
import copy
import bz2
from collections import defaultdict
from . import config
from . import options
from . import schema
from . import constants
from .msg import common_err, common_error, common_debug, cib_parse_err, err_buf
from . import userdir
from . import utils
from .utils import add_sudo, str2file, str2tmp, get_boolean
from .utils import get_stdout, stdout2list, crm_msec, crm_time_cmp
from .utils import olist, get_cib_in_use, get_tempdir
def xmlparse(f):
try:
cib_elem = etree.parse(f).getroot()
except Exception, msg:
common_err("cannot parse xml: %s" % msg)
return None
return cib_elem
def file2cib_elem(s):
try:
f = open(s, 'r')
except IOError, msg:
common_err(msg)
return None
cib_elem = xmlparse(f)
f.close()
if options.regression_tests and cib_elem is None:
print "Failed to read CIB from file: %s" % (s)
return cib_elem
def compressed_file_to_cib(s):
try:
if s.endswith('.bz2'):
import bz2
f = bz2.BZ2File(s)
elif s.endswith('.gz'):
import gzip
f = gzip.open(s)
else:
f = open(s)
except IOError, msg:
common_err(msg)
return None
cib_elem = xmlparse(f)
if options.regression_tests and cib_elem is None:
print "Failed to read CIB from file %s" % (s)
f.seek(0)
print f.read()
f.close()
return cib_elem
cib_dump = "cibadmin -Ql"
def sudocall(cmd):
cmd = add_sudo(cmd)
if options.regression_tests:
print ".EXT", cmd
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
outp, errp = p.communicate()
p.wait()
return p.returncode, outp, errp
except IOError, msg:
common_err("running %s: %s" % (cmd, msg))
return None, None, None
def cibdump2file(fname):
_, outp, _ = sudocall(cib_dump)
if outp is not None:
return str2file(outp, fname)
return None
def cibdump2tmp():
try:
        _, outp, _ = sudocall(cib_dump)
if outp is not None:
return str2tmp(outp)
except IOError, msg:
common_err(msg)
return None
def cibtext2elem(cibtext):
"""
Convert a text format CIB to
an XML tree.
"""
try:
return etree.fromstring(cibtext)
except Exception, err:
cib_parse_err(err, cibtext)
return None
def cibdump2elem(section=None):
if section:
cmd = "%s -o %s" % (cib_dump, section)
else:
cmd = cib_dump
rc, outp, errp = sudocall(cmd)
if rc == 0:
return cibtext2elem(outp)
elif rc != constants.cib_no_section_rc:
common_error("running %s: %s" % (cmd, errp))
return None
def read_cib(fun, params=None):
cib_elem = fun(params)
if cib_elem is None or cib_elem.tag != "cib":
return None
return cib_elem
def sanity_check_nvpairs(id, node, attr_list):
rc = 0
for nvpair in node.iterchildren("nvpair"):
n = nvpair.get("name")
if n and n not in attr_list:
common_err("%s: attribute %s does not exist" % (id, n))
rc |= utils.get_check_rc()
return rc
def sanity_check_meta(id, node, attr_list):
rc = 0
if node is None or not attr_list:
return rc
for c in node.iterchildren():
if c.tag == "meta_attributes":
rc |= sanity_check_nvpairs(id, c, attr_list)
return rc
def get_interesting_nodes(node, nodes_l):
'''
All nodes which can be represented as CIB objects.
'''
for c in node.iterchildren():
if is_cib_element(c):
nodes_l.append(c)
get_interesting_nodes(c, nodes_l)
return nodes_l
def get_top_cib_nodes(node, nodes_l):
'''
All nodes which can be represented as CIB objects, but not
nodes which are children of other CIB objects.
'''
for c in node.iterchildren():
if is_cib_element(c):
nodes_l.append(c)
else:
get_top_cib_nodes(c, nodes_l)
return nodes_l
class RscState(object):
'''
Get the resource status and some other relevant bits.
In particular, this class should allow for a bit of caching
of cibadmin -Q -o resources output in case we need to check
more than one resource in a row.
'''
rsc_status = "crm_resource -W -r '%s'"
def __init__(self):
self.current_cib = None
self.rsc_elem = None
self.prop_elem = None
self.rsc_dflt_elem = None
def _init_cib(self):
cib = cibdump2elem("configuration")
self.current_cib = cib
self.rsc_elem = get_first_conf_elem(cib, "resources")
self.prop_elem = get_first_conf_elem(cib, "crm_config/cluster_property_set")
self.rsc_dflt_elem = get_first_conf_elem(cib, "rsc_defaults/meta_attributes")
def rsc2node(self, id):
'''
Get a resource XML element given the id.
NB: this is called from almost all other methods.
Hence we initialize the cib here. CIB reading is
expensive.
'''
if self.rsc_elem is None:
self._init_cib()
if self.rsc_elem is None:
return None
# does this need to be optimized?
expr = './/*[@id="%s"]' % id
try:
return self.rsc_elem.xpath(expr)[0]
except (IndexError, AttributeError):
return None
def is_ms(self, id):
'''
Test if the resource is master-slave.
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return False
return is_ms(rsc_node)
def rsc_clone(self, id):
'''
Return id of the clone/ms containing this resource
or None if it's not cloned.
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return None
pnode = rsc_node.getparent()
if pnode is None:
return None
if is_group(pnode):
pnode = pnode.getparent()
if is_clonems(pnode):
return pnode.get("id")
return None
def is_managed(self, id):
'''
Is this resource managed?
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return False
# maintenance-mode, if true, overrides all
attr = get_attr_value(self.prop_elem, "maintenance-mode")
if attr and is_xs_boolean_true(attr):
return False
# then check the rsc is-managed meta attribute
rsc_meta_node = get_rsc_meta_node(rsc_node)
attr = get_attr_value(rsc_meta_node, "is-managed")
if attr:
return is_xs_boolean_true(attr)
# then rsc_defaults is-managed attribute
attr = get_attr_value(self.rsc_dflt_elem, "is-managed")
if attr:
return is_xs_boolean_true(attr)
# finally the is-managed-default property
attr = get_attr_value(self.prop_elem, "is-managed-default")
if attr:
return is_xs_boolean_true(attr)
return True
def is_running(self, id):
'''
Is this resource running?
'''
if not is_live_cib():
return False
test_id = self.rsc_clone(id) or id
rc, outp = get_stdout(self.rsc_status % test_id, stderr_on=False)
return outp.find("running") > 0 and outp.find("NOT") == -1
def is_group(self, id):
'''
Test if the resource is a group
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return False
return is_group(rsc_node)
def can_delete(self, id):
'''
Can a resource be deleted?
The order below is important!
'''
return not (self.is_running(id) and not self.is_group(id) and self.is_managed(id))
def resources_xml():
return cibdump2elem("resources")
def is_normal_node(n):
return n.tag == "node" and |
mattkoskela/py-pdf-collate | pdf_collate/pdf.py | Python | mit | 1,217 | 0.000822 | ##
# This file contains a class for pdf_collate
#
# @package pdf_collate
# @author Matt Koskela <mattkoskela@gmail.com>
##
"""
pdf.py
This file contains the PDF class, which collates the pages of a PDF file.
"""
import os
import hashlib
from PyPDF2 import PdfFileReader, PdfFileWriter
class PDF():
"""
    This class collates the pages of a PDF document.
"""
def __init__(self):
pass
def collate_pages(self, source_filename, output_filename=""):
if not output_filename:
random_filename = os.getcwd() + "/" + hashlib.md5().hexdigest() + ".pdf"
output_filename = random_filename
else:
random_filename = None
        pdfTest = PdfFileReader(file(source_filename, "rb"))
output = PdfFileWriter()
number_of_pages = pdfTest.getNumPages()
for page_number in range(0, number_of_pages/2):
            output.addPage(pdfTest.getPage(page_number))
output.addPage(pdfTest.getPage(page_number + number_of_pages/2))
outputStream = file(output_filename, "wb")
output.write(outputStream)
outputStream.close()
if random_filename:
os.rename(random_filename, source_filename)
|
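Editor's note: `collate_pages` interleaves the first and second halves of a document, e.g. the order needed when a duplex scan produces all fronts followed by all backs. A usage sketch (Python 2, since the class uses the `file()` builtin; the filenames are assumptions):

    # Hypothetical usage: rewrites scanned.pdf in place with pages re-collated.
    pdf = PDF()
    pdf.collate_pages("scanned.pdf")
    # or keep the source and write the collated result elsewhere:
    pdf.collate_pages("scanned.pdf", output_filename="collated.pdf")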
ebu/PlugIt | tests/proxy_internal/test_proxyviews_generate.py | Python | bsd-3-clause | 3,938 | 0.001524 | from test_proxyviews import TestProxyViews
import uuid
class TestProxyViewsGenerate(TestProxyViews):
def test_generate_user_nobody(self):
assert(not self.views.generate_user(None, None))
def test_generate_user_ano(self):
user = self.views.generate_user('ano')
assert(user)
assert(not user.is_authenticated())
assert(not user.ebuio_member)
assert(not user.ebuio_admin)
assert(not user.ebuio_orga_member)
assert(not user.ebuio_orga_admin)
def test_generate_user_log(self):
user = self.views.generate_user('log')
assert(user)
assert(user.is_authenticated())
assert(not user.ebuio_member)
assert(not user.ebuio_admin)
assert(not user.ebuio_orga_member)
assert(not user.ebuio_orga_admin)
def test_generate_user_mem(self):
user = self.views.generate_user('mem')
assert(user)
assert(user.is_authenticated())
assert(user.ebuio_member)
assert(not user.ebuio_admin)
assert(user.ebuio_orga_member)
assert(not user.ebuio_orga_admin)
def test_generate_user_adm(self):
user = self.views.generate_user('adm')
assert(user)
assert(user.is_authenticated())
assert(user.ebuio_member)
assert(user.ebuio_admin)
assert(user.ebuio_orga_member)
assert(user.ebuio_orga_admin)
def test_generate_user_compat_mode(self):
user = self.views.generate_user(None, 3)
assert(user)
assert(user.is_authenticated())
assert(not user.ebuio_member)
assert(not user.ebuio_admin)
assert(not user.ebuio_orga_member)
assert(not user.ebuio_orga_admin)
def test_gen_404(self):
raison = str(uuid.uuid4())
url = self.random_base_url()
usermode = str(uuid.uuid4())
request = self.build_request('/')
request.session['plugit-standalone-usermode'] = usermode
        response = self.views.gen404(request, url, raison)
assert(response)
assert(response.status_code == 404)
        assert(response.content.strip() == '404,{},{},{}'.format(raison, url, usermode))
self.restore_base_url()
def test_gen_500(self):
url = self.random_base_url()
usermode = str(uuid.uuid4())
request = self.build_request('/')
request.session['plugit-standalone-usermode'] = usermode
response = self.views.gen500(request, url)
assert(response)
assert(response.status_code == 500)
assert(response.content.strip() == '500,{},{}'.format(url, usermode))
self.restore_base_url()
def test_gen_403(self):
raison = str(uuid.uuid4())
project = str(uuid.uuid4())
url = self.random_base_url()
usermode = str(uuid.uuid4())
request = self.build_request('/')
request.session['plugit-standalone-usermode'] = usermode
response = self.views.gen403(request, url, raison, project)
assert(response)
assert(response.status_code == 403)
assert(response.content.strip() == '403,{},{},{},{}'.format(raison, url, usermode, project))
self.restore_base_url()
class O1:
pk = str(uuid.uuid4())
class O2:
pk = str(uuid.uuid4())
headers = {'cache_time': 1, 'template_tag': ''}
request = self.build_request('/')
assert(self.views.get_cache_key(request, headers, False, O1) == self.views.get_cache_key(request, headers, False, O1))
assert(self.views.get_cache_key(request, headers, False, O1) == self.views.get_cache_key(request, headers, False, O2))
assert(self.views.get_cache_key(request, headers, True, O1) == self.views.get_cache_key(request, headers, True, O1))
assert(self.views.get_cache_key(request, headers, True, O1) != self.views.get_cache_key(request, headers, True, O2))
|
anthrotype/nototools | nototools/noto_data.py | Python | apache-2.0 | 16,915 | 0.003074 | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Noto-specific data about division of ranges between fonts.
"""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
CJK_RANGES_TXT = """
# Core
3400..4DBF; CJK Unified Ideographs Extension A
4E00..9FFF; CJK Unified Ideographs
F900..FAFF; CJK Compatibility Ideographs
20000..2A6DF; CJK Unified Ideographs Extension B
2A700..2B73F; CJK Unified Ideographs Extension C
2B740..2B81F; CJK Unified Ideographs Extension D
2F800..2FA1F; CJK Compatibility Ideographs Supplement
AC00..D7AF; Hangul Syllables
1100..11FF; Hangul Jamo
A960..A97F; Hangul Jamo Extended-A
D7B0..D7FF; Hangul Jamo Extended-B
3130..318F; Hangul Compatibility Jamo
3040..309F; Hiragana
1B000..1B0FF; Kana Supplement
30A0..30FF; Katakana
31F0..31FF; Katakana Phonetic Extensions
3100..312F; Bopomofo
31A0..31BF; Bopomofo Extended
# Others
3000..303F; CJK Symbols and Punctuation
3190..319F; Kanbun
31C0..31EF; CJK Strokes
3200..32FF; Enclosed CJK Letters and Months
FE10..FE1F; Vertical Forms
FE30..FE4F; CJK Compatibility Forms
FE50..FE6F; Small Form Variants
FF00..FFEF; Halfwidth and Fullwidth Forms
3300..33FF; CJK Compatibility
2FF0..2FFF; Ideographic Description Characters
2E80..2EFF; CJK Radicals Supplement
2F00..2FDF; Kangxi Radicals
"""
SYMBOL_RANGES_TXT = """
20A0..20CF; Currency Symbols
20D0..20FF; Combining Diacritical Marks for Symbols
2100..214F; Letterlike Symbols
2190..21FF; Arrows
2200..22FF; Mathematical Operators
2300..23FF; Miscellaneous Technical
2400..243F; Control Pictures
2440..245F; Optical Character Recognition
2460..24FF; Enclosed Alphanumerics
2500..257F; Box Drawing
2580..259F; Block Elements
25A0..25FF; Geometric Shapes
2600..26FF; Miscellaneous Symbols
2700..27BF; Dingbats
27C0..27EF; Miscellaneous Mathematical Symbols-A
27F0..27FF; Supplemental Arrows-A
2800..28FF; Braille Patterns
2900..297F; Supplemental Arrows-B
2980..29FF; Miscellaneous Mathematical Symbols-B
2A00..2AFF; Supplemental Mathematical Operators
2B00..2BFF; Miscellaneous Symbols and Arrows
2E00..2E7F; Supplemental Punctuation
4DC0..4DFF; Yijing Hexagram Symbols
A700..A71F; Modifier Tone Letters
FFF0..FFFF; Specials
10100..1013F; Aegean Numbers
10140..1018F; Ancient Greek Numbers
10190..101CF; Ancient Symbols
101D0..101FF; Phaistos Disc
1D000..1D0FF; Byzantine Musical Symbols
1D100..1D1FF; Musical Symbols
1D200..1D24F; Ancient Greek Musical Notation
1D300..1D35F; Tai Xuan Jing Symbols
1D360..1D37F; Counting Rod Numerals
1D400..1D7FF; Mathematical Alphanumeric Symbols
1F000..1F02F; Mahjong Tiles
1F030..1F09F; Domino Tiles
1F0A0..1F0FF; Playing Cards
1F100..1F1FF; Enclosed Alphanumeric Supplement
1F200..1F2FF; Enclosed Ideographic Supplement
1F700..1F77F; Alchemical Symbols
"""
UNDER_DEVELOPMENT_RANGES_TXT = """
0F00..0FFF; Tibetan
"""
DEEMED_UI_SCRIPTS_SET = frozenset({
'Armn', # Armenian
'Cher', # Cherokee
'Ethi', # Ethiopic
'Geor', # Georgian
'Hebr', # Hebrew
'Sinh', # Sinhala
'Zsye', # Emoji
})
# Range spec matches "Noto Nastaliq requirements" doc, Tier 1.
URDU_RANGES = """
0600..0604,060b..0614,061b,061c,061e..061f,0620,0621..063a,
0640..0659,065e..066d,0670..0673,0679,067a..067b,067c,067d,
067e,067f..0680,0681,0683..0684,0685..0686,0687,0688..0689,
068a,068b,068c..068d,068e,068f,0691,0693,0696,0698,0699,
069a,069e,06a6,06a9,06ab,06af..06b0,06b1,06b3,06b7,06ba,
06bb,06bc,06be,06c0..06c4,06cc..06cd,06d0,06d2..06d5,
06dd..06de,06e9,06ee..06ef,06f0..06f9,06ff,0759,075c,0763,
0767..0769,076b..077d,08ff,fbb2..fbc1,fd3e..fd3f,fdf2,
fdfa..fdfd"""
# Only these two characters are required for Urdu from the Arabic
# extra characters needed.
URDU_EXTRA = "2010..2011"
def _char_set(compact_set_text):
result = set()
prev = -1
for part in compact_set_text.split(','):
sep_index = part.find('..')
if sep_index == -1:
cp = int(part, base=16)
assert cp > prev
# print '%04x' % cp
result.add(cp)
prev = cp
else:
start = int(part[:sep_index], base=16)
end = int(part[sep_index + 2:], base=16)
# print '%04x..%04x' % (start, end)
assert start > prev
assert end > start
for cp in range(start, end + 1):
result.add(cp)
prev = end
return result
def urdu_set():
return _char_set(URDU_RANGES) | _char_set(URDU_EXTRA)
def ascii_letters():
return _char_set('0041..005a,0061..007a')
def char_range(start, end):
return range(start, end+1)
COPTIC_EPACT = char_range(0x102E0, 0x102FB)
ARABIC_MATH = char_range(0x1EE00, 0x1EEF1)
P3_EXTRA_CHARACTERS_NEEDED = {
# nothing additional outside block
'Ahom': [ ],
# According to Roozbeh (and existing fonts) the following punctuation and
# digits are used with and interact with Arabic characters.
'Arab': char_range(0x0030, 0x0039) + [
        # exclamation mark, comma, full stop, colon, NBS, guillemets
0x0021, 0x002c, 0x002e, 0x003a, 0x00a0, 0x00ab, 0x00bb,
0x06dd, # Arabic end of Ayah
0x2010, 0x2011, # Hyphen and non-breaking hyphen need different shapes
0x204F, 0x2E41, # For Sindhi
0xfd3e, 0xfd3f], # ornate left and right paren (in Noto Naskh)
    # like Arabic, but Sindhi is not written in Nastaliq so omitted.
    'Aran': char_range(0x0030, 0x0039) + [
        # exclamation mark, comma, full stop, colon, NBS, guillemets
0x0021, 0x002c, 0x002e, 0x003a, 0x00a0, 0x00ab, 0x00bb,
0x06dd, # Arabic end of Ayah
0x2010, 0x2011, # Hyphen and non-breaking hyphen need different shapes
0xfd3e, 0xfe3f], # ornate left and right paren (in Noto Naskh)
# Characters referenced in Armenian encoding cross ref page as shown in
# see http://www.unicode.org/L2/L2010/10354-n3924-armeternity.pdf
# also see http://man7.org/linux/man-pages/man7/armscii-8.7.html
# left and right paren, comma, hyphen-minus, period, section,
    # no break space, left and right guillemet, hyphen, em dash, ellipsis
# presentation forms FB13-FB17
'Armn': [0x0028, 0x0029, 0x002C, 0x002D, 0x002E, 0x00A0,
0x00A7, 0x00AB, 0x00BB, 0x2010, 0x2014, 0x2026],
'Avst': [0x2E30, 0x2E31, # From Core Specification and NamesList.txt
0x200C], # www.unicode.org/L2/L2007/07006r-n3197r-avestan.pdf
# From http://www.unicode.org/L2/L2014/14064r-n4537r-cherokee.pdf section 8
'Cher': [
0x0300, 0x0301, 0x0302, 0x0304, 0x030B,
0x030C, 0x0323, 0x0324, 0x0330, 0x0331],
# From Core Specification:
# period, colon, semicolon, middle dot
# combining: grave, macron, overline, dot above, double overline
# greek numeral sign, greek lower numeral sign, comb macrons (lh, rh, cj)
# from http://std.dkuug.dk/JTC1/SC2/WG2/docs/n2636.pdf
# oblique double hyphen, diaeresis, apostrophe, comb. circumflex, acute,
# hyphen-minus, hyphen
'Copt': [
0x002E, 0x003A, 0x003B, 0x00B7,
0x0300, 0x0304, 0x0305, 0x0307, 0x033F,
0x0374, 0x0375, 0xFE24, 0xFE25, 0xFE26,
0x2E17, 0x0308, 0x2019, 0x0302, 0x0301,
0x002D, 0x2010,
],
# Elbasan
# see http://www.unicode.org/L2/L2011/11050-n3985-elbasan.pdf
# adds combining overbar and greek numerals for ones and tens, and
# both stigma/digamma for 6.
# greek capital alpha beta gamma delta epsilon stigma/digamma zeta eta theta
# iota kappa lambda mu nu xi omicron pi koppa
'Elba': [0x00B7, 0x0305,
0x0391, 0x0392, 0x0393, 0x0394, 0x0395,
0x03DA, |
SteffenBauer/mia_elixir | python/mia_client1.py | Python | mit | 2,536 | 0.024054 | #!/usr/bin/env python
import socket
import random
SERVERHOST = 'localhost'
SERVERPORT = 4080
LOCALIP = '127.0.0.2'
LOCALPORT = 4082
LOCALNAME = "30_PERCENT_SEE"
def higher(dice_a, dice_b):
ad1, ad2 = dice_a[0], dice_a[1]
bd1, bd2 = dice_b[0], dice_b[1]
if ad1 == bd1 and ad2 == bd2: return False
if ad1 == "2" and ad2 == "1": return True
if bd1 == "2" and bd2 == "1": return False
if ad1 == ad2 and bd1 == bd2: return int(ad1) > int(bd1)
if ad1 == ad2: return True
if bd1 == bd2: return False
if ad1 == bd1: return int(ad2) > int(bd2)
return int(ad1) > int(bd1)
def one_higher(dice):
d1, d2 = dice[0],dice[1]
if d1 == "6" and d2 == "6":
return "2,1"
if d1 == d2:
return str(int(d1)+1)+","+str(int(d1)+1)
if d1 == "6" and d2 == "5":
return "1,1"
if int(d1) == int(d2)+1:
return str(int(d1)+1)+",1"
return d1+","+str(int(d2)+1)
def connect_to_miaserver(sock):
sock.settimeout(2)
while True:
sock.sendto("REGISTER;" + LOCALNAME, (SERVERHOST, SERVERPORT))
try:
data = sock.recv(1024)
if "REGISTERED" in data:
break
else:
print "Received '" + data + "'"
except socket.timeout:
print "MIA Server does not respond, retrying"
print "Registered at MIA Server"
sock.setblocking(1)
def play_mia(sock):
announced = None
while True:
data = sock.recv(1024)
if data.startswith("ROUND STARTING;"):
_, _, token = data.strip().partition(";")
sock.sendto("JOIN;" + token, (SERVERHOST, SERVERPORT))
            announced = None
        elif data.startswith("ANNOUNCED;"):
d1, _, d2 = data.strip().split(";")[2].partition(",")
announced = (d1, d2)
elif data.startswith("YOUR TURN;"):
_, _, token = data.strip().partition(";")
if announced == None or random.uniform(0,100) > 30.0:
sock.sendto("ROLL;" + token, (SERVERHOST, SERVERPORT))
else:
sock.sendto("SEE;" + token, (SERVERHOST, SERVERPORT))
elif data.startswith("ROLLED;"):
token = data.split(";")[2]
d1, _, d2 = data.strip().split(";")[1].partition(",")
if announced == None or higher((d1,d2), announced):
sock.sendto("ANNOUNCE;"+d1+","+d2+";"+token, (SERVERHOST, SERVERPORT))
else:
sock.sendto("ANNOUNCE;"+one_higher(announced)+";"+token, (SERVERHOST, SERVERPORT))
def mia_client_start():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((LOCALIP, LOCALPORT))
connect_to_miaserver(sock)
play_mia(sock)
if __name__ == "__main__":
mia_client_start()
|
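Editor's note: `higher` and `one_higher` encode the Mia dice ordering (2-1 "Mia" beats everything, doubles beat plain rolls, otherwise compare the digits). A few worked examples consistent with the functions above:

    # Worked examples of the ordering helpers defined above.
    assert higher(("2", "1"), ("6", "6"))      # Mia beats any double
    assert higher(("3", "3"), ("6", "5"))      # any double beats a plain roll
    assert one_higher(("6", "5")) == "1,1"     # the value after 65 is the lowest double
    assert one_higher(("6", "6")) == "2,1"     # the value after 66 is Mia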
ellipsis14/dolfin-adjoint | tests_firedrake/projection/projection.py | Python | lgpl-3.0 | 886 | 0.01693 | import random
from firedrake import *
from firedrake_adjoint import *
import sys
mesh = UnitSquareMesh(4, 4)
V3 = FunctionSpace(mesh, "CG", 3)
V2 = FunctionSpace(mesh, "CG", 2)
firedrake.parameters["adjoint"]["record_all"] = True
def main(ic, annotate=False):
soln = project(ic, V2, annotate=annotate)
return soln
if __name__ == "__main__":
ic = project(Expression("x[0]*(x[0]-1)*x[1]*(x[1]-1)"), V3)
soln = main(ic, annotate=True)
adj_html("projection_forward.html", "forward")
assert replay_dolfin(tol=1e-12, stop=True)
J = Functional(soln*soln*dx*dt[FINISH_TIME])
Jic = assemble(soln*soln*dx)
dJdic = compute_gradient(J, InitialConditionParameter(ic), forget=False)
def J(ic):
soln = main(ic, annotate=False)
return assemble(soln*soln*dx)
    minconv = taylor_test(J, InitialConditionParameter(ic), Jic, dJdic)
if minconv < 1.9:
sys.exit(1)
|
makism/dyfunconn | examples/fc_mi.py | Python | bsd-3-clause | 390 | 0.002564 | # -*- coding: utf-8 -*-
import numpy as np
np.set_printoptions(precision=3, linewidth=256)
from dyconnmap.fc import mi
if __name__ == "__main__":
data = np.load(
"/home/makism/Github/dyconnmap/examples/data/eeg_32chans_10secs.npy")
    data = data[0:5, :]
fs = 128
fb_lo = [1.0, 4.0]
fb_hi = [8.0, 13.0]
ts, avg = mi(data, fb_lo, fb_hi, fs)
print(avg)
|
cydenix/OpenGLCffi | OpenGLCffi/GL/EXT/ARB/ES3_2_compatibility.py | Python | mit | 209 | 0.019139 | from OpenGLCffi.GL import params
@params(api='gl', prms=['minX', 'minY', 'minZ', 'minW', 'maxX', 'maxY', 'maxZ', 'maxW'])
def glPrimitiveBoundingBoxARB(minX, minY, minZ, minW, maxX, maxY, maxZ, maxW):
pass
|
kcl-ddh/chopin-online | ocve/scripts/ocvejp2converter.py | Python | gpl-3.0 | 10,997 | 0.010003 | #!/usr/bin/python
"""Module for batch converting images to JPEG 2000 format.
Takes an input directory and recreates the directory structure, with
converted images, in the specified output directory.
WARNING: If the same directory contains two or more images with the
same name (excepting the extension), only one of these images will
end up converted.
WARNING: kdu_compress does not handle filenames containing a comma,
even if the entire string is quoted or the comma quoted with a \.
Refactored by EH for use as a specialised upload converter/archiver only for tiffs in the OCVE upload dir
"""
import copy_reg
import logging
import multiprocessing
import os
import shlex
import subprocess
import types
import re
from shutil import move
import progressbar as pb
import fcntl
import sys
logging.basicConfig(format='%(asctime)-15s %(message)s')
logger = logging.getLogger('Main logger')
logger.setLevel(logging.INFO)
# Guard against multiple instances with a fcntl lock
def lockFile(lockfile):
    fd = os.open(lockfile, os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
return False
return True
if not lockFile("/vol/ocve3/webroot/stg/logs/.imageconvert.lock"):
logger.error('Another instance detected as lock exists, exiting.')
sys.exit(0)
# End lock guard
class ArchiveJP2Converter (object):
    def __init__ (self, in_dir, out_dir, archive_dir, fail_dir, force=False, show_progress=False):
"""Initialise this object.
:param in_dir: path to directory containing images to be converted
:type in_dir: `str`
:param out_dir: path to directory to output converted images
:type out_dir: `str`
        :param archive_dir: path to directory to archive successfully converted originals
        :type archive_dir: `str`
        :param fail_dir: path to directory for originals whose conversion failed
        :type fail_dir: `str`
:param force: whether to overwrite existing converted images
:type force: `bool`
:param show_progress: whether to show a progress bar during conversion
:type show_progress: `bool`
"""
self._in_dir = os.path.abspath(in_dir)
self._out_dir = os.path.abspath(out_dir)
if len(archive_dir) > 0:
self._archive_dir = os.path.abspath(archive_dir)
else:
self._archive_dir = None
if len(fail_dir) > 0:
self._failed_dir = os.path.abspath(fail_dir)
else:
            self._failed_dir = None
self._force = force
self._progress = show_progress
self._bar = None
self._has_errors = False
def convert (self, lossless=False):
"""Converts images.
Returns True if there were no errors.
:param compress: whether to use a lossless conversion
:type compress: `bool`
:rtype: `bool`
"""
self._has_errors = False
if self._progress:
max_val = 0
for root, dirs, files in os.walk(self._in_dir):
max_val += len(files)
self._bar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar()],
maxval=max_val).start()
pool = multiprocessing.Pool()
for root, dirs, files in os.walk(self._in_dir):
out_rel_path = os.path.relpath(root, self._in_dir)
out_full_path = os.path.abspath( os.path.join(self._out_dir, out_rel_path) )
if not os.path.isdir(out_full_path):
logger.info('Creating directory [%s]' % (out_full_path) )
try:
os.makedirs(out_full_path)
except OSError as e:
logger.fatal('Unable to create directory [%s], Reason: %s' % (out_full_path,e.strerror) )
sys.exit(1)
for name in files:
# Changed to only look at Tifs, the OCVE image format
if re.match('.*\.tif',name,re.IGNORECASE):
basename = os.path.splitext(name)[0]
in_file = os.path.join(root, name)
base_out_file = os.path.join(out_full_path, basename)
tiff_file = '%s.tif' % base_out_file
jp2_file = '%s.jp2' % base_out_file
if self._force or not(os.path.isfile(jp2_file)):
params = (in_file, tiff_file, jp2_file, lossless)
pool.apply_async(self._convert, params,
callback=self._result_callback)
elif self._progress:
self._bar.update(self._bar.currval + 1)
pool.close()
pool.join()
if self._progress:
self._bar.finish()
#Added by EH for archiving originals outside input dir (for OCVE)
if self._archive_dir is not None:
for root, dirs, files in os.walk(self._in_dir):
out_rel_path = os.path.relpath(root, self._in_dir)
out_full_path = os.path.abspath(
os.path.join(self._out_dir, out_rel_path))
for name in files:
if re.match('.*\.[tif|TIF]',name,re.IGNORECASE):
basename = os.path.splitext(name)[0]
in_file = os.path.join(root, name)
jp2_file=basename+'.jp2'
jp2= os.path.join(out_full_path, jp2_file)
archive_full_path = ''
if os.path.isfile(jp2) and os.path.getsize(jp2) > 0:
#JP2 is successfully converted, archive original
archive_full_path = os.path.abspath(os.path.join(self._archive_dir, out_rel_path))
else:
#Move to failed dir
archive_full_path = os.path.abspath(os.path.join(self._failed_dir, out_rel_path))
if len(archive_full_path) > 0:
if not os.path.isdir(archive_full_path):
logger.info('Creating directory [%s]' % archive_full_path)
try:
os.makedirs(archive_full_path)
except OSError as e:
logger.fatal('Unable to create directory [%s], Reason: %s' % (archive_full_path, e.strerror))
sys.exit(1)
src = in_file
dst=os.path.join(archive_full_path,name)
move(src,dst)
logger.info('Image archived at [%s]' % dst)
#If folder is now empty, remove it.
if len(os.listdir(root)) == 0:
if os.path.isdir(root):
os.rmdir(root)
else:
logger.info('Archive folder not found')
return not(self._has_errors)
def _convert (self, in_file, tiff_file, jp2_file, lossless):
logger.info('convert [%s]->[%s]' % (in_file, tiff_file) )
cmd = [ 'convert',
'-compress',
'None',
'-alpha',
'off',
in_file,
tiff_file
]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
err = proc.communicate()[1]
if err:
logger.error('Error converting to TIFF [%s]->[%s], Reason: %s' % (in_file,tiff_file,err))
# In some cases, at least, an error may be reported
# (typically "unknown field") but the TIFF is in fact
# converted in a visually satisfactory way. In such cases,
# where the converted TIFF file is created, continue on
# with the JPEG 2000 conversion.
if not os.path.exists(tiff_file):
return False
logger.info('convert [%s]->[%s] completed' % (in_file,tiff_file))
if lossless:
logger.info('(lossless) kdu_compress [%s]->[%s]' % ( tiff_file, jp2_file ) )
cmd = [
'kdu_compress',
'-i',
|
michael-ball/sublime-text | sublime-text-3/Packages/User/linters/eclim.py | Python | unlicense | 2,139 | 0.000468 | import json
import os
import tempfile
from lint import Linter
from lint.util import find
class Eclim(Linter):
language = 'java'
cmd = ('eclim', '-command', 'java_src_update')
regex = r'.'
defaults = {
'disable': True,
}
def run(self, cmd, code):
project = find(os.path.dirname(self.filename), '.project', True)
if not project:
return
filename = self.filename.replace(project, '', 1).lstrip(os.sep)
project = os.path.basename(project)
        # can't use stdin or a temp file - hack time?
# this *could* become a tmp directory
# but I'd need to know all files to copy
# from the source project
tmp = tempfile.mktemp()
os.rename(self.filename, tmp)
# at least we get some inode protection on posix
inode = None
with open(self.filename, 'wb') as f:
f.write(code)
if os.name == 'posix':
inode = os.stat(self.filename).st_ino
try:
cmd = cmd + ('-p', project, '-f', filename, '-v')
output = self.communicate(cmd, '')
finally:
if inode is not None:
new_inode = os.stat(self.filename).st_ino
                if new_inode != inode:
# they saved over our tmp file, bail
return output
os.unlink(self.filename)
os.rename(tmp, self.filename)
return output
def find_errors(self, output):
try:
obj = json.loads(output)
for item in obj:
# TODO: highlight warnings in a different color?
# warning = item['warning']
line, col = item['line']-1, item['column']-1
message = item['message']
yield True, line, col, message, None
except Exception:
error = 'eclim error'
if 'Connection refused' in output:
error += ' Connection Refused'
yield True, 0, None, error, None
# maybe do this on line one?
# yield {"eclim_exception": str(e)}
|
matiskay/html-cluster | html_cluster/commands/make_graph.py | Python | bsd-3-clause | 1,507 | 0.001991 | import os
import json
import click
def generate_graph(similarity_file, threshold=85):
edges = dict()
hosts_used = set()
with open(similarity_file, 'r') as json_file:
scores = json.load(json_file)
score_dict = {s['similarity']: (s['path1'], s['path2']) for s in scores}
for score, paths in score_dict.items():
if score < threshold:
continue
host1 = paths[0]
host2 = paths[1]
hosts_used.add(host1)
hosts_used.add(host2)
edges[host1, host2] = (score - threshold) / 2
print('graph {')
print(' graph [overlap=scale, splines=true];')
print(' node [shape=box, fixedsize=false, fontsize=8, margin="0.05", width="0", height="0"];')
print()
for k, v in edges.items():
u1, u2 = k
weight = v
print(' "%s" -- "%s" [weight=%0.1f, penwidth=%0.1f]' % (u1, u2, weight, weight))
print()
for host in hosts_used:
image_path = '{}.png'.format(host.replace('.html', ''))
if os.path.exists(image_path):
print(' "{host}" [label="{host}", image="{image_path}"]'.format(host=host, image_path=image_path)) # NOQA
else:
print(' "{host}" [label="{host}"]'.format(host=host))
print('}')
# Validate Threshold
@click.command(short_help='Generate a Graphviz Dot file.')
@click.argument('similarity_file')
@click.option('--threshold', default=55)
def cli(similarity_file, threshold):
generate_graph(similarity_file, threshold)
|
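Editor's note: `generate_graph` expects the similarity file to be a JSON list of objects with `similarity`, `path1` and `path2` keys; only pairs scoring at or above the threshold become weighted edges in the printed Graphviz graph. A sketch of input it would accept (file name and values are assumptions):

    # Hypothetical content of similarity.json accepted by generate_graph above.
    scores = [
        {"similarity": 92, "path1": "site-a.html", "path2": "site-b.html"},
        {"similarity": 40, "path1": "site-a.html", "path2": "site-c.html"},
    ]
    # With threshold=85 only the first pair becomes an edge; the dot output goes to stdout:
    # generate_graph("similarity.json", threshold=85)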
philgyford/django-spectator | devproject/manage.py | Python | mit | 822 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devproject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa: F401
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
peterjliu/rate_limit | test_lib.py | Python | mit | 2,134 | 0.005623 | #!/usr/bin/env python
"""
Test rate-limiter lib
"""
import logging
import sys
import time
import unittest
from google.appengine.api import memcache
from google.appengine.ext import testbed
from lib import Limiter, QuotaKey
class Events:
USER_READ = 1
USER_WRITE = 2
RATE_LIMIT_SPEC = {
Events.USER_READ: (2, 1),
Events.USER_WRITE: (5, 1)
}
class RateLimitTestCase(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_memcache_stub()
self.limiter = Limiter(RATE_LIMIT_SPEC)
def tearDown(self):
self.testbed.deactivate()
def testRateLimiter(self):
q = QuotaKey("user1", Events.USER_READ)
# Unfortunately there's no obvious way to inject a clock into the
# memcache stub, so we assume the following runs in less than 1 sec.
for _ in range(0, 2):
self.assertTrue(self.limiter.CanSpend(q))
# We used up our budget of 2 in less than 1 second
self.assertFalse(self.limiter.CanSpend(q))
q = QuotaKey("user2", Events.USER_WRITE)
for _ in range(0, 5):
self.assertTrue(self.limiter.CanSpend(q))
self.assertFalse(self.limiter.CanSpend(q))
def testRateLimiterWithExpiration(self):
l = Limiter(RATE_LIMIT_SPEC)
q = QuotaKey("user1", Events.USER_READ)
log = logging.getLogger("rate_limit.lib.test")
for _ in range(0, 2):
self.assertTrue(self.limiter.CanSpend(q))
# Expire cache by waiting. Too bad we can't inject the time, eh?
        log.info("wait 1 second for cache to expire")
time.sleep(1)
for _ in range(0, 2):
self.assertTrue(self.limiter.CanSpend(q))
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr)
logging.getLogger("rate_limit.lib.test" ).setLevel( logging.DEBUG )
    unittest.main()
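Editor's note: the tests above document the Limiter contract: RATE_LIMIT_SPEC maps an event type to a (budget, window) pair, where the 1-second sleep in the expiration test suggests the window is in seconds, and CanSpend returns True until the budget for that user/event key is exhausted. A small sketch outside the test harness; it still needs the App Engine memcache service, and the spec values are assumptions:

    # Hypothetical standalone use of the API exercised by the tests above.
    spec = {Events.USER_READ: (100, 60)}          # read as: budget of 100 per 60-second window
    limiter = Limiter(spec)
    allowed = limiter.CanSpend(QuotaKey("user42", Events.USER_READ))
    # allowed stays True for the first 100 calls in the window, then turns False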
kevenli/scrapydd | scrapydd/process.py | Python | apache-2.0 | 3,812 | 0.001836 | from tornado.process import cpu_count, _reseed_random
from tornado import ioloop
import logging
import os
import signal
import sys
from tornado.util import errno_from_exception
import errno
logger = logging.getLogger(__name__)
_task_id = None
exiting = False
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
logger.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
global exiting
exiting = False
def receive_signal(sig, frame):
logger.debug('Received signal')
global exiting
exiting = True
for pid, taskid in children.items():
os.kill(pid, signal.SIGTERM)
signal.signal(signal.SIGTERM, receive_signal)
signal.signal(signal.SIGINT, receive_signal)
num_restarts = 0
while children and not exiting:
logger.debug('Exiting : %s' % exiting)
try:
pid, status = os.wait()
except OSError as e:
if errno_from_exception(e) == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
logger.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
logger.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
logger.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0) |
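Editor's note: a usage sketch matching the docstring above; the server setup is an assumption, the key point being that sockets are bound before forking and each child only creates its IOLoop afterwards:

    # Hypothetical startup sequence built around the fork_processes variant above.
    import tornado.httpserver, tornado.ioloop, tornado.netutil, tornado.web
    app = tornado.web.Application([])              # placeholder application (assumption)
    sockets = tornado.netutil.bind_sockets(8888)   # bind once, before forking
    task_id = fork_processes(0)                    # 0/None -> one child per CPU core
    server = tornado.httpserver.HTTPServer(app)    # created per child, after the fork
    server.add_sockets(sockets)
    tornado.ioloop.IOLoop.current().start()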
baike21/blog | blogadmin/migrations/0002_auto_20170826_1818.py | Python | gpl-3.0 | 822 | 0.002433 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 10:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogadmin', '0001_initial'),
]
    operations = [
migrations.AlterField(
model_name='article',
name='category',
field=models.CharField(choices=[('Tech', '\u6280\u672f'), ('Chat', '\u6742\u6587'), ('Movie', '\u5f71\u8bc4'), ('Book', '\u8bfb\u4e66')], default='Tech', max_length=64, verbose_name='\u7c7b\u522b\u6807\u7b7e'),
),
migrations.AlterField(
model_name='article',
            name='pub_date',
field=models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4'),
),
]
|
partofthething/home-assistant | homeassistant/components/owntracks/config_flow.py | Python | apache-2.0 | 2,860 | 0.002448 | """Config flow for OwnTracks."""
import secrets
from homeassistant import config_entries
from homeassistant.const import CONF_WEBHOOK_ID
from .const import DOMAIN # pylint: disable=unused-import
from .helper import supports_encryption
CONF_SECRET = "secret"
CONF_CLOUDHOOK = "cloudhook"
class OwnTracksFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Set up OwnTracks."""
VERSION = 1
async def async_step_user(self, user_input=None):
"""Handle a user initiated set up flow to create OwnTracks webhook."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self.async_show_form(step_id="user")
webhook_id, webhook_url, cloudhook = await self._get_webhook_id()
secret = secrets.token_hex(16)
if supports_encryption():
secret_desc = f"The encryption key is {secret} (on Android under preferences -> advanced)"
else:
secret_desc = "Encryption is not supported because nacl is not installed."
return self.async_create_entry(
title="OwnTracks",
data={
CONF_WEBHOOK_ID: webhook_id,
CONF_SECRET: secret,
CONF_CLOUDHOOK: cloudhook,
},
description_placeholders={
"secret": secret_desc,
"webhook_url": webhook_url,
"android_url": "https://play.google.com/store/apps/details?id=org.owntracks.android",
"ios_url": "https://itunes.apple.com/us/app/owntracks/id692424691?mt=8",
"docs_url": "https://www.home-assistant.io/integrations/owntracks/",
},
)
async def async_step_import(self, user_input):
"""Import a config flow from configuration."""
if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
webhook_id, _webhook_url, cloudhook = await self._get_webhook_id()
secret = secrets.token_hex(16)
return self.async_create_entry(
title="OwnTracks",
data={
                CONF_WEBHOOK_ID: webhook_id,
CONF_SECRET: secret,
CONF_CLOUDHOOK: cloudhook,
},
)
async def _get_webhook_id(self):
"""Generate webhook ID."""
webhook_id = self.hass.components.webhook.async_generate_id()
if self.hass.components.cloud.async_active_subscription():
webhook_url = await self.hass.components.cloud.async_create_cloudhook(
webhook_id
)
cloudhook = True
else:
webhook_url = self.hass.components.webhook.async_generate_url(webhook_id)
cloudhook = False
return webhook_id, webhook_url, cloudhook
|
junaart/Structure_data | Code/tree.py | Python | gpl-3.0 | 9,097 | 0.018138 | ###################### TREE######################
class node():
__parent = None
__left = None
__right = None
__data = None
def __init__(self, value):
self.__data = value
self.__parent = None
self.__left = None
self.__right = None
def get_parent(self):
return self.__parent
def get_left(self):
return self.__left
def get_right(self):
return self.__right
def get_data(self):
return self.__data
def set_parent(self, x):
if x==None:
self.__parent=None
elif isinstance(x, node):
self.__parent= x
def set_left(self, x):
if x==None:
self.__left=None
elif isinstance(x, node):
self.__left= x
def set_right(self, x):
if x==None:
self.__right=None
elif isinstance(x, node):
self.__right= x
def set_data(self, x):
self.__data = x
#####################################################
class tree():
__top = None
def __init__(self, value):
self.__top = node(value)
def get_top(self):
return self.__top
def add_node(self, value, current_node):
        if (value >= current_node.get_data()):
if (current_node.get_right() != None):
current_node = current_node.get_right()
self.add_node(value, current_node)
else:
a = node(value)
current_node.set_right(a)
a.set_parent(current_node)
else:
            if (current_node.get_left() != None):
current_node = current_node.get_left()
self.add_node(value, current_node)
else:
a = node(value)
current_node.set_left(a)
a.set_parent(current_node)
def add_list(self, lst):
for i in lst:
self.add_node(i, self.get_top())
def show_tree(self, current_node):
if(current_node.get_left() == None) & (current_node.get_right()==None):
return [current_node.get_data(),[],[]]
elif(current_node.get_left() != None) & (current_node.get_right() == None):
return [current_node.get_data(),self.show_tree(current_node.get_left()),[]]
elif(current_node.get_left() == None) & (current_node.get_right() != None):
return [current_node.get_data(),[],self.show_tree(current_node.get_right())]
else:
return [current_node.get_data(),self.show_tree(current_node.get_left()),self.show_tree(current_node.get_right())]
def find(self, value, current_node):
if current_node==None:
return False
elif current_node.get_data()==value:
return True
elif current_node.get_data()>value:
return self.find(value, current_node.get_left())
else:
return self.find(value, current_node.get_right())
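    # Example (editor's sketch, not part of the original source):
    #   t = tree(8); t.add_list([3, 10, 1, 6, 14])
    #   t.find(6, t.get_top())         -> True
    #   t.count_node(t.get_top())      -> 6
    #   t.count_leaves(t.get_top())    -> 3   (the nodes 1, 6 and 14)
    #   t.deep_tree(t.get_top())       -> 2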
def count_node(self, current_node):
if((current_node.get_left()==None) & (current_node.get_right()==None)):
return 1
elif((current_node.get_left()!=None) & (current_node.get_right()==None)):
return 1+self.count_node(current_node.get_left())
elif((current_node.get_left()==None) & (current_node.get_right()!=None)):
return 1+self.count_node(current_node.get_right())
else:
return 1+self.count_node(current_node.get_left())+self.count_node(current_node.get_right())
def count_leaves(self, current_node):
if((current_node.get_left()==None) & (current_node.get_right()==None)):
return 1
elif((current_node.get_left()!=None) & (current_node.get_right()==None)):
return self.count_leaves(current_node.get_left())
elif((current_node.get_left()==None) & (current_node.get_right()!=None)):
return self.count_leaves(current_node.get_right())
else:
return self.count_leaves(current_node.get_left())+self.count_leaves(current_node.get_right())
def deep_tree(self, current_node):
if current_node==None:
return 0
elif((current_node.get_left()==None) & (current_node.get_right()==None)):
return 0
elif((current_node.get_left()!=None) & (current_node.get_right()==None)):
return 1+self.deep_tree(current_node.get_left())
elif((current_node.get_left()==None) & (current_node.get_right()!=None)):
return 1+self.deep_tree(current_node.get_right())
else:
if self.deep_tree(current_node.get_left()) > self.deep_tree(current_node.get_right()):
return 1+self.deep_tree(current_node.get_left())
else:
return 1+self.deep_tree(current_node.get_right())
def turn_left(self, current_node):
a=current_node
b=a.get_right()
C=b.get_left()
p=a.get_parent()
b.set_parent(p)
a.set_parent(b)
a.set_right(C)
b.set_left(a)
if p!=None:
if p.get_right()==a:
p.set_right(b)
else:
p.set_left(b)
else:
self.__top=b
def turn_right(self, current_node):
b=current_node
a=b.get_left()
C=a.get_right()
p=b.get_parent()
a.set_parent(p)
b.set_parent(a)
b.set_left(C)
a.set_right(b)
if p!=None:
if p.get_left()==b:
p.set_left(a)
else:
p.set_right(a)
else:
self.__top=a
def get_min(self, current_node):
if (current_node.get_left()==None):
return current_node.get_data()
else:
return self.get_min(current_node.get_left())
def delete_node(self,current_node):
if (current_node.get_left()==None) & (current_node.get_right() == None):
if current_node.get_parent()!=None:
if current_node.get_parent().get_left()==current_node:
current_node.get_parent().set_left(None)
current_node.set_parent(None)
else:
current_node.get_parent().set_right(None)
current_node.set_parent(None)
else:
self.__top = None
elif (current_node.get_left()==None) & (current_node.get_right() != None):
if (current_node.get_parent()!=None):
if (current_node.get_parent().get_right() == current_node):
current_node.get_parent().set_right(current_node.get_right())
else:
current_node.get_parent().set_left(current_node.get_right())
current_node.get_right().set_parent(current_node.get_parent())
current_node.set_right(None)
current_node.set_parent(None)
else:
self.__top=current_node.get_right()
self.__top.set_parent(None)
current_node.set_right(None)
elif (current_node.get_left()!=None) & (current_node.get_right() == None):
if (current_node.get_parent()!=None):
if (current_node.get_parent().get_right() == current_node):
current_node.get_parent().set_right(current_node.get_left())
else:
current_node.get_parent().set_left(current_node.get_left())
current_node.get_left().set_parent(current_node.get_parent())
current_node.set_left(None)
current_node.set_parent(None)
else:
self.__top=current_node.get_left()
self.__top.set_parent(None)
current_node.set_left(None)
else:
if self.deep_tree(current_node.get_right() |
Duoxilian/home-assistant | homeassistant/components/history.py | Python | mit | 11,138 | 0 | """
Provide pre-made queries on top of the recorder component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/history/
"""
import asyncio
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import logging
import time
import voluptuous as vol
from homeassistant.const import (
HTTP_BAD_REQUEST, CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_INCLUDE)
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'history'
DEPENDENCIES = ['recorder', 'http']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: recorder.FILTER_SCHEMA,
}, extra=vol.ALLOW_EXTRA)
SIGNIFICANT_DOMAINS = ('thermostat', 'climate')
IGNORE_DOMAINS = ('zone', 'scene',)
def last_recorder_run():
"""Retireve the last closed recorder run from the DB."""
recorder.get_instance()
rec_runs = recorder.get_model('RecorderRuns')
with recorder.session_scope() as session:
res = recorder.query(rec_runs).order_by(rec_runs.end.desc()).first()
if res is None:
return None
session.expunge(res)
return res
def get_significant_states(start_time, end_time=None, entity_id=None,
filters=None):
"""
Return states changes during UTC period start_time - end_time.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
entity_ids = (entity_id.lower(), ) if entity_id is not None else None
states = recorder.get_model('States')
query = recorder.query(states).filter(
(states.domain.in_(SIGNIFICANT_DOMAINS) |
(states.last_changed == states.last_updated)) &
(states.last_updated > start_time))
if filters:
query = filters.apply(query, entity_ids)
if end_time is not None:
query = query.filter(states.last_updated < end_time)
states = (
state for state in recorder.execute(
query.order_by(states.entity_id, states.last_updated))
if (_is_significant(state) and
not state.attributes.get(ATTR_HIDDEN, False)))
return states_to_json(states, start_time, entity_id, filters)
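# Illustrative usage of the helper above (hypothetical entity id; not part of
# the component itself): fetch one day of significant states for one entity.
#
#     start = dt_util.utcnow() - timedelta(days=1)
#     states = get_significant_states(start, entity_id='sensor.outside_temperature')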
def state_changes_during_period(start_time, end_time=None, entity_id=None):
"""Return states changes during UTC period start_time - end_time."""
states = recorder.get_model('States')
query = recorder.query(states).filter(
(states.last_changed == states.last_updated) &
(states.last_changed > start_time))
if end_time is not None:
query = query.filter(states.last_updated < end_time)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
states = recorder.execute(
query.order_by(states.entity_id, states.last_updated))
return states_to_json(states, start_time, entity_id)
def get_states(utc_point_in_time, entity_ids=None, run=None, filters=None):
"""Return the states at a specific point in time."""
if run is None:
run = recorder.run_information(utc_point_in_time)
# History did not run before utc_point_in_time
if run is None:
return []
from sqlalchemy import and_, func
states = recorder.get_model('States')
most_recent_state_ids = recorder.query(
func.max(states.state_id).label('max_state_id')
).filter(
(states.created >= run.start) &
(states.created < utc_point_in_time) &
(~states.domain.in_(IGNORE_DOMAINS)))
if filters:
most_recent_state_ids = filters.apply(most_recent_state_ids,
entity_ids)
most_recent_state_ids = most_recent_state_ids.group_by(
states.entity_id).subquery()
query = recorder.query(states).join(most_recent_state_ids, and_(
states.state_id == most_recent_state_ids.c.max_state_id))
for state in recorder.execute(query):
if not state.attributes.get(ATTR_HIDDEN, False):
yield state
def states_to_json(states, start_time, entity_id, filters=None):
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
result = defaultdict(list)
entity_ids = [entity_id] if entity_id is not None else None
# Get the states at the start time
for state in get_states(start_time, entity_ids, filters=filters):
state.last_changed = start_time
state.last_updated = start_time
result[state.entity_id].append(state)
# Append all changes to it
for entity_id, group in groupby(states, lambda state: state.entity_id):
result[entity_id].extend(group)
return result
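# Shape of the structure built above (illustrative sketch; entity ids made up):
#
#     {'light.kitchen': [<state at start_time>, <state change>, ...],
#      'sensor.temperature': [<state at start_time>, ...]}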
def get_state(utc_point_in_time, entity_id, run=None):
"""Return a state at a specific point in time."""
states = list(get_states(utc_point_in_time, (entity_id,), run))
return states[0] if states else None
# pylint: disable=unused-argument
def setup(hass, config):
"""Setup the history hooks."""
filters = Filters()
exclude = config[DOMAIN].get(CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude[CONF_ENTITIES]
filters.excluded_domains = exclude[CONF_DOMAINS]
include = config[DOMAIN].get(CONF_INCLUDE)
if include:
filters.included_entities = include[CONF_ENTITIES]
filters.included_domains = include[CONF_DOMAINS]
recorder.get_instance()
hass.http.register_view(HistoryPeriodView(filters))
register_built_in_panel(hass, 'history', 'History', 'mdi:poll-box')
return True
class HistoryPeriodView(HomeAssistantView):
"""Handle history period requests."""
url = '/api/history/period'
name = 'api:history:view-period'
extra_urls = ['/api/history/period/{datetime}']
def __init__(self, filters):
"""Initilalize the history period view."""
self.filters = filters
@asyncio.coroutine
def get(self, request, datetime=None):
"""Return history over a period of time."""
timer_start = time.perf_counter()
if datetime:
datetime = dt_ | util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
now = dt_util.utcnow()
one_day = timedelta(days=1)
if datetime:
start_time = dt_util.as_utc(datetime)
else:
start_time = now - one_day
if start_time > now:
| return self.json([])
end_time = request.GET.get('end_time')
if end_time:
end_time = dt_util.as_utc(
dt_util.parse_datetime(end_time))
if end_time is None:
return self.json_message('Invalid end_time', HTTP_BAD_REQUEST)
else:
end_time = start_time + one_day
entity_id = request.GET.get('filter_entity_id')
result = yield from request.app['hass'].loop.run_in_executor(
None, get_significant_states, start_time, end_time, entity_id,
self.filters)
result = result.values()
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
'Extracted %d states in %fs', sum(map(len, result)), elapsed)
return self.json(result)
class Filters(object):
"""Container for the configured include and exclude filters."""
def __init__(self):
"""Initialise the include and exclude filters."""
self.excluded_entities = []
self.excluded_domai |
sgiavasis/C-PAC | CPAC/vmhc/vmhc.py | Python | bsd-3-clause | 22,733 | 0.007918 | import sys
import os
import commands
import nipype.pipeline.engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from utils import *
from CPAC.vmhc import *
from nipype.interfaces.afni import preprocess
from CPAC.registration import create_wf_calculate_ants_warp, \
create_wf_c3d_fsl_to_itk, \
create_wf_collect_transforms, \
create_wf_apply_ants_warp
def create_vmhc(use_ants):
"""
Compute the map of brain functional homotopy, the high degree of synchrony in spontaneous activity between geometrically corresponding interhemispheric (i.e., homotopic) regions.
Parameters
----------
None
Returns
-------
vmhc_workflow : workflow
Voxel Mirrored Homotopic Connectivity Analysis Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/vmhc/vmhc.py>`_
Workflow Inputs::
inputspec.brain : string (existing nifti file)
Anatomical image(without skull)
inputspec.brain_symmetric : string (existing nifti file)
MNI152_T1_2mm_brain_symmetric.nii.gz
inputspec.rest_res_filt : string (existing nifti file)
        Band passed image with nuisance signal regressed out (and optionally scrubbed). Recommended bandpass filter: (0.001, 0.1).
inputspec.reorient : string (existing nifti file)
RPI oriented anatomical data
inputspec.example_func2highres_mat : string (existing affine transformation .mat file)
Specifies an affine transform that should be applied to the example_func before non linear warping
inputspec.standard : string (existing nifti file)
MNI152_T1_standard_resolution_brain.nii.gz
inputspec.symm_standard : string (existing nifti file)
MNI152_T1_2mm_symmetric.nii.gz
inputspec.twomm_brain_mask_dil : string (existing nifti file | )
MNI152_T1_2mm_brain_mask_symmetric_di | l.nii.gz
inputspec.config_file_twomm_symmetric : string (existing .cnf file)
T1_2_MNI152_2mm_symmetric.cnf
inputspec.rest_mask : string (existing nifti file)
A mask functional volume(derived by dilation from motion corrected functional volume)
fwhm_input.fwhm : list (float)
For spatial smoothing the Z-transformed correlations in MNI space.
Generally the value of this parameter is 1.5 or 2 times the voxel size of the input Image.
inputspec.mean_functional : string (existing nifti file)
The mean functional image for use in the func-to-anat registration matrix conversion
to ITK (ANTS) format, if the user selects to use ANTS.
Workflow Outputs::
outputspec.highres2symmstandard : string (nifti file)
Linear registration of T1 image to symmetric standard image
outputspec.highres2symmstandard_mat : string (affine transformation .mat file)
An affine transformation .mat file from linear registration and used in non linear registration
outputspec.highres2symmstandard_warp : string (nifti file)
warp file from Non Linear registration of T1 to symmetrical standard brain
outputspec.fnirt_highres2symmstandard : string (nifti file)
Non Linear registration of T1 to symmetrical standard brain
outputspec.highres2symmstandard_jac : string (nifti file)
jacobian determinant image from Non Linear registration of T1 to symmetrical standard brain
outputspec.rest_res_2symmstandard : string (nifti file)
nonlinear registration (func to standard) image
outputspec.VMHC_FWHM_img : string (nifti file)
pearson correlation between res2standard and flipped res2standard
outputspec.VMHC_Z_FWHM_img : string (nifti file)
Fisher Z transform map
outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)
Z statistic map
Order of commands:
- Perform linear registration of Anatomical brain in T1 space to symmetric standard space. For details see `flirt <http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_::
flirt
-ref MNI152_T1_2mm_brain_symmetric.nii.gz
-in mprage_brain.nii.gz
-out highres2symmstandard.nii.gz
-omat highres2symmstandard.mat
-cost corratio
-searchcost corratio
-dof 12
-interp trilinear
- Perform nonlinear registration (higres to standard) to symmetric standard brain. For details see `fnirt <http://fsl.fmrib.ox.ac.uk/fsl/fnirt/>`_::
fnirt
--in=head.nii.gz
--aff=highres2symmstandard.mat
--cout=highres2symmstandard_warp.nii.gz
--iout=fnirt_highres2symmstandard.nii.gz
--jout=highres2symmstandard_jac.nii.gz
--config=T1_2_MNI152_2mm_symmetric.cnf
--ref=MNI152_T1_2mm_symmetric.nii.gz
--refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
--warpres=10,10,10
- Perform spatial smoothing on the input functional image(inputspec.rest_res_filt). For details see `PrinciplesSmoothing <http://imaging.mrc-cbu.cam.ac.uk/imaging/PrinciplesSmoothing>`_ `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm>`_::
fslmaths rest_res_filt.nii.gz
-kernel gauss FWHM/ sqrt(8-ln(2))
-fmean -mas rest_mask.nii.gz
rest_res_filt_FWHM.nii.gz
- Apply nonlinear registration (func to standard). For details see `applywarp <http://www.fmrib.ox.ac.uk/fsl/fnirt/warp_utils.html#applywarp>`_::
applywarp
--ref=MNI152_T1_2mm_symmetric.nii.gz
--in=rest_res_filt_FWHM.nii.gz
--out=rest_res_2symmstandard.nii.gz
--warp=highres2symmstandard_warp.nii.gz
--premat=example_func2highres.mat
- Copy and L/R swap the output of applywarp command (rest_res_2symmstandard.nii.gz). For details see `fslswapdim <http://fsl.fmrib.ox.ac.uk/fsl/fsl4.0/avwutils/index.html>`_::
fslswapdim
rest_res_2symmstandard.nii.gz
-x y z
tmp_LRflipped.nii.gz
- Calculate pearson correlation between rest_res_2symmstandard.nii.gz and flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz). For details see `3dTcorrelate <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_::
3dTcorrelate
-pearson
-polort -1
-prefix VMHC_FWHM.nii.gz
rest_res_2symmstandard.nii.gz
tmp_LRflipped.nii.gz
- Fisher Z Transform the correlation. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc
-a VMHC_FWHM.nii.gz
-expr 'log((a+1)/(1-a))/2'
-prefix VMHC_FWHM_Z.nii.gz
- Calculate the number of volumes(nvols) in flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz) ::
-Use Nibabel to do this
- Compute the Z statistic map ::
3dcalc
-a VMHC_FWHM_Z.nii.gz
-expr 'a*sqrt('${nvols}'-3)'
-prefix VMHC_FWHM_Z_stat.nii.gz
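    - Note: the two 3dcalc steps above amount to a Fisher r-to-z transform followed by
      scaling to a Z statistic. A rough NumPy equivalent (illustrative only; ``r`` is the
      voxelwise pearson correlation volume and ``nvols`` the number of volumes)::
        import numpy as np
        z = np.arctanh(r)                # same as log((1 + r) / (1 - r)) / 2
        z_stat = z * np.sqrt(nvols - 3)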
Workflow:
.. image:: ../images/vmhc_graph.dot.png
:width: 500
Workflow Detailed:
.. image:: ../images/vmhc_detailed_graph.dot.png
:width: 500
References
----------
.. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S., Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and growing apart: regional and sex differences in the lifespan developmental trajectories of functional homotopy. The Journal of neuroscience : the official journal of the Society for Neuroscience, 30(45), 15034-43. doi:10.1523/JNEUROSCI.2612-10.2010
Examples
--------
>>> vmhc_w = create_vmhc()
>>> vmhc_w.inputs.inputspec.brain_symmetric = 'MNI152_T1_2mm_brain_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.symm_s |
EdDev/vdsm | lib/vdsm/network/ip/rule/__init__.py | Python | gpl-2.0 | 2,327 | 0 | # Copyright 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
from __future__ import absolute_import
import abc
import six
from vdsm.network import driverloader
@six.add_metaclass(abc.ABCMeta)
class IPRuleApi(object):
@staticmethod
def add(rule_data):
""" Adding a rule entry described by an IPRuleData | data object """
raise NotImplementedError
@staticmethod
def delete(rule_data):
""" Delete a rule entry described by an IPRuleData data object """
raise NotImplementedError
@staticmethod
def rules(table='all'):
raise NotImplementedError
class IPRuleData(object):
""" A data structure used to keep rule information """
def __init__(self, to=None, src=Non | e, iif=None, table=None):
self._to = to
self._src = src
self._iif = iif
self._table = table
@property
def to(self):
return self._to
@property
def src(self):
return self._src
@property
def iif(self):
return self._iif
@property
def table(self):
return self._table
def __repr__(self):
return 'to={} src={} iif={} table={}'.format(
self.to, self.src, self.iif, self.table)
class IPRuleError(Exception):
pass
class IPRuleAddError(IPRuleError):
pass
class IPRuleDeleteError(IPRuleError):
pass
class Drivers(object):
IPROUTE2 = 'iproute2'
def driver(driver_name):
_drivers = driverloader.load_drivers('IPRule', __name__, __path__[0])
return driverloader.get_driver(driver_name, _drivers)
|
EmreAtes/spack | lib/spack/spack/cmd/reindex.py | Python | lgpl-2.1 | 1,426 | 0 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by | Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it un | der the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack
import spack.store
description = "rebuild Spack's package database"
section = "admin"
level = "long"
def reindex(parser, args):
spack.store.db.reindex(spack.store.layout)
|
oscaro/django | tests/migrations/test_loader.py | Python | bsd-3-clause | 12,020 | 0.002246 | from __future__ import unicode_literals
from unittest import skipIf
from django.test import TestCase, override_settings
from django.db import connection, connections
from django.db.migrations.graph import NodeNotFoundError
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.recorder import MigrationRecorder
from django.test import modify_settings
from django.utils import six
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set([("myapp", "0432_ponies")]),
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
set((x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.import_error"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
MigrationLoader(connection)
@skipIf(six.PY2, "PY2 doesn't load empty dirs.")
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
MigrationLoader(connection)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
recorder.flush()
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
reco | rder = MigrationRecorder(connection)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migra | tions", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_app |
Abjad/abjad | tests/test_NoteHeadList___delitem__.py | Python | gpl-3.0 | 216 | 0 | import abjad
def | test_NoteHeadList___delitem___01():
"""
Deletes note-head.
"""
chord = abjad.Chord("<ef' cs'' f''>4")
del chord.note_heads[1]
assert abjad.lilypond(chord) == "<ef' f' | '>4"
|
PyAr/fades | tests/test_parsing/test_reqs.py | Python | gpl-3.0 | 3,030 | 0.00066 | """Check the requirements parsing."""
import io
from logassert import Multiple
from fades import parsing, REPO_PYPI, REPO_VCS
from tests import get_reqs
def test_empty():
parsed = parsing._parse_requirement(io.StringIO("""
"""))
assert parsed == {}
def test_simple():
parsed = parsing._parse_requirement(io.StringIO("""
pypi::foo
"""))
assert parsed == {REPO_PYPI: get_reqs('foo')}
def test_simple_default():
parsed = parsing._parse_requireme | nt(io.StringIO("""
foo
"""))
assert parsed == {REPO_PYPI: get_reqs('foo')}
def test_double():
parsed = parsing._parse_requirement(io.StringIO("""
pypi: | :time
foo
"""))
assert parsed == {
REPO_PYPI: get_reqs('time') + get_reqs('foo')
}
def test_version_same():
parsed = parsing._parse_requirement(io.StringIO("""
pypi::foo == 3.5
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo == 3.5')
}
def test_version_same_default():
parsed = parsing._parse_requirement(io.StringIO("""
foo == 3.5
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo == 3.5')
}
def test_version_different():
parsed = parsing._parse_requirement(io.StringIO("""
foo !=3.5
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo !=3.5')
}
def test_version_same_no_spaces():
parsed = parsing._parse_requirement(io.StringIO("""
foo==3.5
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo ==3.5')
}
def test_version_greater_two_spaces():
parsed = parsing._parse_requirement(io.StringIO("""
foo > 2
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo > 2')
}
def test_version_same_or_greater():
parsed = parsing._parse_requirement(io.StringIO("""
foo >=2
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo >= 2')
}
def test_comments():
parsed = parsing._parse_requirement(io.StringIO("""
pypi::foo # some text
# other text
bar
"""))
assert parsed == {
REPO_PYPI: get_reqs('foo') + get_reqs('bar')
}
def test_strange_repo(logs):
parsed = parsing._parse_requirement(io.StringIO("""
unknown::foo
"""))
assert Multiple("Not understood fades repository", "unknown") in logs.warning
assert parsed == {}
def test_vcs_simple():
parsed = parsing._parse_requirement(io.StringIO("""
vcs::strangeurl
"""))
assert parsed == {REPO_VCS: [parsing.VCSDependency("strangeurl")]}
def test_vcs_simple_default():
parsed = parsing._parse_requirement(io.StringIO("""
bzrhttp://server/bleh
"""))
assert parsed == {REPO_VCS: [parsing.VCSDependency("bzrhttp://server/bleh")]}
def test_mixed():
parsed = parsing._parse_requirement(io.StringIO("""
vcs::strangeurl
pypi::foo
"""))
assert parsed == {
REPO_VCS: [parsing.VCSDependency("strangeurl")],
REPO_PYPI: get_reqs('foo'),
}
|
dawnpower/nova | nova/tests/unit/compute/test_compute.py | Python | apache-2.0 | 522,454 | 0.001066 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the L | icense is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for | compute service."""
import base64
import contextlib
import datetime
import operator
import sys
import time
import traceback
import uuid
from eventlet import greenthread
import mock
from mox3 import mox
from neutronclient.common import exceptions as neutron_exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
import testtools
from testtools import matchers as testtools_matchers
import nova
from nova import availability_zones
from nova import block_device
from nova import compute
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import manager as conductor_manager
from nova.console import type as ctype
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova import policy
from nova import quota
from nova.scheduler import client as scheduler_client
from nova import test
from nova.tests.unit.compute import eventlet_utils
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_migration
from nova.tests.unit import utils as test_utils
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import event
from nova.virt import fake
from nova.virt import hardware
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
def get_primitive_instance_by_uuid(context, instance_uuid):
"""Helper method to get an instance and then convert it to
a primitive form using jsonutils.
"""
instance = db.instance_get_by_uuid(context, instance_uuid)
return jsonutils.to_primitive(instance)
def unify_instance(instance):
"""Return a dict-like instance for both object-initiated and
model-initiated sources that can reasonably be compared.
"""
newdict = dict()
for k, v in instance.iteritems():
if isinstance(v, datetime.datetime):
# NOTE(danms): DB models and Instance objects have different
# timezone expectations
v = v.replace(tzinfo=None)
elif k == 'fault':
# NOTE(danms): DB models don't have 'fault'
continue
elif k == 'pci_devices':
# NOTE(yonlig.he) pci devices need lazy loading
# fake db does not support it yet.
continue
newdict[k] = v
return newdict
class FakeSchedulerAPI(object):
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties):
pass
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance, dest):
pass
def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
filter_properties, reservations):
pass
class FakeComputeTaskAPI(object):
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations):
pass
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(network_manager='nova.network.manager.FlatManager')
fake.set_nodes([NODENAME])
self.flags(use_local=True, group='conductor')
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.compute = importutils.import_object(CONF.compute_manager)
# execute power syncing synchronously for testing:
self.compute._sync_power_pool = eventlet_utils.SyncPool()
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver, NODENAME)
self.compute._resource_tracker_dict[NODENAME] = fake_rt
def fake_get_compute_nodes_in_db(context, use_slave=False):
fake_compute_nodes = [{'local_gb': 259,
'vcpus_used': 0,
'deleted': 0,
'hypervisor_type': 'powervm',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
'hypervisor_hostname': 'fake_phyp1',
'memory_mb_used': 512,
'memory_mb': 131072,
'current_workload': 0,
'vcpus': 16,
'cpu_info': 'ppc64,powervm,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
'hypervisor_version': 7,
'disk_available_least': 265856,
'deleted_at': None,
'free_ram_mb': 130560,
'metrics': '',
'stats': '',
'numa_topology': '',
'id': 2,
'host': 'fake_phyp1',
'host_ip': '127.0.0.1'}]
return [objects.ComputeNode._from_db_object(
context, objects.ComputeNode(), cn)
for cn in fake_compute_nodes]
def fake_compute_node_delete(context, compute_node_id):
self.assertEqual(2, compute_node_id)
self.stubs.Set(self.compute, '_g |
clarkperkins/django-rest-framework-siren | rest_framework_siren/pagination.py | Python | apache-2.0 | 767 | 0 | """
Siren pagination
"""
from __future__ import unicode_literals
from collections import OrderedDict
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework_siren.utils imp | ort link_maker
class SirenPagination(PageNumberPagination):
"""
Pagination class for Siren
"""
def get_paginated_respons | e(self, data):
links = [
link_maker(['self'], 'self'),
link_maker(['next'], self.get_next_link()),
link_maker(['previous'], self.get_previous_link()),
]
return Response(OrderedDict([
('properties', {'count': self.page.paginator.count}),
('entities', data),
('links', links),
]))
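# The paginated Siren envelope produced above therefore looks roughly like this
# (illustrative sketch; the exact contents of each link dict depend on the
# link_maker helper, assumed here to emit Siren-style rel/href pairs):
#
#     {"properties": {"count": 42},
#      "entities": [...serialized items...],
#      "links": [{"rel": ["self"], ...},
#                {"rel": ["next"], ...},
#                {"rel": ["previous"], ...}]}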
|
kovidgoyal/html5-parser | src/html5_parser/soup.py | Python | apache-2.0 | 4,216 | 0.000949 | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: Apache 2.0 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
unicode = type('')
cdata_list_attributes = None
universal_cdata_list_attributes = None
empty = ()
def init_bs4_cdata_list_attributes():
global cdata_list_attributes, universal_cdata_list_attributes
from bs4.builder import HTMLTreeBuilder
try:
attribs = HTMLTreeBuilder.DEFAULT_CDATA_LIST_ATTRIBUTES
except AttributeError:
attribs = HTMLTreeBuilder.cdata_list_attributes
cdata_list_attributes = {k: frozenset(v) for k, v in attribs.items()}
universal_cdata_list_attributes = cdata_list_attributes['*']
def map_list_attributes(tag_name, name, val):
if name in universal_cdata_list_attributes:
return val.split()
if name in cdata_list_attributes.get(tag_name, empty):
return val.split()
return val
def soup_module():
if soup_module.ans is None:
try:
import bs4
soup_module.ans = bs4
except ImportError:
import BeautifulSoup as bs3
soup_module.ans = bs3
return soup_module.ans
soup_module.ans = None
def set_soup_module(val):
soup_module.ans = val
def bs4_fast_append(self, new_child):
new_child.parent = self
if self.contents:
previous_child = self.contents[-1]
new_child.previous_sibling = previous_child
previous_child.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
else:
new_child.previous_sibling = None
new_child.previous_element = self
new_child.previous_element.next_element = new_child
new_child.next_sibling = new_child.next_element = None
self.contents.append(new_child)
def bs4_new_tag(Tag, soup):
builder = soup.builder
def new_tag(name, attrs):
attrs = {k: map_list_attributes(name, k, v) for k, v in attrs.items()}
return Tag(soup, name=name, attrs=attrs, builder=builder)
return new_tag
def bs3_fast_append(self, newChild):
newChild.parent = self
if self.contents:
previousChild = self.contents[-1]
newChild.previousSibling = previousChild
previousChild.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
else:
newChild.previousSibling = None
newChild.previous = self
newChild.previous.next = newChild
newChild.nextSibling = newChild.next_element = None
self.contents.append(newChild)
def bs3_new_tag(Tag, soup):
def new_tag(name, attrs):
ans = Tag(soup, name)
ans.attrs = attrs.items()
ans.attrMap = attrs
return ans
return new_tag
VOID_ELEMENTS = frozenset(
'area base br col embed hr img input keygen link menuitem meta param source track wbr'.split())
def is_bs3():
return soup_module().__version__.startswith('3.')
def init_soup():
bs = soup_module()
if is_bs3():
soup = bs.B | eautifulSoup()
new_tag = bs3_new_tag(bs.Tag, soup)
append = bs3_fast_append
soup.i | sSelfClosing = lambda self, name: name in VOID_ELEMENTS
else:
soup = bs.BeautifulSoup('', 'lxml')
new_tag = bs4_new_tag(bs.Tag, soup)
append = bs4_fast_append
if universal_cdata_list_attributes is None:
init_bs4_cdata_list_attributes()
return bs, soup, new_tag, bs.Comment, append, bs.NavigableString
def parse(utf8_data, stack_size=16 * 1024, keep_doctype=False, return_root=True):
from . import html_parser
bs, soup, new_tag, Comment, append, NavigableString = init_soup()
if not isinstance(utf8_data, bytes):
utf8_data = utf8_data.encode('utf-8')
def add_doctype(name, public_id, system_id):
soup.append(bs.Doctype.for_name_and_ids(name, public_id or None, system_id or None))
dt = add_doctype if keep_doctype and hasattr(bs, 'Doctype') else None
root = html_parser.parse_and_build(
utf8_data, new_tag, Comment, NavigableString, append, dt, stack_size)
soup.append(root)
return root if return_root else soup
|
Spooner/pixel-table | pixel_table/external/unicorn.py | Python | mit | 513 | 0.003899 | from __future__ import absolute_import, division, print_function, unicode_literals
import n | umpy as np
class Unicorn(object):
def __init__(self):
import unicornhathd as unicorn
unicorn.rotation(180)
unicorn.bright | ness(0.75)
def write_pixels(self, data):
import unicornhathd as unicorn
for y, row in enumerate((data * 255).astype(np.uint8)):
for x, color in enumerate(row):
unicorn.set_pixel(x, y, *color)
unicorn.show()
|
mikefeneley/topcoder | src/SRM-184/race_approximator.py | Python | mit | 716 | 0.009777 | import math
class RaceApproximator:
def timeToBeat(self, d1, t1, d2, t2, raceDistance):
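        # Interpolate the finish time on a log-log scale between the two
        # reference runs (d1, t1) and (d2, t2); the expression below reduces
        # to t1 when raceDistance == d1 and to t2 when raceDistance == d2.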
time = t1 * math.exp( math.log(float(t2)/t1) * math.log(float(d1)/raceDistance) / math.log(float(d1)/d2) )
time = int(time)
        # floor division keeps hours/minutes as ints on both Python 2 and 3
        hours = time // 3600
        minutes = (time - hours * 3600) // 60
seconds = time - hours * 3600 - minutes * 60
hours = str(hours)
if(minutes < 10):
minutes = '0 | ' + str(minutes)
else:
minutes = str(minutes)
if(seconds < 10):
seconds = '0' + str(seconds)
else:
seconds = str(seconds)
final = hours + ':' + minutes + | ':' + seconds
return final
|
VitalLabs/gcloud-python | scripts/run_pylint.py | Python | apache-2.0 | 9,455 | 0 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom script to run PyLint on gcloud codebase.
This runs pylint as a script via subprocess in two different
subprocesses. The first lints the production/library code
using the default rc file (PRODUCTION_RC). The second lints the
test code using an rc file (TEST_RC) which allows more style
violations (hence it has a reduced number of style checks).
"""
from __future__ import print_function
import ConfigParser
import copy
import os
import subprocess
import sys
IGNORED_DIRECTORIES = [
os.path.join('gcloud', 'bigtable', '_generated'),
os.path.join('gcloud', 'bigtable', '_generated_v2'),
os.path.join('gcloud', 'datastore', '_generated'),
]
IGNORED_FILES = [
os.path.join('docs', 'conf.py'),
'setup.py',
]
SCRIPTS_DIR = os.path.abspath(os.path.dirname(__file__))
PRODUCTION_RC = os.path.join(SCRIPTS_DIR, 'pylintrc_default')
TEST_RC = os.path.join(SCRIPTS_DIR, 'pylintrc_reduced')
TEST_DISABLED_MESSAGES = [
'abstract-method',
'arguments-differ',
'assignment-from-no-return',
'attribute-defined-outside-init',
'exec-used',
'import-error',
'invalid-name',
'missing-docstring',
'no-init',
'no-self-use',
'superfluous-parens',
'too-few-public-methods',
'too-many-locals',
'too-many-public-methods',
'unbalanced-tuple-unpacking',
]
TEST_RC_ADDITIONS = {
'MESSAGES CONTROL': {
'disable': ', '.join(TEST_DISABLED_MESSAGES),
},
}
TEST_RC_REPLACEMENTS = {
'FORMAT': {
'max-module-lines': 1900,
},
}
def read_config(filename):
"""Reads pylintrc config onto native ConfigParser object."""
config = ConfigParser.ConfigParser()
with open(filename, 'r') as file_obj:
config.readfp(file_obj)
return config
def make_test_rc(base_rc_filename, additions_dict,
replacements_dict, target_filename):
"""Combines a base rc and test additions into single file."""
main_cfg = read_config(base_rc_filename)
# Create fresh config for test, which must extend production.
test_cfg = ConfigParser.ConfigParser()
test_cfg._sections = copy.deepcopy(main_cfg._sections)
for section, opts in additions_dict.items():
curr_section = test_cfg._sections.setdefault(
section, test_cfg._dict())
for opt, opt_val in opts.items():
curr_val = curr_section.get(opt)
if curr_val is None:
raise KeyError('Expected to be adding to existing option.')
curr_val = curr_val.rstrip(',')
curr_section[opt] = '%s, %s' % (curr_val, opt_val)
for section, opts in replacements_dict.items():
curr_section = test_cfg._sections.setdefault(
section, test_cfg._dict())
for opt, opt_val in opts.items():
curr_val = curr_section.get(opt)
if curr_val is None:
raise KeyError('Expected to be replacing existing option.')
curr_section[opt] = '%s' % (opt_val,)
with open(target_filename, 'w') as file_obj:
test_cfg.write(file_obj)
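# Presumably invoked elsewhere in this script roughly as follows (illustrative
# only; the call site is not shown here):
#
#     make_test_rc(PRODUCTION_RC, TEST_RC_ADDITIONS, TEST_RC_REPLACEMENTS, TEST_RC)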
def valid_filename(filename):
"""Checks if a file is a Python file and is not ignored."""
for directory in IGNORED_DIRECTORIES:
if filename.startswith(directory):
return False
return (filename.endswith('.py') and
filename not in IGNORED_FILES)
def is_production_filename(filename):
"""Checks if the file contains production code.
:rtype: bool
:returns: Boolean indicating production status.
"""
return 'test' not in filename and 'docs' not in filename
def get_files_for_linting(allow_limited=True):
"""Gets a list of files in the repository.
By default, returns all files via ``git ls-files``. However, in some cases
uses a specific commit or branch (a so-called diff base) to compare
against for changed files. (This requires ``allow_limited=True``.)
To speed up linting on Travis pull requests against master, we manually
set the diff base to origin/master. We don't do this on non-pull requests
since origin/master will be equivalent to the currently checked out code.
One could potentially use ${TRAVIS_COMMIT_RANGE} to find a diff base but
this value is not dependable.
To allow faster local ``tox`` runs, the environment variables
``GCLOUD_REMOTE_FOR_LINT`` and ``GCLOUD_BRANCH_FOR_LINT`` can be set to
specify a remote branch to diff against.
:type allow_limited: bool
:param allow_limited: Boolean indicating if a reduced set of files can
be used.
:rtype: pair
:returns: T | uple of the | diff base using the the list of filenames to be
linted.
"""
diff_base = None
if (os.getenv('TRAVIS_BRANCH') == 'master' and
os.getenv('TRAVIS_PULL_REQUEST') != 'false'):
# In the case of a pull request into master, we want to
# diff against HEAD in master.
diff_base = 'origin/master'
elif os.getenv('TRAVIS') is None:
# Only allow specified remote and branch in local dev.
remote = os.getenv('GCLOUD_REMOTE_FOR_LINT')
branch = os.getenv('GCLOUD_BRANCH_FOR_LINT')
if remote is not None and branch is not None:
diff_base = '%s/%s' % (remote, branch)
if diff_base is not None and allow_limited:
result = subprocess.check_output(['git', 'diff', '--name-only',
diff_base])
print('Using files changed relative to %s:' % (diff_base,))
print('-' * 60)
print(result.rstrip('\n')) # Don't print trailing newlines.
print('-' * 60)
else:
print('Diff base not specified, listing all files in repository.')
result = subprocess.check_output(['git', 'ls-files'])
return result.rstrip('\n').split('\n'), diff_base
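# Illustrative local run (hypothetical remote/branch names): lint only files
# changed relative to an upstream branch by exporting the two environment
# variables mentioned in the docstring above before running this script:
#
#     GCLOUD_REMOTE_FOR_LINT=upstream GCLOUD_BRANCH_FOR_LINT=master python scripts/run_pylint.py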
def get_python_files(all_files=None):
"""Gets a list of all Python files in the repository that need linting.
Relies on :func:`get_files_for_linting()` to determine which files should
be considered.
NOTE: This requires ``git`` to be installed and requires that this
is run within the ``git`` repository.
:type all_files: list or ``NoneType``
:param all_files: Optional list of files to be linted.
:rtype: tuple
:returns: A tuple containing two lists and a boolean. The first list
contains all production files, the next all test files and
the boolean indicates if a restricted fileset was used.
"""
using_restricted = False
if all_files is None:
all_files, diff_base = get_files_for_linting()
using_restricted = diff_base is not None
library_files = []
non_library_files = []
for filename in all_files:
if valid_filename(filename):
if is_production_filename(filename):
library_files.append(filename)
else:
non_library_files.append(filename)
return library_files, non_library_files, using_restricted
def lint_fileset(filenames, rcfile, description):
"""Lints a group of files using a given rcfile."""
# Only lint filenames that exist. For example, 'git diff --name-only'
# could spit out deleted / renamed files. Another alternative could
# be to use 'git diff --name-status' and filter out files with a
# status of 'D'.
filenames = [filename for filename in filenames
if os.path.exists(filename)]
if filenames:
rc_flag = '--rcfile=%s' % (rcfile,)
pylint_shell_command = ['pylint', rc_flag] + filenames
|
duanyifei/python_modules_test | test_message.py | Python | gpl-3.0 | 925 | 0.021935 | #coding:utf8
'''
发布订阅模式
'''
import message
#Example
def hello(name):
print "hello, %s."%name
def hi(name):
print "hi, %s."%name
def stop(name):
print 'hello, %s. greet5'%name
print 'discontinued.'
ctx = message.Context()
ctx.discontinued = Tru | e
return ctx
# Subscribe to a topic and publish to it
message.sub('greet', hello)
message.sub('greet', hi)
message.pub('greet', 'lai')
message.pub('greet1', 'lai1')
# Subscribe to another topic and publish to it
message.sub('greet1', hello)
message.pub('greet1', 'lai1')
# Unsubscribe
message.unsub('greet', hello)
message.pub('greet', 'unsub')
# Declare a topic
message.declare('greet2', 'lai2')
# The first subscriber receives the predeclared message without an explicit publish
message.sub('greet2', hello)
# Test adjusting the callback queue order
message.s | ub('greet', hello)
# After stop runs, the remaining callbacks are not executed
message.sub('greet', stop, front=True)
message.pub('greet', 'stop')
|
zeffii/sublimetext_productivity | Packages/User/open_gitbash_here.py | Python | mit | 719 | 0.002782 | import sublime, sublime_plugin
import os
import subprocess
import threading
class OpenGitbashHere(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
file_path = view.file_name()
dirname = os.path.dirname(file_path)
th = BashTerminalThread(dirname)
th.start()
def enabled(self):
return True if self.view.file_name() else False
class BashTerminalThread(threading.Thread):
def __init__(self, dirname):
self.dirname = dirname
| threading.Thread.__init__(self)
def run(se | lf):
if self.dirname:
fpc = "--cd={0}".format(self.dirname)
subprocess.call([r"C:\Program Files\Git\git-bash.exe", fpc])
|
XcomConvent/xcom40k-shades | xcom40k/app/urls.py | Python | apache-2.0 | 3,601 | 0.029714 | from . import views
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# index page
url(r'^$', views.site().index, name = 'index'),
# login form
url(r'^login/$', views.site().login, name = 'login'),
url(r'^logout/$', views.site().logout, name = 'logout'),
# profiles
url(r'^profile/$', views.site().profile().index, name = 'profile'),
## users
url(r'^profile/users/(?P<user_id>[0-9]+)/view/$', views.site().profile().users().view, name = 'profile.users.view'),
url(r'^profile/users/(?P<user_id>[0-9]+)/edit/$', views.site().profile().users().edit, name = 'profile.users.edit'),
## reports
url(r'^profile/users/(?P<user_id>[0-9]+)/reports/$', views.site().profile().reports().index, name = 'profile.reports'),
url(r'^profile/users/(?P<user_id>[0-9]+)/reports/(?P<report_id>[0-9]+)/view/$', views.site().profile().reports().view, name = 'profile.reports.view'),
url(r'^profile/users/(?P<user_id>[0-9]+)/reports/(?P<report_id>[0-9]+)/edit/$', views.site().profile().reports().edit, name = 'profile.reports.edit'),
# url(r'^profile/users/(?P<user_id>[ | 0-9]+)/reports/(?P<report_id>)/pdf/$', views.site().profile().reports() | .pdf, name = 'profile.reports.pdf'),
## chars
url(r'^profile/chars/new/$', views.site().profile().chars().new, name = 'profile.chars.new'),
url(r'^profile/chars/(?P<char_id>[0-9]+)/view/$', views.site().profile().chars().view, name = 'profile.chars.view'),
url(r'^profile/chars/(?P<char_id>[0-9]+)/edit/$', views.site().profile().chars().edit, name = 'profile.chars.edit'),
# missions
url(r'^missions/$', views.site().missions().index, name = 'missions'),
url(r'^missions/(?P<mission_id>[0-9]+)/view/$', views.site().missions().fly().index, name = 'missions.view'),
url(r'^missions/(?P<mission_id>[0-9]+)/(?P<char_id>[0-9]+)/edit/$', views.site().missions().fly().edit, name = 'missions.edit'),
url(r'^missions/(?P<mission_id>[0-9]+)/pdf/$', views.site().missions().pdf, name = 'missions.pdf'),
url(r'^missions/(?P<mission_id>[0-9]+)/(?P<char_id>[0-9]+)/rm/$', views.site().missions().fly().rm, name = 'missions.rm'),
url(r'^missions/(?P<mission_id>[0-9]+)/report/$', views.site().missions().report, name = 'missions.report'),
# stash
url(r'^stash/$', views.site().stash().index, name = 'stash'),
url(r'^stash/view/$', views.site().stash().view, name = 'stash.view'),
url(r'^stash/(?P<market_token_id>[0-9]+)/buy/$', views.site().stash().token().buy, name = 'stash.tokens.buy'),
url(r'^stash/sell/add/$', views.site().stash().sell().add, name = 'stash.sell.add'),
url(r'^stash/sell/make/$', views.site().stash().sell().make, name = 'stash.sell.make'),
# train
url(r'^train/$', views.site().train().index, name = 'train'),
url(r'^train/(?P<char_id>[0-9]+)/edit/$', views.site().train().edit, name = 'train.edit'),
url(r'^neuro/$', views.site().train().neuro().index, name = 'train.neuro'),
url(r'^neuro/add/$', views.site.train.neuro.NeuroRequestCreateViewGeneric.as_view(), name = 'train.neuro.add'),
url(r'^neuro/(?P<nrq_id>[0-9]+)/auth/$', views.site().train().neuro().auth, name = 'train.neuro.auth'),
# rnd & stuff
url(r'^rnd/storyline/$', views.site().nfo().storyline, name = 'nfo.storyline'),
url(r'^rnd/rnd/$', views.site().nfo().rnd, name = 'nfo.rnd'), # wiki engine? Markdoc
url(r'^rnd/recruit/$', views.site().nfo().recruit, name = 'nfo.recruit'),
#vk
url(r'^rnd/vk/$', views.site().nfo().vk, name = 'vk')
]
|
jon-jacky/PyModel | samples/WebApplication/ScenarioLogin.py | Python | bsd-3-clause | 279 | 0.017921 | "like scenario_login.txt in NMod | el WebApplication"
from WebModel import Login, Logout
actions = (Login, Logout) # | just these to allow interleaving
testSuite = [
[
(Login, ( 'VinniPuhh', 'Correct' ), 'Success'),
(Logout, ( 'VinniPuhh', ), None)
]
]
|
uberfastman/yahoo-fantasy-football-metrics | integrations/drive_integration.py | Python | gpl-3.0 | 8,962 | 0.002008 | __author__ = "Wren J. R. (uberfastman)"
__email__ = "wrenjr@yahoo.com"
# code snippets taken from: http://stackoverflow.com/questions/24419188/automating-pydrive-verification-process
import datetime
import logging
from pathlib import Path
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from report.logger import get_logger
from utils.app_config_parser import AppConfigParser
logger = get_logger(__name__, propagate=False)
# Suppress verbose googleapiclient info/warning logging
logging.getLogger("googleapiclient").setLevel(level=logging.ERROR)
logging.getLogger("googleapiclient.discovery").setLevel(level=logging.ERROR)
logging.getLogger("googleapiclient.discovery_cache").setLevel(level=logging.ERROR)
logging.getLogger("googleapiclient.discovery_cache.file_cache").setLevel(level=logging.ERROR)
class GoogleDriveUploader(object):
def __init__(self, filename, config):
logger.debug("Initializing Google Drive uploader.")
project_dir = Path(__file__).parents[1]
logger.debug("Authenticating with Google Drive.")
self.filename = Path(project_dir) / filename
self.config = config
self.gauth = GoogleAuth()
auth_token = Path(project_dir) / Path(self.config.get("Drive", "google_drive_auth_token"))
# Try to load saved client credentials
self.gauth.LoadCredentialsFile(auth_token)
if self.gauth.credentials is None:
# Authenticate if they're not there
self.gauth.LocalWebserverAuth()
elif self.gauth.access_token_expired:
# Refresh them if expired
self.gauth.Refresh()
else:
# Initialize the saved creds
self.gauth.Authorize()
# Save the current credentials to a file
self.gauth.SaveCredentialsFile(auth_token)
def upload_file(self, test=False):
logger.debug("Uploading file to Google Drive.")
# Create GoogleDrive instance with authenticated GoogleAuth instance.
drive = GoogleDrive(self.gauth)
# Get lists of folders
root_folders = drive.ListFile(
{"q": "'root' in parents and mimeTyp | e='application/vnd.google-apps.folder' and trashed=false"}).GetList()
google_drive_folder_path_default = self.config.get("Drive", "google_drive_folder_path_default")
google_drive_folder_path = Path(sel | f.config.get(
"Drive", "google_drive_folder_path", fallback=google_drive_folder_path_default)).parts
google_drive_root_folder_id = self.make_root_folder(
drive,
self.check_file_existence(google_drive_folder_path[0], root_folders, "root"),
google_drive_folder_path[0]
)
if not test:
parent_folder_id = google_drive_root_folder_id
parent_folder_content_folders = drive.ListFile({
"q": (
f"'{parent_folder_id}' in parents and "
f"mimeType='application/vnd.google-apps.folder' and "
f"trashed=false"
)
}).GetList()
for folder in google_drive_folder_path[1:]:
# create folder chain in Google Drive
parent_folder_id = self.make_parent_folder(
drive,
self.check_file_existence(folder, parent_folder_content_folders, parent_folder_id),
folder,
parent_folder_id
)
parent_folder_content_folders = drive.ListFile({
"q": (
f"'{parent_folder_id}' in parents and "
f"mimeType='application/vnd.google-apps.folder' and "
f"trashed=false"
)
}).GetList()
# Check for season folder and create it if it does not exist
season_folder_name = Path(self.filename).parts[-3]
season_folder_id = self.make_parent_folder(
drive,
self.check_file_existence(season_folder_name, parent_folder_content_folders, parent_folder_id),
season_folder_name,
parent_folder_id
)
season_folder_content_folders = drive.ListFile({
"q": (
f"'{season_folder_id}' in parents and "
f"mimeType='application/vnd.google-apps.folder' and "
f"trashed=false"
)
}).GetList()
# Check for league folder and create it if it does not exist
league_folder_name = Path(self.filename).parts[-2].replace("-", "_")
league_folder_id = self.make_parent_folder(
drive,
self.check_file_existence(league_folder_name, season_folder_content_folders, season_folder_id),
league_folder_name, season_folder_id
)
league_folder_content_pdfs = drive.ListFile({
"q": (
f"'{league_folder_id}' in parents and "
f"mimeType='application/pdf' and "
f"trashed=false"
)
}).GetList()
# Check for league report and create if if it does not exist
report_file_name = Path(self.filename).parts[-1]
report_file = self.check_file_existence(report_file_name, league_folder_content_pdfs, league_folder_id)
else:
all_pdfs = drive.ListFile({"q": "mimeType='application/pdf' and trashed=false"}).GetList()
report_file_name = self.filename
report_file = self.check_file_existence(report_file_name, all_pdfs, "root")
league_folder_id = "root"
if report_file:
report_file.Delete()
upload_file = drive.CreateFile(
{
"title": report_file_name,
"mimeType": "application/pdf",
"parents": [
{
"kind": "drive#fileLink",
"id": league_folder_id
}
]
}
)
upload_file.SetContentFile(self.filename)
# Upload the file.
upload_file.Upload()
upload_file.InsertPermission(
{
"type": "anyone",
"role": "reader",
"withLink": True
}
)
return "\nFantasy Football Report\nGenerated %s\n*%s*\n\n_Google Drive Link:_\n%s" % (
"{:%Y-%b-%d %H:%M:%S}".format(datetime.datetime.now()), upload_file['title'], upload_file["alternateLink"])
@staticmethod
def check_file_existence(file_name, file_list, parent_id):
drive_file_name = file_name
google_drive_file = None
for drive_file in file_list:
if drive_file["title"] == drive_file_name:
for parent_folder in drive_file["parents"]:
if parent_folder["id"] == parent_id or parent_folder["isRoot"]:
google_drive_file = drive_file
return google_drive_file
@staticmethod
def make_root_folder(drive, folder, folder_name):
if not folder:
new_root_folder = drive.CreateFile(
{
"title": folder_name,
"parents": [
{
"kind": "drive#fileLink",
"isRoot": True,
"id": "root"
}
],
"mimeType": "application/vnd.google-apps.folder"
}
)
new_root_folder.Upload()
root_folder_id = new_root_folder["id"]
else:
root_folder_id = folder["id"]
return root_folder_id
@staticmethod
def make_parent_folder(drive, folder, folder_name, parent_folder_id):
if not folder:
new_parent_folder = drive.CreateFile(
{
"title": folder_name,
"parents": [
{
|
ran777/edu_intell | edu_creative/views.py | Python | gpl-3.0 | 3,325 | 0.001535 | from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import Q, F
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from posts.models import Posts
from uploadfiles.models import UploadFile
from edu_warning.tools import user_setting
# Create your views here.
def __page_it(q, page):
paginator = Paginator(q, user_setting['creative']['page_num'])
try:
q = paginator.page(page)
except PageNotAnInteger:
q = paginator.page(1)
except EmptyPage:
q = paginator.page(paginator.num_pages)
return q
def index(request):
context = {"page_now": "创意设计"}
return render(request, 'creative/base.html', context)
def design(request):
q = Posts.objects.filter(post_category__name="筹划设计").order_by('-click_num')
q = list(z | ip(q, (i.uploadfile_set.all()[0].file for i in q)))
context = {'q': __page_it(q, request.GET.get('page'))}
return render(request, 'creative/design.html', context)
def templates(request):
q = Posts.objects.filter(post_category__name="模版创意").order_by('-click_num')
q = list(zip(q, (i.uploadfile_set.all()[0].file for i in q)))
context = {'q': __page_it(q, request.GET.get('page'))}
| return render(request, 'creative/template_creative.html', context)
def method(request):
q = Posts.objects.filter(post_category__name="方法形式").order_by('-click_num')
# q = list(zip(q, (i.uploadfile_set.all()[0].file for i in q)))
attachment = [i.uploadfile_set.all() for i in q]
img = (i.filter(type='i') for i in attachment)
files = (i.filter(type='f') for i in attachment)
videos = (i.filter(type='v') for i in attachment)
q = list(zip(q, img, files, videos))
context = {'q': __page_it(q, request.GET.get('page'))}
return render(request, 'creative/method_form.html', context)
def course_ware(request):
q = Posts.objects.filter(post_category__name="课件样式").order_by('-click_num')
attachment = [i.uploadfile_set.all() for i in q]
img = (i.filter(type='i')[0] for i in attachment)
files = (i.filter(type='f')[0] for i in attachment)
q = list(zip(q, img, files))
context = {'q': __page_it(q, request.GET.get('page'))}
return render(request, 'creative/course_ware.html', context)
def post_detail(request):
q_type = request.GET.get('type')
if q_type is None:
return
context = {"q_type": q_type}
if q_type == 'c': # courseware detail
post = Posts.objects.get(pk=int(request.GET.get('pid')))
post.click_num += 1
post.save()
context['post'] = post
context['file'] = UploadFile.objects.get(pk=int(request.GET.get('fid')))
context['img'] = UploadFile.objects.get(pk=int(request.GET.get('iid')))
if q_type == 'm': # method-form detail
post = Posts.objects.get(pk=int(request.GET.get('pid')))
post.click_num += 1
post.save()
context['post'] = post
context['file'] = UploadFile.objects.get(pk=int(request.GET.get('fid')))
if q_type == 't': # table detail
post = Posts.objects.get(pk=int(request.GET.get('pid')))
post.click_num += 1
post.save()
context['post'] = post
return render(request, 'creative/post_detail.html', context)
|
nrc/rustc-perf | collector/benchmarks/cranelift-codegen/cranelift-codegen/meta-python/isa/x86/defs.py | Python | mit | 746 | 0 | """
x86 definitions.
Commonly used definitions.
"""
from __future__ import absolute_import
from cdsl.isa import TargetISA, CPUMode
import base.instructions
from . import instructions as x86
from base.immediates import floatcc
ISA = TargetISA('x86', [base.instructions.GROUP, x86.GROUP]) # type: TargetISA
# CPU modes for 32-bit and 64-bit operation.
X86_64 = CPUMode('I64', ISA)
X86_32 = CPUMode('I32', ISA)
# The set of floating point condition codes tha | t are directly supported.
# Other condition codes need to be reversed or expressed as two tests.
supported_floatccs = [
floatcc.ord,
| floatcc.uno,
floatcc.one,
floatcc.ueq,
floatcc.gt,
floatcc.ge,
floatcc.ult,
floatcc.ule]
|
DamienIrving/ocean-analysis | data_processing/obs/calc_durack_ocean_maps.py | Python | mit | 5,232 | 0.005352 | """
Filename: calc_durack_ocean_maps.py
Author: Damien Irving, irving.damien@gmail.com
Description: Calculate the zonal and vertical mean ocean anomaly fields
from the Durack and Wijffels (2010) data files
"""
# Import general Python modules
import sys, os, pdb
import argparse, math
import numpy
import iris
iris.FUTURE.netcdf_no_unlimited = True
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import convenient_universal as uconv
import calc_ocean_maps
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
def fix_cube(cube, data_type):
"""Fixes for initial loading of cube"""
cube = iris.util.squeeze(cube)
cube.coord('sea_water_pressure').units = 'dbar'
cube.coord('sea_water_pressure').standard_name = 'depth'
assert data_type in ['trend', 'climatology']
if data_type == 'trend':
cube.data = cube.data / 50.
cube.units = 'K/yr'
return cube
def main(inargs):
"""Run the program."""
variables = ['potential_temperature', 'practical_salinity']
# Read data
change_cube = {}
| climatology_cube = {}
for variable in variables:
change_cube[variable] = iris.load_cube(inargs.infile, 'change_over_time_in_sea_water_'+va | riable)
change_cube[variable] = fix_cube(change_cube[variable], 'trend')
climatology_cube[variable] = iris.load_cube(inargs.infile, 'sea_water_'+variable)
climatology_cube[variable] = fix_cube(climatology_cube[variable], 'climatology')
basin_array_default = calc_ocean_maps.create_basin_array(change_cube[variable])
coord_names = [coord.name() for coord in change_cube[variable].dim_coords]
atts = change_cube[variable].attributes
atts['history'] = gio.write_metadata(file_info={inargs.infile: atts['history']})
atts['model_id'] = 'Durack and Wijffels'
# Calculate maps
for variable in variables:
if variable == 'potential_temperature':
standard_name = 'sea_water_potential_temperature'
var_name = 'thetao'
elif variable == 'practical_salinity':
standard_name = 'sea_water_salinity'
var_name = 'so'
change_cube_list = iris.cube.CubeList([])
climatology_cube_list = iris.cube.CubeList([])
for layer in calc_ocean_maps.vertical_layers.keys():
change_cube_vm = calc_ocean_maps.calc_vertical_mean(change_cube[variable].copy(), layer, coord_names, atts, standard_name, var_name)
change_cube_list.append(change_cube_vm)
climatology_cube_vm = calc_ocean_maps.calc_vertical_mean(climatology_cube[variable].copy(), layer, coord_names, atts, standard_name, var_name)
climatology_cube_list.append(climatology_cube_vm)
if layer in ['surface', 'argo']:
for basin in calc_ocean_maps.basins.keys():
basin_array = calc_ocean_maps.create_basin_array(change_cube_vm)
depth_cube = None
change_cube_list.append(calc_ocean_maps.calc_zonal_vertical_mean(change_cube_vm.copy(), depth_cube, basin_array, basin, layer, atts, standard_name, var_name))
for basin in calc_ocean_maps.basins.keys():
change_cube_zm = calc_ocean_maps.calc_zonal_mean(change_cube[variable].copy(), basin_array_default, basin, atts, standard_name, var_name)
change_cube_list.append(change_cube_zm)
climatology_cube_zm = calc_ocean_maps.calc_zonal_mean(climatology_cube[variable].copy(), basin_array_default, basin, atts, standard_name, var_name)
climatology_cube_list.append(climatology_cube_zm)
iris.save(change_cube_list, getattr(inargs, 'change_outfile_' + var_name))
iris.save(climatology_cube_list, getattr(inargs, 'climatology_outfile_' + var_name))
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""
description='Calculate the zonal and vertical mean ocean anomaly fields from Durack and Wijffels (2010) data files'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("infile", type=str, help="Input data file")
parser.add_argument("change_outfile_thetao", type=str, help="Output file name for potential temperature change data")
parser.add_argument("climatology_outfile_thetao", type=str, help="Output file name for potential temperature climatology data")
parser.add_argument("change_outfile_so", type=str, help="Output file name for salinity change data")
parser.add_argument("climatology_outfile_so", type=str, help="Output file name for salinity climatology data")
args = parser.parse_args()
main(args)
|
shlomimatichin/workflow | workflow/calendar/models.py | Python | gpl-3.0 | 554 | 0.057762 | from django.db import models
from django.contrib.auth.models import User
class Attendance( models.Model ):
date = models.DateField()
teamMember = models.ForeignKey( User, blank = True, null = True, related_name = "cal | endarAttendances" )
workingDay = models.BooleanField()
user = models.ForeignKey( User, related_name = "calendarAttendancesReported" )
when = models.DateTimeField( auto_now = True )
def __unicode__( self ):
return "<Attendance %s %s %s>" % ( self.date, self.teamMember.username if self.teamMember else 'tea | m', self.workingDay )
|
inventree/InvenTree | InvenTree/stock/migrations/0063_auto_20210511_2343.py | Python | mit | 676 | 0 | # Generated by Django 3.2 on 2021-05-11 13:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('st | ock', '0062_auto_20210511_2151'),
]
operations = [
migrations.RemoveField(
model_name='stockitemtracking',
name='link',
),
migrations.RemoveField(
| model_name='stockitemtracking',
name='quantity',
),
migrations.RemoveField(
model_name='stockitemtracking',
name='system',
),
migrations.RemoveField(
model_name='stockitemtracking',
name='title',
),
]
|
rarcotvmw/capirca | tests/lib/iptables_test.py | Python | apache-2.0 | 39,394 | 0.002132 | # Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for iptables rendering module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import re
import unittest
from lib import aclgenerator
from lib import iptables
from lib import nacaddr
from lib import naming
from lib import policy
import mock
GOOD_HEADER_1 = """
header {
comment:: "this is a test acl"
target:: iptables INPUT ACCEPT
}
"""
GOOD_HEADER_2 = """
header {
comment:: "this is a test acl"
target:: iptables OUTPUT DROP
}
"""
GOOD_HEADER_3 = """
header {
comment:: "this is a test acl with abbreviation"
target:: iptables INPUT ACCEPT abbreviateterms
}
"""
GOOD_HEADER_4 = """
header {
comment:: "this is a test acl with truncation"
target:: iptables INPUT ACCEPT truncateterms
}
"""
GOOD_HEADER_5 = """
header {
comment:: "this is a test acl with no default target"
target:: iptables INPUT
}
"""
GOOD_HEADER_6 = """
header {
comment:: "this is a test acl with a custom chain and no default target"
target:: iptables foo
}
"""
IPV6_HEADER_1 = """
header {
comment:: "test header for inet6 terms"
target:: iptables INPUT DROP inet6
}
"""
NON_STANDARD_CHAIN = """
header {
comment:: "this is a test acl with non-standard chain"
target:: iptables foo ACCEPT
}
"""
NOSTATE_HEADER = """
header {
comment:: "iptables filter without stateful"
target:: iptables INPUT ACCEPT nostate
}
"""
CHAIN_HEADER_1 = """
header {
comment:: "this is a test acl"
target:: iptables foobar_chain nostate
}
"""
BAD_HEADER_2 = """
header {
target:: juniper
}
"""
BAD_HEADER_3 = """
header {
target:: iptables INPUT MAYBE
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: icmp
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
source-address:: INTERNAL
source-exclude:: OOB_NET
protocol:: tcp
source-port:: HTTP
action:: accept
}
"""
GOOD_TERM_3 = """
term good-term-3 {
source-port:: HTTP
protocol:: tcp
option:: rst fin tcp-established established
action:: accept
}
"""
GOOD_TERM_4 = """
term good-term-4 {
protocol:: tcp udp esp ah gre icmp 50
action:: accept
}
"""
GOOD_TERM_5 = """
term good-term-5 {
verbatim:: iptables "mary had a little lamb"
verbatim:: cisco "mary had second lamb"
verbatim:: juniper "mary had third lamb"
}
"""
GOOD_TERM_6 = """
term good-term-6 {
comment:: "Some text describing what this block does,
possibly including newlines, blank lines,
and extra-long comments (over 255 characters)
%(long_line)s
All these cause problems if passed verbatim to iptables.
"
comment:: ""
protocol:: tcp
action:: accept
}
""" % {'long_line': '-' * 260}
GOOD_TERM_7 = """
term drop-short-initial-fragments {
option:: first-fragment
packet-length:: 1-119
action:: deny
}
term drop-header-overwrite {
fragment-offset:: 1-119
action:: deny
}
"""
GOOD_TERM_8 = """
term block-some-icmp {
protocol:: icmp
icmp-type:: router-solicitation information-request unreachable echo-reply
action:: deny
}
"""
GOOD_TERM_9 = """
term good-term-9 {
source-address:: SOME_SOURCE
destination-address:: SOME_DEST
protocol:: tcp
source-port:: HTTP
action:: accept
}
"""
GOOD_TERM_10 = """
term good-term-10 {
owner:: foo@google.com
action:: accept
}
"""
GOOD_TERM_11 = """
term good_term_11 {
protocol:: icmp
icmp-type:: unreachable
icmp-code:: 3 4
action:: accept
}
"""
BAD_QUOTE_TERM_1 = """
term bad-quote-term-1 {
comment:: "Text describing without quotes"
protocol:: tcp
action:: accept
}
"""
IPV6_TERM_1 = """
term inet6-icmp {
protocol:: icmpv6
icmp-type:: destination-unreachable time-exceeded echo-reply
action:: deny
}
"""
IPV6_HEADERS = """
term ipv6-header-1 {
protocol:: hopopt
action:: deny
}
term ipv6-header-2 {
protocol:: fragment
action:: deny
}
"""
ICMPV6_TERM_1 = """
term inet6-icmp {
source-address:: IPV6_INTERNAL
protocol:: icmpv6
icmp-type:: destination-unreachable
action:: deny
}
"""
LOGGING_TERM_1 = """
term foo {
protocol:: tcp
logging:: syslog
action:: accept
}
"""
UDP_STATE_TERM = """
term test-conntrack-udp {
protocol:: udp
option:: established
action:: accept
}
"""
TCP_STATE_TERM = """
term tcp-established-only {
protocol:: tcp
option:: established
action:: accept
}
"""
STATEFUL_ONLY_TERM = """
term stateful-only {
option:: established
action:: accept
}
"""
BAD_LONG_TERM_NAME = """
term this-term-name-is-really-far-too-long {
protocol:: tcp
action:: accept
}
"""
GOOD_LONG_TERM_NAME = """
term google-experiment-abb | reviations {
protocol:: tcp
action:: accept
}
"""
GOOD_MULTIPORT = """
term multiport {
source-port:: FOURTEEN_PORTS
protocol:: tcp
action:: accept
}
"""
MULTIPORT_SWAP = """
term multiport {
source-port:: HTTP HTTPS
destination-port:: SSH
protocol:: tcp
action:: accept
}
"""
EXPIRED_TERM = """
term is_expired {
expiration:: 2001-01-01
action:: accept
} |
"""
EXPIRING_TERM = """
term is_expiring {
expiration:: %s
action:: accept
}
"""
GOOD_MULTIPORT_RANGE = """
term bad-mport-ranges {
destination-port:: FIFTEEN_PORTS_WITH_RANGES
protocol:: tcp
action:: accept
}
"""
LARGE_MULTIPORT = """
term bad-multiport {
destination-port:: LOTS_OF_PORTS
protocol:: tcp
action:: accept
}
"""
DUAL_LARGE_MULTIPORT = """
term bad-multiport {
source-port:: LOTS_OF_SPORTS
destination-port:: LOTS_OF_DPORTS
protocol:: tcp
action:: accept
}
"""
UNSUPPORTED_TERM = """
term ether-type-filter {
ether-type:: arp
action:: accept
}
"""
UNKNOWN_TERM_KEYWORD = """
term unknown-keyword {
comment:: "imaginary new keyword added to the policy library."
comment:: "i.e. ip-options-count:: 2-255"
comment:: "must be added in tests due to checking in policy library."
action:: deny
}
"""
UNSUPPORTED_EXCEPT = """
term block-non-standard {
protocol-except:: tcp udp icmp
action:: deny
}
"""
REJECT_TERM1 = """
term reject-term1 {
action:: reject-with-tcp-rst
}
"""
REJECT_TERM2 = """
term reject-term2 {
action:: reject
}
"""
NEXT_TERM1 = """
term next-term1 {
action:: next
}
"""
BAD_PROTOCOL_MATCHES = """
term proto-accept-and-reject {
protocol:: tcp udp icmp
protocol-except:: gre
action:: accept
}
"""
SOURCE_INTERFACE_TERM = """
term src-interface {
protocol:: tcp
source-interface:: eth0
action:: accept
}
"""
DESTINATION_INTERFACE_TERM = """
term dst-interface {
protocol:: tcp
destination-interface:: eth0
action:: accept
}
"""
GOOD_WARNING_TERM = """
term good-warning-term {
source-port:: HTTP
protocol:: tcp
option:: rst fin tcp-established established
policer:: batman
action:: accept
}
"""
SUPPORTED_TOKENS = {
'action',
'comment',
'counter',
'destination_address',
'destination_address_exclude',
'destination_interface',
'destination_port',
'destination_prefix',
'expiration',
'fragment_offset',
'icmp_code',
'icmp_type',
'stateless_reply',
'logging',
'name',
'option',
'owner',
'packet_length',
'platform',
'platform_exclude',
'protocol',
'routing_instance',
'source_address',
'source_address_exclude',
'source_interface',
'source_port',
'source_prefix',
'translated',
'verbatim',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-pa |
bendudson/BOUT-1.0 | tools/tokamak_grids/cyclone/cyclone.py | Python | gpl-3.0 | 5,335 | 0.005061 | #!/usr/bin/env python
try:
from numpy import *
from scipy.integrate import quad
except ImportError:
print "ERROR: Need NumPy and SciPy modules"
raise
try:
from boututils import DataFile
except ImportError:
print "ERROR: Missing boututils.Datafile. Add pylib to your PYTHONPATH"
raise
######################################################
nx = 68 # Number of radial grid points
ny = 32 # Number of poloidal (parallel) grid points
varyBp = False
output = "cyclone_"+str(nx)+"x"+str(ny)+".nc"
######################################################
Ni = 1. # Ion density in 10^20 m^-3
Ti = 1000 # Temperature in eV (Te = Ti)
Rmaj = 4 # Major radius [meters]
q = 1.4 # Safety factor q = r*Bt/(R*Bp)
s = 0.776 # Magnetic shear s = (r/q) dq/dr
eta_i = 3.114 # Ratio of density to temp. length scales eta = L_n / L_T
epsilon = 0.18 # Inverse aspect ratio epsilon = r / R
Rnorm = 6.92 # Ratio of major radius to L_T Rnorm = R / L_T
rho_norm = 0.01 # Normalised ion gyro-radius rho_norm = rho_i / L_T
r_wid = 100 # Radial extent, normalised to gyro-radius r_wid = dr / rho_i
Mi = 2.*1.67262158e-27 # Ion mass [kg]. Deuterium
######################################################
def eps_integral(eps, theta=None):
if theta == None:
theta = 2.*pi
return (quad(lambda t: 1./((1. - eps*cos(t))**2), 0., theta))[0]
rminor = Rmaj * epsilon # Minor radius [m]
L_T = Rmaj / Rnorm # Temp. length scale [m]
L_n = eta_i * L_T # Density length scale [m]
rho_i = rho_norm * L_T # Ion Larmor radius [m]
Bt0 = sqrt(2.*Ti*Mi / 1.602e-19) / rho_i # Toroidal field from rho_i [T]
Bp = rminor * Bt0 * eps_integral(epsilon)/ (q * Rmaj) # Poloidal field [T]
dr = r_wid * rho_i # Width of domain [m]
theta = 2.*pi * arange(0,float(ny)) / float(ny)
Rxy = zeros([nx, ny])
Zxy = zeros([nx, ny])
for i in range(ny):
Rxy[:,i] = Rmaj - rminor*cos(theta[i])
Zxy[:,i] = rminor * sin(theta[i])
dy = zeros([nx,ny]) + 2.*pi / float(ny)
hthe = zeros([nx,ny]) + rminor
Btxy = Bt0 * Rmaj / Rxy
print "Toroidal fie | ld varies from "+str(Bt0*Rmaj/(Rmaj + rminor)) + \
" to "+str(Bt0*Rmaj/(Rmaj - rminor))
# Minor radius offset
drprof = dr*((arange(nx) / float(nx-1)) - 0.5)
# | q profile
qprof = q + (s*q/rminor) * drprof
print "q varies from "+str(min(qprof))+" to "+str(max(qprof))
ShiftAngle = qprof * 2.*pi
Bpxy = zeros([nx,ny])
if varyBp:
# Vary Bp to get shear
for y in range(ny):
Bpxy[:,y] = Bp * q / qprof
print "Poloidal field varies from "+str(min(Bpxy))+" to "+str(max(Bpxy))
else:
# Constant Bp, but shift angle varies
Bpxy += Bp
dx = Bp * (dr / float(nx-1)) * Rxy
Bxy = sqrt(Btxy**2 + Bpxy**2)
zShift = zeros([nx, ny])
qint = eps_integral(epsilon)
for y in range(1,ny):
zShift[:,y] = ShiftAngle * eps_integral(epsilon, theta=theta[y]) / qint
# Make zShift = 0 on outboard midplane (for plotting mainly)
y0 = int(ny/2)
zs0 = zShift[:,y0]
for i in range(ny):
zShift[:,i] -= zs0
Ni0 = zeros([nx, ny])
Ti0 = zeros([nx, ny])
for i in range(ny):
Ni0[:,i] = Ni * exp(-drprof / L_n)
Ti0[:,i] = Ti * exp(-drprof / L_T)
Te0 = Ti0
pressure = Ni0 * (Ti0 + Te0) * 1.602e-19*1.0e20 # In Pascals
Jpar0 = zeros([nx, ny])
# Shape : Rxy, Zxy
# Differencing: hthe, dx, dy
# Profiles : Ni0, Ti0, Te0, pressure, Jpar0
# B field : Btxy, Bpxy, Bxy
# q profile : qprof
######################################################
# Curvature
# Bxy is constant in x, so need to supply logB too
logB = zeros([nx, ny])
for x in range(nx):
for y in range(ny):
rpos = (float(x)/float(nx-1) - 0.5) * dr
R = Rmaj - (rminor + rpos)*cos(theta[y])
Bt = Bt0 * Rmaj / R
logB[x,y] = log(sqrt(Bt**2 + Bp**2))
######################################################
# Topology: Just in the core
ixseps1 = nx
ixseps2 = nx
jyseps1_1 = -1
jyseps1_2 = int(ny/2)
jyseps2_1 = jyseps1_2
jyseps2_2 = ny-1
ny_inner = jyseps1_2
# Only one region
yup_xsplit = [nx]
ydown_xsplit = [nx]
yup_xin = [0]
yup_xout = [-1]
ydown_xin = [0]
ydown_xout = [-1]
nrad = [nx]
npol = [ny]
######################################################
print "Writing grid to file "+output
of = DataFile()
of.open(output, create=True)
of.write("nx", nx)
of.write("ny", ny)
# Topology for original scheme
of.write("ixseps1", ixseps1)
of.write("ixseps2", ixseps2)
of.write("jyseps1_1", jyseps1_1)
of.write("jyseps1_2", jyseps1_2)
of.write("jyseps2_1", jyseps2_1)
of.write("jyseps2_2", jyseps2_2)
of.write("ny_inner", ny_inner)
# Grid spacing
of.write("dx", dx)
of.write("dy", dy)
of.write("ShiftAngle", ShiftAngle)
of.write("zShift", zShift)
of.write("Rxy", Rxy)
of.write("Zxy", Zxy)
of.write("Bpxy", Bpxy)
of.write("Btxy", Btxy)
of.write("Bxy", Bxy)
of.write("hthe", hthe)
# Topology for general configurations
of.write("yup_xsplit", yup_xsplit)
of.write("ydown_xsplit", ydown_xsplit)
of.write("yup_xin", yup_xin)
of.write("ydown_xin", ydown_xin)
of.write("ydown_xout", ydown_xout)
of.write("nrad", nrad)
of.write("npol", npol)
# plasma profiles
of.write("pressure", pressure)
of.write("Jpar0", Jpar0)
of.write("Ni0", Ni0)
of.write("Te0", Te0)
of.write("Ti0", Ti0)
of.write("Ni_x", Ni)
of.write("Te_x", Ti)
of.write("Ti_x", Ti)
of.write("bmag", Bt0)
of.write("rmag", Rmaj)
# Curvature
of.write("logB", logB)
of.close()
print "Done"
|
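A standalone sanity check (separate from cyclone.py above) for the eps_integral() helper: over a full period the integrand 1/(1 - eps*cos(t))**2 has the closed form 2*pi/(1 - eps**2)**1.5, so the quadrature result can be verified directly. Only SciPy and the epsilon value used in the script are assumed.
# Verify eps_integral's quadrature against the analytic value of the integral.
from math import cos, pi
from scipy.integrate import quad
eps = 0.18  # inverse aspect ratio used in the script above
numeric = quad(lambda t: 1. / ((1. - eps * cos(t)) ** 2), 0., 2. * pi)[0]
analytic = 2. * pi / (1. - eps ** 2) ** 1.5
assert abs(numeric - analytic) < 1e-6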
giginet/django-generic-tagging | generic_tagging/templatetags/tagging.py | Python | mit | 1,210 | 0.000826 | from django import template
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from ..models import TaggedItem, Tag
register = template.Library()
@register.assignment_tag
def get_tagged_items_for(object):
'''retrieve tagged items which relative with the specific object.
:syntax: {% get_tagged_items_for <object> as <variable> %}
'''
return TaggedItem.objects.get_for_object(object)
@register.assignment_tag
def get_tags_for(object):
'''retrieve tags which relative with the specific object.
:syntax: {% get_tags_for <object> as <variable> %}
'''
return Tag.objects.get_for_object(object)
@register.assig | nment_tag
def get_content_type_for(object):
'''retrieve content type object for the specific object.
:syntax: {% get_content_typ | e_for <object> as <variable> %}
'''
return ContentType.objects.get_for_model(object)
@register.simple_tag
def render_generic_tagging_head_tag():
return render_to_string('generic_tagging/head.html')
@register.simple_tag
def render_generic_tagging_component_tag_for(object):
return render_to_string('generic_tagging/component.html', {'object': object})
|
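A hedged usage sketch for the template tags registered above. The tag names and syntax come from the docstrings ({% get_tags_for <object> as <variable> %}, etc.); the `entry` context variable and the surrounding HTML are illustrative assumptions, not part of the library's API.
# Illustrative template source only; `entry` is any model instance supplied in
# the template context by the calling view (an assumption for this example).
EXAMPLE_TEMPLATE = """
{% load tagging %}
{% render_generic_tagging_head_tag %}
{% get_tags_for entry as tags %}
<ul>{% for tag in tags %}<li>{{ tag }}</li>{% endfor %}</ul>
{% render_generic_tagging_component_tag_for entry %}
"""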
mrjacobagilbert/gnuradio | grc/gui/FileDialogs.py | Python | gpl-3.0 | 6,524 | 0.002452 | """
Copyright 2007 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from os import path
from gi.repository import Gtk
from . import Constants, Utils, Dialogs
class FileDialogHelper(Gtk.FileChooserDialog, object):
"""
A wrapper class for the gtk file chooser dialog.
Implement a file chooser dialog with only necessary parameters.
"""
title = ''
action = Gtk.FileChooserAction.OPEN
filter_label = ''
filter_ext = ''
def __init__(self, parent, current_file_path):
"""
FileDialogHelper constructor.
Create a save or open dialog with cancel and ok buttons.
Use standard settings: no multiple selection, local files only, and the * filter.
Args:
action: Gtk.FileChooserAction.OPEN or Gtk.FileChooserAction.SAVE
title: the title of the dialog (string)
"""
ok_stock = {
Gtk.FileChooserAction.OPEN: 'gtk-open',
Gtk.FileChooserAction.SAVE: 'gtk-save'
}[self.action]
Gtk.FileChooserDialog.__init__(self, title=self.title, action=self.action,
transient_for=parent)
self.add_buttons('gtk-cancel', Gtk.ResponseType.CANCEL, ok_stock, Gtk.ResponseType.OK)
self.set_select_multiple(False)
self.set_local_only(True)
self.parent = parent
self.current_file_path = current_file_path or path.join(
Constants.DEFAULT_FILE_PATH, Constants.NEW_FLOGRAPH_TITLE + Constants.FILE_EXTENSION)
self.set_current_folder(path.dirname(current_file_path)) # current directory
self.setup_filters()
def setup_filters(self, filters=None):
set_default = True
filters = filters or ([(self.filter_label, self.filter_ext)] if self.filter_label else [])
filters.append(('All Files', ''))
for label, ext in filters:
if not label:
continue
f = Gtk.FileFilter()
f.set_name(label)
f.add_pattern('*' + ext)
self.add_filter(f)
if not set_default:
self.set_filter(f)
set_default = True
def run(self):
"""Get the filename and destroy the dialog."""
response = Gtk.FileChooserDialog.run(self)
filename = self.get_filename() if response == Gtk.ResponseType.OK else None
self.destroy()
return filename
class SaveFileDialog(FileDialogHelper):
"""A dialog box to save or open flow graph files. This is a base class, do not use."""
action = Gtk.FileChooserAction.SAVE
def __init__(self, parent, current_file_path):
super(SaveFileDialog, self).__init__(parent, current_file_path)
self.set_current_name(path.splitext(path.basename(self.current_file_path))[0] + self.filter_ext)
self.set_create_folders(True)
self.set_do_overwrite_confirmation(True)
class OpenFileDialog(FileDialogHelper):
"""A dialog box to save or open flow graph files. This is a base class, do not use."""
action = Gtk.FileChooserAction.OPEN
def show_missing_message(self, filename):
Dialogs.MessageDialogWrapper(
self.parent,
Gtk.MessageType.WARNING, Gtk.ButtonsType.CLOSE, 'Cannot Open!',
'File <b>{filename}</b> Does not Exist!'.format(filename=Utils.encode(filename)),
).run_and_destroy()
def get_filename(self):
"""
Run the dialog and get the filename.
If this is a save dialog and the file name is missing the extension, append the file extension.
If the file name with the extension already exists, show a overwrite dialog.
If this is an open dialog, return a list of filenames.
Returns:
the complete file path
"""
filenames = Gtk.FileChooserDialog.get_filenames(self)
for filename in filenames:
if not path.exists(filename):
self.show_missing_message(filename)
return None # rerun
return filenames
class OpenFlowGraph(OpenFileDialog):
title = 'Open a Flow Graph from a File...'
filter_label = 'Flow Graph Files'
filter_ext = Constants.FILE_EXTENSION
def __init__(self, parent, current_file_path=''):
super(OpenFlowGraph, self).__init__(parent, current_file_path)
self.set_select_multiple(True)
class OpenQSS(OpenFileDialog):
title = 'Open a QSS theme...'
filter_label = 'QSS Themes'
filter_ext = '.qss'
class SaveFlowGraph(SaveFileDialog):
title = 'Save a Flow Graph to a File...'
filter_label = 'Flow Graph Files'
filter_ext = Constants.FILE_EXTENSION
class SaveConsole(SaveFileDialog):
title = 'Sa | ve Console to a File...'
filter_label = ' | Test Files'
filter_ext = '.txt'
class SaveScreenShot(SaveFileDialog):
title = 'Save a Flow Graph Screen Shot...'
filters = [('PDF Files', '.pdf'), ('PNG Files', '.png'), ('SVG Files', '.svg')]
filter_ext = '.pdf' # the default
def __init__(self, parent, current_file_path=''):
super(SaveScreenShot, self).__init__(parent, current_file_path)
self.config = Gtk.Application.get_default().config
self._button = button = Gtk.CheckButton(label='Background transparent')
self._button.set_active(self.config.screen_shot_background_transparent())
self.set_extra_widget(button)
def setup_filters(self, filters=None):
super(SaveScreenShot, self).setup_filters(self.filters)
def show_missing_message(self, filename):
Dialogs.MessageDialogWrapper(
self.parent,
Gtk.MessageType.ERROR, Gtk.ButtonsType.CLOSE, 'Can not Save!',
'File Extension of <b>{filename}</b> not supported!'.format(filename=Utils.encode(filename)),
).run_and_destroy()
def run(self):
valid_exts = {ext for label, ext in self.filters}
filename = None
while True:
response = Gtk.FileChooserDialog.run(self)
if response != Gtk.ResponseType.OK:
filename = None
break
filename = self.get_filename()
if path.splitext(filename)[1] in valid_exts:
break
self.show_missing_message(filename)
bg_transparent = self._button.get_active()
self.config.screen_shot_background_transparent(bg_transparent)
self.destroy()
return filename, bg_transparent
|
kashif/chainer | chainer/functions/connection/bilinear.py | Python | mit | 6,625 | 0 | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import array
from chainer.utils import type_check
class BilinearFunction(function.Function):
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 6:
raise type_check.InvalidType(
'%s or %s' % (in_types.size() == 3, in_types.size() == 6),
'%s == %s' % (in_types.size(), n_in))
e1_type, e2_type, W_type = in_types[:3]
type_check_prod = type_check.make_variable(numpy.prod, 'prod')
type_check.expect(
e1_type.dtype == numpy.float32,
e1_type.ndim >= 2,
e2_type.dtype == numpy.float32,
e2_type.ndim >= 2,
e1_type.shape[0] == e2_type.shape[0],
W_type.dtype == numpy.float32,
W_type.ndim == 3,
type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
)
if n_in == 6:
out_size = W_type.shape[2]
V1_type, V2_type, b_type = in_types[3:]
type_check.expect(
V1_type.dtype == numpy.float32,
V1_type.ndim == 2,
V1_type.shape[0] == W_type.shape[0],
V1_type.shape[1] == out_size,
V2_type.dtype == numpy.float32,
V2_type.ndim == 2,
V2_type.shape[0] == W_type.shape[1],
V2_type.shape[1] == out_size,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
e1 = array.as_mat(inputs[0])
e2 = array.as_mat(inputs[1])
W = inputs[2]
if not type_check.same_types(*inputs):
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(e1): {1}, type(e2): {2}'
.format(type(W), type(e1), type(e2)))
xp = cuda.get_array_module(*inputs)
if xp is numpy:
y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
else:
i_len, j_len = e1.shape
k_len = e2.shape[1]
# 'ij,ik->ijk'
e1e2 = e1[:, :, None] * e2[:, None, :]
# ijk->i[jk]
e1e2 = e1e2.reshape(i_len, j_len * k_len)
# jkl->[jk]l
W_mat = W.reshape(-1, W.shape[2])
# 'i[jk],[jk]l->il'
y = e1e2.dot(W_mat)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
y += e1.dot(V1)
y += e2.dot(V2)
y += b
return y,
def backward(self, inputs, grad_outputs):
e1 = array.as_ma | t(inputs[0])
e2 = array.as_mat(inputs[1])
W = inputs[2]
gy | = grad_outputs[0]
xp = cuda.get_array_module(*inputs)
if xp is numpy:
gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)
ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)
ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)
else:
kern = cuda.reduce('T in0, T in1, T in2', 'T out',
'in0 * in1 * in2', 'a + b', 'out = a', 0,
'bilinear_product')
e1_b = e1[:, :, None, None] # ij
e2_b = e2[:, None, :, None] # ik
gy_b = gy[:, None, None, :] # il
W_b = W[None, :, :, :] # jkl
gW = kern(e1_b, e2_b, gy_b, axis=0) # 'ij,ik,il->jkl'
ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3)) # 'ik,jkl,il->ij'
ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3)) # 'ij,jkl,il->ik'
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
if len(inputs) == 6:
V1, V2, b = inputs[3:]
gV1 = e1.T.dot(gy)
gV2 = e2.T.dot(gy)
gb = gy.sum(0)
ge1 += gy.dot(V1.T)
ge2 += gy.dot(V2.T)
ret += gV1, gV2, gb
return ret
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
"""Applies a bilinear function based on given parameters.
This is a building block of Neural Tensor Network (see the reference paper
below). It takes two input variables and one or four parameters, and
outputs one variable.
To be precise, denote six input arrays mathematically by
:math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
:math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
:math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
:math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
:math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
:math:`b\\in \\mathbb{R}^{L}`,
where :math:`I` is mini-batch size.
In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
parameters.
The output of forward propagation is calculated as
.. math::
y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
\\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
Note that V1, V2, b are optional. If these are not given, then this
function omits the last three terms in the above equation.
.. note::
This function accepts an input variable ``e1`` or ``e2`` of a non-matrix
array. In this case, the leading dimension is treated as the batch
dimension, and the other dimensions are reduced to one dimension.
.. note::
In the original paper, :math:`J` and :math:`K`
must be equal and the author denotes :math:`[V^1 V^2]`
(concatenation of matrices) by :math:`V`.
Args:
e1 (~chainer.Variable): Left input variable.
e2 (~chainer.Variable): Right input variable.
W (~chainer.Variable): Quadratic weight variable.
V1 (~chainer.Variable): Left coefficient variable.
V2 (~chainer.Variable): Right coefficient variable.
b (~chainer.Variable): Bias variable.
Returns:
~chainer.Variable: Output variable.
See:
`Reasoning With Neural Tensor Networks for Knowledge Base Completion
<http://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
"""
flags = [V1 is None, V2 is None, b is None]
if any(flags):
if not all(flags):
raise ValueError('All coefficients and bias for bilinear() must '
'be None, if at least one of them is None.')
return BilinearFunction()(e1, e2, W)
else:
return BilinearFunction()(e1, e2, W, V1, V2, b)
|
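A minimal NumPy-only sketch of the computation performed by BilinearFunction.forward above: it evaluates the docstring formula y_il = sum_jk e1_ij e2_ik W_jkl + sum_j e1_ij V1_jl + sum_k e2_ik V2_kl + b_l with einsum (as in the CPU branch) and checks it against an independent tensordot evaluation (mirroring the GPU branch). The shapes are arbitrary example values.
import numpy as np
# Example shapes: batch I, left input J, right input K, output L.
I, J, K, L = 2, 3, 4, 5
rng = np.random.RandomState(0)
e1 = rng.randn(I, J).astype(np.float32)
e2 = rng.randn(I, K).astype(np.float32)
W = rng.randn(J, K, L).astype(np.float32)
V1 = rng.randn(J, L).astype(np.float32)
V2 = rng.randn(K, L).astype(np.float32)
b = rng.randn(L).astype(np.float32)
# CPU-style path: quadratic term via einsum plus the linear terms and bias.
y = np.einsum('ij,ik,jkl->il', e1, e2, W) + e1.dot(V1) + e2.dot(V2) + b
# GPU-style path: outer product e1_ij * e2_ik contracted against W over (j, k).
y_ref = np.tensordot(e1[:, :, None] * e2[:, None, :], W, axes=([1, 2], [0, 1]))
y_ref += e1.dot(V1) + e2.dot(V2) + b
assert np.allclose(y, y_ref, atol=1e-5)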
kencochrane/django-defender | defender/models.py | Python | apache-2.0 | 1,116 | 0 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class AccessAttempt(models.Model):
""" Access Attempt log """
user_agent = models.CharField(
max_length=255,
)
ip_address = models.GenericIPAddressField(
verbose_name='IP Address',
null=True,
| )
username = models.CharField(
max_length=255,
| null=True,
)
http_accept = models.CharField(
verbose_name='HTTP Accept',
max_length=1025,
)
path_info = models.CharField(
verbose_name='Path',
max_length=255,
)
attempt_time = models.DateTimeField(
auto_now_add=True,
)
login_valid = models.BooleanField(
default=False,
)
class Meta:
ordering = ['-attempt_time']
def __str__(self):
""" unicode value for this model """
return "{0} @ {1} | {2}".format(self.username,
self.attempt_time,
self.login_valid)
|