code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright (C) 2011 by Andrew Regner <andrew@aregner.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307, USA.
"""RackDNSGui Main Application Module
"""
import os
import random
import argparse
import tornado.web
import tornado.ioloop
import helpers
import handlers
class RackDNSGuiApplication(object):
    """Main Application class for rackdnsgui.

    Parses the command-line options, reconciles the verbosity flags, and
    runs the tornado web application.
    """

    def __init__(self):
        self._debug = False
        self._verbose = False
        self._quiet = False
        arguments = RackDNSGuiOptions("rackdnsgui").parsed_args
        self._quiet = arguments.quiet
        self._debug = arguments.debug
        # If we have debugging turned on we should also have verbose.
        if self._debug:
            self._verbose = True
        else:
            self._verbose = arguments.verbose
        # If we have verbose we shouldn't be quiet.
        if self._verbose:
            self._quiet = False
        # Other option handling ...
        helpers.COLORIZE = arguments.color
        self._port = arguments.port

    def run(self):
        """Run the application: build the tornado app and block in the IO loop."""
        # A fixed secret in debug mode keeps signed cookies valid across
        # restarts.  In production use os.urandom(), a cryptographically
        # secure source, instead of the previous random.randint()/chr()
        # loop -- the `random` module PRNG must not generate a
        # security-sensitive value such as a cookie-signing secret.
        cookie_secret = "testingtestingtestingtesting" if self._debug else \
                os.urandom(40)
        options = {
            'template_path': os.path.join(os.path.dirname(__file__), 'templates'),
            'cookie_secret': cookie_secret,
            'debug': self._debug,
        }
        helpers.debug(__file__, repr(options))
        webapp = tornado.web.Application([
            (r"/(login|logout)?", handlers.LoginHandler),
            (r"/zones(.*)", handlers.ZoneHandler),
            ("/favicon.ico", handlers.NullHandler),
        ], **options)
        webapp.listen(self._port)
        tornado.ioloop.IOLoop.instance().start()
class RackDNSGuiOptions(object):
    """Command-line options for the rackdnsgui application."""

    def __init__(self, name):
        """Create the option parser.

        :param name: program name shown in usage and ``--version`` output
        """
        self._parser = argparse.ArgumentParser(prog = name)
        self._parser = self._add_args()

    @property
    def parser(self):
        """Return the underlying argparse parser."""
        return self._parser

    @property
    def parsed_args(self):
        """Parse ``sys.argv`` and return the resulting namespace."""
        return self._parser.parse_args()

    def _add_args(self):
        """Add the options to the parser and return it."""
        self._parser.add_argument('--version', action = "version",
                version = "%(prog)s 0.1")

        # --verbose, -v
        help_list = [
                "Specifies verbose output from the application.",
                ]
        self._parser.add_argument('--verbose', '-v', action = 'store_true',
                default = False, help = ''.join(help_list))

        # --debug, -D
        help_list = [
                "Specifies debugging output from the application.  Implies verbose ",
                "output from the application.",
                ]
        self._parser.add_argument('--debug', '-D', action = 'store_true',
                default = False, help = ''.join(help_list))

        # --quiet, -q
        # NOTE: fixed user-visible spelling: "superceded" -> "superseded".
        help_list = [
                "Specifies quiet output from the application.  This is ",
                "superseded by verbose output.",
                ]
        self._parser.add_argument('--quiet', '-q', action = 'store_true',
                default = False, help = ''.join(help_list))

        # --color=[none,light,dark,auto]
        help_list = [
                "Specifies whether output should use color and which type of ",
                "background to color for (light or dark).  This defaults to ",
                "the value of system.color in the configuration file.",
                ]
        self._parser.add_argument('--color',
                choices = [ "none", "light", "dark", "auto", ],
                default = "none", help = ''.join(help_list))

        # --port, -p
        help_list = [
                "Specifies the port for RackDNSGui to listen on.",
                ]
        self._parser.add_argument('--port', '-p', default = 5051, type = int,
                help = ''.join(help_list))

        return self._parser
| adregner/rackdnsgui | rackdnsgui/application.py | Python | gpl-2.0 | 5,030 |
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""
Demo #2
The second script in our tutorial about using GalSim in python scripts: examples/demo*.py.
(This file is designed to be viewed in a window 100 characters wide.)
This script is a bit more sophisticated, but still pretty basic. We're still only making
a single image, but now the galaxy has an exponential radial profile and is sheared.
The PSF is a circular Moffat profile. And the noise is Poisson using the flux from both
the object and a background sky level to determine the variance in each pixel.
New features introduced in this demo:
- obj = galsim.Exponential(flux, scale_radius)
- obj = galsim.Moffat(beta, flux, half_light_radius)
- obj.applyShear(g1, g2) -- with explanation of other ways to specify shear
- rng = galsim.BaseDeviate(seed)
- noise = galsim.PoissonNoise(rng, sky_level)
- galsim.hsm.EstimateShear(image, image_epsf)
"""
import sys
import os
import math
import logging
import galsim
def main(argv):
    """
    A little bit more sophisticated, but still pretty basic:
      - Use a sheared, exponential profile for the galaxy.
      - Convolve it by a circular Moffat PSF.
      - Add Poisson noise to the image.
    """
    # In non-script code, use getLogger(__name__) at module scope instead.
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("demo2")

    # Simulation parameters.
    gal_flux = 1.e5    # counts
    gal_r0 = 2.7       # arcsec
    g1 = 0.1           # reduced-shear component applied to the galaxy
    g2 = 0.2           # reduced-shear component applied to the galaxy
    psf_beta = 5       # Moffat profile index
    psf_re = 1.0       # arcsec
    pixel_scale = 0.2  # arcsec / pixel
    sky_level = 2.5e4  # counts / arcsec^2

    # This time use a particular seed, so the image is deterministic.
    # This is the same seed that is used in demo2.yaml, which means the images produced
    # by the two methods will be precisely identical.
    random_seed = 1534225

    logger.info('Starting demo script 2 using:')
    logger.info(' - sheared (%.2f,%.2f) exponential galaxy (flux = %.1e, scale radius = %.2f),',
                g1, g2, gal_flux, gal_r0)
    logger.info(' - circular Moffat PSF (beta = %.1f, re = %.2f),', psf_beta, psf_re)
    logger.info(' - pixel scale = %.2f,', pixel_scale)
    logger.info(' - Poisson noise (sky level = %.1e).', sky_level)

    # Initialize the (pseudo-)random number generator that we will be using below.
    rng = galsim.BaseDeviate(random_seed)

    # Define the galaxy profile.
    gal = galsim.Exponential(flux=gal_flux, scale_radius=gal_r0)

    # Shear the galaxy by some value.
    # There are quite a few ways you can use to specify a shape.
    # q, beta      Axis ratio and position angle: q = b/a, 0 < q < 1
    # e, beta      Ellipticity and position angle: |e| = (1-q^2)/(1+q^2)
    # g, beta      ("Reduced") Shear and position angle: |g| = (1-q)/(1+q)
    # eta, beta    Conformal shear and position angle: eta = ln(1/q)
    # e1,e2        Ellipticity components: e1 = e cos(2 beta), e2 = e sin(2 beta)
    # g1,g2        ("Reduced") shear components: g1 = g cos(2 beta), g2 = g sin(2 beta)
    # eta1,eta2    Conformal shear components: eta1 = eta cos(2 beta), eta2 = eta sin(2 beta)
    gal.applyShear(g1=g1, g2=g2)
    logger.debug('Made galaxy profile')

    # Define the PSF profile.
    psf = galsim.Moffat(beta=psf_beta, flux=1., half_light_radius=psf_re)
    logger.debug('Made PSF profile')

    # Define the pixel size
    pix = galsim.Pixel(pixel_scale)
    logger.debug('Made pixel profile')

    # Final profile is the convolution of these.
    final = galsim.Convolve([gal, psf, pix])
    # Effective PSF (PSF convolved with the pixel), used below for shape estimation.
    final_epsf = galsim.Convolve([psf, pix])
    logger.debug('Convolved components into final profile')

    # Draw the image with a particular pixel scale.
    image = final.draw(dx=pixel_scale)
    image_epsf = final_epsf.draw(dx=pixel_scale)
    logger.debug('Made image of the profile')

    # To get Poisson noise on the image, we will use a class called PoissonNoise.
    # However, we want the noise to correspond to what you would get with a significant
    # flux from the sky.  This is done by telling PoissonNoise to add noise from a
    # sky level in addition to the counts currently in the image.
    #
    # One wrinkle here is that the PoissonNoise class needs the sky level in each pixel,
    # while we have a sky_level in counts per arcsec^2.  So we need to convert:
    sky_level_pixel = sky_level * pixel_scale**2
    noise = galsim.PoissonNoise(rng, sky_level=sky_level_pixel)
    image.addNoise(noise)
    logger.debug('Added Poisson noise')

    # Write the image to a file.
    if not os.path.isdir('output'):
        os.mkdir('output')
    file_name = os.path.join('output', 'demo2.fits')
    file_name_epsf = os.path.join('output','demo2_epsf.fits')
    image.write(file_name)
    image_epsf.write(file_name_epsf)
    logger.info('Wrote image to %r',file_name)
    logger.info('Wrote effective PSF image to %r',file_name_epsf)

    # Measure the observed and PSF-corrected shapes with the HSM module.
    results = galsim.hsm.EstimateShear(image, image_epsf)

    logger.info('HSM reports that the image has observed shape and size:')
    logger.info(' e1 = %.3f, e2 = %.3f, sigma = %.3f (pixels)', results.observed_shape.e1,
                results.observed_shape.e2, results.moments_sigma)
    logger.info('When carrying out Regaussianization PSF correction, HSM reports distortions')
    logger.info(' e1, e2 = %.3f, %.3f',
                results.corrected_e1, results.corrected_e2)
    logger.info('Expected values in the limit that noise and non-Gaussianity are negligible:')
    exp_shear = galsim.Shear(g1=g1, g2=g2)
    logger.info(' g1, g2 = %.3f, %.3f', exp_shear.e1,exp_shear.e2)
if __name__ == "__main__":
main(sys.argv)
| mardom/GalSim | examples/demo2.py | Python | gpl-3.0 | 6,455 |
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Static device inventory used as a fixture by the unit tests.  Each entry
# mimics one appliance configuration; the variation between entries
# (protocol, status, api_version, partitioning method, ...) is deliberate so
# that tests can exercise the different configuration paths.
devices = {
    "ax1": {
        "name": "ax1",
        "host": "10.10.100.20",
        "port": 8443,
        "protocol": "https",
        "username": "admin",
        "password": "a10",
        "status": True,
        "autosnat": True,
        "api_version": "2.1",
        "v_method": "LSI",
        "max_instance": 5000,
        "use_float": True,
        "method": "hash"
    },
    # Same shape as ax1 but disabled (status False) and plain http.
    "ax2": {
        "name": "ax2",
        "host": "10.10.100.21",
        "port": 8080,
        "protocol": "http",
        "username": "admin",
        "password": "a10",
        "status": False,
        "api_version": "2.1",
        "v_method": "LSI",
        "max_instance": 5000,
        "use_float": True,
        "method": "hash"
    },
    # Minimal entry: no explicit name/port/v_method/method keys.
    "ax3": {
        "host": "10.10.100.22",
        "protocol": "http",
        "username": "admin",
        "password": "a10",
        "status": True,
        "api_version": "2.1",
        "max_instance": 5000,
        "use_float": True,
    },
    # Entry with an HA peer list for sync testing.
    "ax4": {
        "host": "10.10.100.23",
        "username": "admin",
        "password": "a10",
        "api_version": "2.1",
        "use_float": True,
        "ha_sync_list": [
            {
                "name": "ax5",
                "ip": "1.1.1.1",
                "username": "admin",
                "password": "a10"
            }
        ]
    },
    "axxv21": {
        "host": "10.10.100.29",
        "protocol": "http",
        "username": "admin",
        "password": "a10",
        "status": True,
        "api_version": "2.1",
        "max_instance": 5000,
        "use_float": True,
    },
    # AXAPI 3.0 variant of the entry above.
    "axv30": {
        "host": "10.10.100.30",
        "protocol": "http",
        "username": "admin",
        "password": "a10",
        "status": True,
        "api_version": "3.0",
        "max_instance": 5000,
        "use_float": True,
    },
    # ADP partitioning with an alternate shared partition name ...
    "axadp-alt": {
        "host": "10.10.100.24",
        "username": "admin",
        "password": "a10",
        "protocol": "https",
        "v_method": "ADP",
        "shared_partition": "mypart",
    },
    # ... and without one.
    "axadp-noalt": {
        "host": "10.10.100.24",
        "username": "admin",
        "password": "a10",
        "protocol": "https",
        "v_method": "ADP"
    },
    "axipinip": {
        "host": "10.48.5.219",
        "protocol": "https",
        # NOTE(review): this entry uses "user" while every other entry uses
        # "username" -- looks like a typo, but it may be relied on by a test
        # of bad/alternate config handling; confirm before renaming the key.
        "user": "admin",
        "password": "a10",
        "status": True,
        "api_version": "2.1",
        "max_instance": 5000,
        "use_float": True,
        "ipinip": True
    },
}
| sasukeh/a10-neutron-lbaas | a10_neutron_lbaas/tests/unit/unit_config/config.py | Python | apache-2.0 | 3,120 |
"""
Commands and hooks for creating and using sync, publish, and progress status
commands.
"""
from gettext import gettext as _
from pulp.bindings import responses
from pulp.client.commands import options, polling
from pulp.client.extensions.extensions import PulpCliOptionGroup
from pulp.common import tags
# Command Descriptions
# User-visible (gettext-translated) one-line descriptions for the sync and
# publish commands defined below.
DESC_SYNC_RUN = _('triggers an immediate sync of a repository')
DESC_SYNC_STATUS = _('displays the status of a repository\'s sync tasks')
DESC_PUBLISH_RUN = _('triggers an immediate publish of a repository')
DESC_PUBLISH_STATUS = _('displays the status of a repository\'s publish tasks')
class StatusRenderer(object):
    """Abstract base for objects that render sync/publish progress reports."""

    def __init__(self, context):
        """
        :param context: the CLI client context; its prompt is kept handy
        """
        self.prompt = context.prompt
        self.context = context

    def display_report(self, progress_report):
        """Render the given progress report.  Subclasses must implement this."""
        raise NotImplementedError()
class SyncPublishCommand(polling.PollingCommand):
    """
    Common behavior shared by the sync and publish commands in this module.

    Not useful on its own; intended strictly as a common superclass.
    """

    def __init__(self, name, description, method, context, renderer):
        """
        Initialize the command, and call the superclass __init__().

        :param name: The name of the command
        :type name: basestring
        :param description: The description of the command
        :type description: basestring
        :param method: The method to be run if the command is used
        :type method: callable
        :param context: The CLI context from Okaara
        :type context: pulp.client.extensions.core.ClientContext
        :param renderer: The renderer to be used to print progress reports
        :type renderer: StatusRenderer
        """
        # Default to this command's own run() when no explicit method is given.
        super(SyncPublishCommand, self).__init__(
            name, description, method if method is not None else self.run, context)
        self.renderer = renderer
        self.add_option(options.OPTION_REPO_ID)
        self.context = context
        self.prompt = context.prompt

    def progress(self, task, spinner):
        """
        Render the progress report, if one is available on the given task.

        :param task: The Task whose progress should be rendered
        :type task: pulp.bindings.responses.Task
        :param spinner: Unused here; supplied by the superclass
        :type spinner: okaara.progress.Spinner
        """
        report = task.progress_report
        if report is not None:
            self.renderer.display_report(report)

    def task_header(self, task):
        """
        Deliberately a no-op: these commands suppress the task header that the
        superclass would otherwise print.

        :param task: The Task being polled.  Unused.
        :type task: pulp.bindings.responses.Task
        """
        pass
class RunSyncRepositoryCommand(SyncPublishCommand):
    """
    Requests an immediate sync for a repository.  If the sync begins (it is
    not postponed or rejected), the provided renderer tracks its progress.  A
    flag on the run command lets the user exit or skip the progress polling.
    """

    def __init__(self, context, renderer, name='run', description=DESC_SYNC_RUN, method=None):
        """
        :type renderer: pulp.client.commands.repo.sync_publish.StatusRenderer
        """
        super(RunSyncRepositoryCommand, self).__init__(name, description, method, context, renderer)

    def run(self, **kwargs):
        """
        If a sync task is already running for the repository, attach to it and
        display its progress report.  Otherwise queue a new sync task and
        display that one's progress.

        :param kwargs: The user input
        :type kwargs: dict
        """
        repo_id = kwargs[options.OPTION_REPO_ID.keyword]
        background = kwargs[polling.FLAG_BACKGROUND.keyword]

        self.prompt.render_title(_('Synchronizing Repository [%(r)s]') % {'r': repo_id})

        # Resume progress tracking when a sync is already running for the repo.
        running_tasks = _get_repo_tasks(self.context, repo_id, 'sync')
        if running_tasks:
            msg = _('A sync task is already in progress for this repository. ')
            if not background:
                msg += _('Its progress will be tracked below.')
            self.context.prompt.render_paragraph(msg, tag='in-progress')
            self.poll(running_tasks, kwargs)
            return

        # No sync in flight: trigger the actual sync and poll it.
        response = self.context.server.repo_actions.sync(repo_id, None)
        self.poll([response.response_body], kwargs)
class SyncStatusCommand(SyncPublishCommand):
    """Displays the status of a repository's running sync tasks, if any."""

    def __init__(self, context, renderer, name='status', description=DESC_SYNC_STATUS, method=None):
        super(SyncStatusCommand, self).__init__(name, description, method, context, renderer)

    def run(self, **kwargs):
        """
        Query the server for existing, incomplete sync tasks.  Attach to them
        and display their progress when found; otherwise print a message and
        return.

        :param kwargs: The user input
        :type kwargs: dict
        """
        repo_id = kwargs[options.OPTION_REPO_ID.keyword]
        self.prompt.render_title(_('Repository Status [%(r)s]') % {'r': repo_id})

        existing_sync_tasks = _get_repo_tasks(self.context, repo_id, 'sync')
        if not existing_sync_tasks:
            self.prompt.render_paragraph(
                _('The repository is not performing any operations'), tag='no-tasks')
            return
        self.poll(existing_sync_tasks, kwargs)
class RunPublishRepositoryCommand(SyncPublishCommand):
    """
    Base class for the repo publish operation.

    Requests an immediate publish of a repository through the distributor
    identified by distributor_id.  If the publish begins (it is not postponed
    or rejected), the provided renderer tracks its progress; flags on the run
    command let the user exit or skip the polling entirely.  Additional
    configuration override options can be supplied via override_config_options.
    """

    def __init__(self, context, renderer, distributor_id, name='run', description=DESC_PUBLISH_RUN,
                 method=None, override_config_options=()):
        """
        :param context: Pulp client context
        :type context: See okaara
        :param renderer: StatusRenderer subclass that will interpret the sync or publish progress
                         report
        :type renderer: StatusRenderer
        :param distributor_id: Id of a distributor to be used for publishing
        :type distributor_id: str
        :param override_config_options: Additional publish options to be accepted from user.  These
                                        options will override respective options from the default
                                        publish config.  Each entry should be either a PulpCliOption
                                        or PulpCliFlag instance
        :type override_config_options: list
        """
        super(RunPublishRepositoryCommand, self).__init__(name, description, method, context,
                                                          renderer)
        self.distributor_id = distributor_id
        self.override_config_keywords = []

        # Override options live in their own option group; their keywords are
        # remembered so run() can pick them back out of the parsed user input.
        if override_config_options:
            override_group = PulpCliOptionGroup(_("Publish Options"))
            self.add_option_group(override_group)
            for option in override_config_options:
                override_group.add_option(option)
                self.override_config_keywords.append(option.keyword)

    def run(self, **kwargs):
        """
        Run the publish operation on the server, or if one is already running, attach to it.

        :param kwargs: The user inputs
        :type kwargs: dict
        """
        repo_id = kwargs[options.OPTION_REPO_ID.keyword]
        background = kwargs[polling.FLAG_BACKGROUND.keyword]

        override_config = {}
        if self.override_config_keywords:
            override_config = self.generate_override_config(**kwargs)

        self.prompt.render_title(_('Publishing Repository [%(r)s]') % {'r': repo_id})

        # Show the user exactly which override values will be applied.
        if override_config:
            self.prompt.render_paragraph(
                _('The following publish configuration options will be used:'))
            self.prompt.render_document(override_config)

        # Attach to an already-running publish rather than starting another.
        running_tasks = _get_repo_tasks(self.context, repo_id, 'publish')
        if running_tasks:
            msg = _('A publish task is already in progress for this repository. ')
            if not background:
                msg += _('Its progress will be tracked below.')
            self.context.prompt.render_paragraph(msg, tag='in-progress')
            self.poll(running_tasks, kwargs)
            return

        # Trigger a new publish; an empty override dict is sent as None.
        response = self.context.server.repo_actions.publish(
            repo_id, self.distributor_id, override_config or None)
        self.poll([response.response_body], kwargs)

    def generate_override_config(self, **kwargs):
        """
        Build the override configuration dictionary from whichever override
        options the user actually supplied on the command line.

        :param kwargs: all keyword arguments passed in by the user on the command line
        :type kwargs: dict
        :return: config option dictionary consisting of option values passed by user for valid
                 publish config options (stored in override_config_keywords)
        :rtype: dict
        """
        # Hyphens in option keywords become underscores, e.g. iso-prefix -> iso_prefix.
        return dict((keyword.replace('-', '_'), kwargs[keyword])
                    for keyword in self.override_config_keywords if kwargs[keyword])
class PublishStatusCommand(SyncPublishCommand):
    """Displays the status of a repository's running publish tasks, if any."""

    def __init__(self, context, renderer, name='status', description=DESC_PUBLISH_STATUS,
                 method=None):
        super(PublishStatusCommand, self).__init__(name, description, method, context, renderer)

    def run(self, **kwargs):
        """
        Query the server for incomplete publish operations on the given repo.
        Display their progress reports when found; otherwise print a message
        and return.

        :param kwargs: The user input
        :type kwargs: dict
        """
        repo_id = kwargs[options.OPTION_REPO_ID.keyword]
        self.prompt.render_title(_('Repository Status [%(r)s]') % {'r': repo_id})

        existing_publish_tasks = _get_repo_tasks(self.context, repo_id, 'publish')
        if not existing_publish_tasks:
            self.prompt.render_paragraph(
                _('The repository is not performing any operations'), tag='no-tasks')
            return
        self.poll(existing_publish_tasks, kwargs)
def _get_repo_tasks(context, repo_id, action):
    """
    Retrieve a list of incomplete Task objects for the given repo_id and action. action must be one
    of 'sync' or 'publish'.

    :param context: The CLI context from Okaara
    :type context: pulp.client.extensions.core.ClientContext
    :param repo_id: The primary key of the repository you wish to limit the Task query to
    :type repo_id: basestring
    :param action: One of "sync" or "publish"
    :type action: basestring
    :return: A list of Task objects
    :rtype: list
    """
    # Dispatch table instead of an if/elif chain; unknown actions are rejected.
    action_types = {'sync': tags.ACTION_SYNC_TYPE, 'publish': tags.ACTION_PUBLISH_TYPE}
    if action not in action_types:
        raise ValueError(
            '_get_repo_tasks() does not support %(action)s as an action.' % {'action': action})
    action_tag = tags.action_tag(action_types[action])
    repo_tag = tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id)
    criteria = {'filters': {'state': {'$nin': responses.COMPLETED_STATES},
                            'tags': {'$all': [repo_tag, action_tag]}}}
    return context.server.tasks_search.search(**criteria)
| beav/pulp | client_lib/pulp/client/commands/repo/sync_publish.py | Python | gpl-2.0 | 12,879 |
from __future__ import absolute_import
from .autoslugfield import * # NOQA
from .bounded import * # NOQA
from .extendedchar import * # NOQA
from .foreignkey import * # NOQA
from .gzippeddict import * # NOQA
from .node import * # NOQA
from .pickle import * # NOQA | lyoniionly/django-cobra | src/cobra/models/fields/__init__.py | Python | apache-2.0 | 269 |
#! /usr/bin/python
#
# Copyright (C) 2012,2014 Stefano Sanfilippo.
# See LICENSE.txt in the main source package for more information
#
from __future__ import with_statement
import sys
# Ugly ugly trick to give us compatibility both with Py2 and Py3k
try:
import cStringIO as StringIO
except ImportError:
try:
import StringIO
except ImportError:
import io as StringIO
FLAG = ['DropTail', 'RED', 'CBQ', 'FQ', 'SFQ', 'DRR']


def fix(filename, overwrite=False):
    """Append an `Off` flag into each `(Duplex|Simplex)` Link declaration.

    Needed because the old file format was updated to include
    Queue Visualization.  Converted file will be saved to ${somename}.new.nss
    unless `overwrite` is set.

    Args:
        filename: the name of the file to be converted.
        overwrite: will overwrite the input file if `True`.
    Returns:
        None
    """
    # BUG FIX: StringIO/cStringIO objects on Python 2 are not context
    # managers, so `with StringIO.StringIO() as ...` raised AttributeError
    # under the module's py2 fallbacks.  An in-memory buffer needs no
    # cleanup, so create it directly.  (Also renamed to avoid shadowing the
    # `buffer` builtin.)
    out_buffer = StringIO.StringIO()
    with open(filename, 'rt') as sourcefile:
        ready = steady = False
        for line in sourcefile:
            out_buffer.write(line)
            # A queue-type keyword arms the state machine; the 'Off' flag is
            # inserted after the line that follows it.
            if line[:-1] in FLAG:
                ready = True
            if ready and not steady:
                steady = True
            elif ready and steady:
                out_buffer.write('Off\n')
                ready = steady = False
    if not overwrite:
        filename = filename.replace('.nss', '.new.nss')
    with open(filename, 'wt') as destfile:
        destfile.write(out_buffer.getvalue())
def main():
    """Entry point: convert every .nss file named on the command line."""
    filenames = sys.argv[1:]
    if not filenames:
        print('Usage: %s file1.nss [file2.nss [...]]' % sys.argv[0])
        sys.exit(0)
    for filename in filenames:
        print('Converting %s' % filename)
        fix(filename)
    sys.exit(0)
main()
| esseks/nscript | scripts/fixnss.py | Python | bsd-3-clause | 1,822 |
#!/usr/bin/env python
import unicodedata
SCOWL_FINAL_DIR = '../scowl-6/final'  # location of the SCOWL "final" word lists
MAX_LINE_LENGTH = 70  # wrap generated source lines at this width

# Emit the SCOWL copyright/permission notice at the top of the generated module.
print """
# Copyright 2000-2004 by Kevin Atkinson
# Python wrapper by Johann C. Rocholl
#
# Permission to use, copy, modify, distribute and sell these word
# lists, the associated scripts, the output created from the scripts,
# and its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appears in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation. Kevin Atkinson makes no representations
# about the suitability of this array for any purpose. It is provided
# "as is" without express or implied warranty.
""".strip()
def pythonize_wordlist(constant, wordlist):
    """Print `constant` as a Python set literal built from a triple-quoted
    string, wrapping the word list at MAX_LINE_LENGTH columns."""
    print
    print constant, '= set("""'
    line = ''
    for word in wordlist:
        # Flush the current line before it would exceed the maximum width.
        if len(line) + 1 + len(word) > MAX_LINE_LENGTH:
            print line
            line = ''
        if line:
            line += ' '
        line += word
    # Emit any partially-filled final line.
    if line:
        print line
    print '""".split())'
def load_wordlist(number):
    """Read english-words.<number> from SCOWL_FINAL_DIR, skip entries with
    apostrophes, and return the words transliterated to plain ASCII
    (latin-1 decode -> NFKD normalize -> drop non-ASCII marks)."""
    path = '%s/english-words.%d' % (SCOWL_FINAL_DIR, number)
    words = []
    for raw_line in open(path):
        if "'" in raw_line:
            continue
        decoded = raw_line.strip().decode('latin1')
        normalized = unicodedata.normalize('NFKD', decoded)
        words.append(normalized.encode('ASCII', 'ignore'))
    return words
# Generate one Python set per SCOWL size level; the size-40 list is folded
# into the 50 set so four constants cover levels 10/20/35/50.
for number in (10, 20, 35, 50):
    wordlist = load_wordlist(number)
    if number == 50:
        wordlist += load_wordlist(40)
    wordlist.sort()
    pythonize_wordlist('SCOWL%d' % number, wordlist)
| jcrocholl/nxdom | tools/scowl_to_python.py | Python | mit | 1,694 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's name-lookup helpers used throughout respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Generation metadata recorded by the Cheetah compiler.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.299002
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/satellites.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against a Cheetah runtime older than the compiler that
# generated this module.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class satellites(Template):
    """Cheetah-generated template class rendering the satellites accordion.

    NOTE: this module is autogenerated from ajax/satellites.tmpl -- edit the
    template source, not this file.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS


    def __init__(self, *args, **KWs):
        # Forward construction to Cheetah's Template, then perform the
        # one-time instance initialization with only the supported kwargs.
        super(satellites, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)


    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Use the transaction supplied by the caller or the one set by
        # awake(); otherwise fall back to a DummyTransaction that simply
        # buffers the generated output so it can be returned as a string.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        write(u'''<script>
$(function() { InitAccordeon("#accordionS");});
</script>
<div id="accordionS">
''')
        # One accordion section per satellite; the anchor id encodes the
        # ajax/channels URL used to lazy-load that satellite's channel list.
        for satellite in VFFSL(SL,"satellites",True): # generated from line 7, col 1
            write(u'''\t<h1><a href="#" id="ajax/channels?id=''')
            _v = VFFSL(SL,"quote",False)(VFFSL(SL,"satellite.service",True)) # u'$quote($satellite.service)' on line 8, col 39
            if _v is not None: write(_filter(_v, rawExpr=u'$quote($satellite.service)')) # from line 8, col 39.
            write(u'''&stype=''')
            _v = VFFSL(SL,"stype",True) # u'$stype' on line 8, col 72
            if _v is not None: write(_filter(_v, rawExpr=u'$stype')) # from line 8, col 72.
            write(u'''">''')
            _v = VFFSL(SL,"satellite.name",True) # u'$satellite.name' on line 8, col 80
            if _v is not None: write(_filter(_v, rawExpr=u'$satellite.name')) # from line 8, col 80.
            write(u'''</a></h1>
<div>
''')
            _v = VFFSL(SL,"tstrings",True)['loading'] # u"$tstrings['loading']" on line 10, col 1
            if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['loading']")) # from line 10, col 1.
            write(u''' ...
\t</div>
''')
        write(u'''</div>''')

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES


    _CHEETAH__instanceInitialized = False

    _CHEETAH_version = __CHEETAH_version__

    _CHEETAH_versionTuple = __CHEETAH_versionTuple__

    _CHEETAH_genTime = __CHEETAH_genTime__

    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__

    _CHEETAH_src = __CHEETAH_src__

    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__

    _mainCheetahMethod_for_satellites= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing (compile/serve helper methods) to the
# generated class exactly once.
if not hasattr(satellites, '_initCheetahAttributes'):
    templateAPIClass = getattr(satellites, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(satellites)


# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=satellites()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/ajax/satellites.py | Python | gpl-2.0 | 5,571 |
import logging
from nose.tools import * # noqa
from tests.base import OsfTestCase
from website.models import PreprintProvider
from scripts.update_taxonomies import main as taxonomy_main
from scripts.populate_preprint_providers import main as populate_main
from scripts.populate_preprint_providers import STAGING_PREPRINT_PROVIDERS, PROD_PREPRINT_PROVIDERS
class TestAddPreprintProviders(OsfTestCase):
    """Exercise scripts.populate_preprint_providers for each environment flag."""

    def setUp(self):
        # Quiet the population scripts' chatter, then load the taxonomy the
        # providers reference.
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.WARNING)
        taxonomy_main()

    def tearDown(self):
        # Remove every provider so the next test starts from a clean slate.
        PreprintProvider.remove()

    def _check_providers(self, expected_ids, excluded_ids):
        """Assert exactly the providers in *expected_ids* exist and none of
        *excluded_ids* do."""
        providers = PreprintProvider.find()
        assert_equal(providers.count(), len(expected_ids))
        found_ids = [provider._id for provider in providers]
        for provider_id in expected_ids:
            assert_in(provider_id, found_ids)
        for provider_id in excluded_ids:
            assert_not_in(provider_id, found_ids)

    def test_add_prod_providers(self):
        populate_main('prod')
        self._check_providers(
            PROD_PREPRINT_PROVIDERS,
            set(STAGING_PREPRINT_PROVIDERS) - set(PROD_PREPRINT_PROVIDERS))

    def test_add_default_providers(self):
        # A missing environment argument behaves like 'prod'.
        populate_main(None)
        self._check_providers(
            PROD_PREPRINT_PROVIDERS,
            set(STAGING_PREPRINT_PROVIDERS) - set(PROD_PREPRINT_PROVIDERS))

    def test_add_staging_providers(self):
        populate_main('stage')
        self._check_providers(STAGING_PREPRINT_PROVIDERS, [])
| aaxelb/osf.io | scripts/tests/test_add_preprint_providers.py | Python | apache-2.0 | 1,785 |
import os
import pathlib
import unittest
from maildaemon.config import load_config
from maildaemon.connection_group import ConnectionGroup
from maildaemon.daemon_group import DaemonGroup
# Directory containing this test module.
_HERE = pathlib.Path(__file__).parent
# Test configuration describing the IMAP/POP connections exercised below.
_TEST_CONFIG_PATH = _HERE.joinpath('maildaemon_test_config.json')
@unittest.skipUnless(os.environ.get('TEST_COMM') or os.environ.get('CI'),
                     'skipping tests that require server connection')
class Tests(unittest.TestCase):
    """Integration tests for grouping connections into a DaemonGroup."""

    # Shared configuration, loaded once at class-definition time.
    config = load_config(_TEST_CONFIG_PATH)

    # The four test connections every test in this class operates on.
    _CONNECTION_NAMES = ('test-imap', 'test-imap-ssl', 'test-pop', 'test-pop-ssl')

    def _build_daemon_group(self):
        """Build a DaemonGroup over the four test connections.

        Deduplicates the fixture setup the two tests previously copy-pasted.
        """
        conns = {name: self.config['connections'][name]
                 for name in self._CONNECTION_NAMES}
        connections = ConnectionGroup.from_dict(conns)
        return DaemonGroup(connections, [])

    def test_connection(self):
        daemons = self._build_daemon_group()
        self.assertEqual(len(daemons), 4)

    def test_run(self):
        daemons = self._build_daemon_group()
        self.assertEqual(len(daemons), 4)
        # daemons.run()  # TODO: there's some cryptic error in msg id 12 in INBOX
| mbdevpl/maildaemon | test/test_daemon_group.py | Python | apache-2.0 | 1,536 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds android browsers that can be controlled by telemetry."""
import logging as real_logging
import os
import re
import subprocess
import sys
from telemetry import decorators
from telemetry.core import browser
from telemetry.core import platform
from telemetry.core import possible_browser
from telemetry.core import util
from telemetry.core.backends import adb_commands
from telemetry.core.backends.chrome import android_browser_backend
from telemetry.core.platform import android_platform_backend
from telemetry.core.platform.profiler import monsoon
try:
import psutil # pylint: disable=F0401
except ImportError:
psutil = None
# Maps a Telemetry browser-type name to a 3-element list:
#   [Android package name,
#    backend-settings class used to drive that browser,
#    filename of the locally built APK, or None if this flavor is only ever
#    used pre-installed].
CHROME_PACKAGE_NAMES = {
  'android-content-shell':
      ['org.chromium.content_shell_apk',
       android_browser_backend.ContentShellBackendSettings,
       'ContentShell.apk'],
  'android-chrome-shell':
      ['org.chromium.chrome.shell',
       android_browser_backend.ChromeShellBackendSettings,
       'ChromeShell.apk'],
  'android-webview':
      ['org.chromium.telemetry_shell',
       android_browser_backend.WebviewBackendSettings,
       None],
  'android-chrome':
      ['com.google.android.apps.chrome',
       android_browser_backend.ChromeBackendSettings,
       'Chrome.apk'],
  'android-chrome-beta':
      ['com.chrome.beta',
       android_browser_backend.ChromeBackendSettings,
       None],
  'android-chrome-dev':
      ['com.google.android.apps.chrome_dev',
       android_browser_backend.ChromeBackendSettings,
       None],
  'android-chrome-canary':
      ['com.chrome.canary',
       android_browser_backend.ChromeBackendSettings,
       None],
  'android-jb-system-chrome':
      ['com.android.chrome',
       android_browser_backend.ChromeBackendSettings,
       None]
}
class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
  """A launchable android browser instance."""
  def __init__(self, browser_type, finder_options, backend_settings, apk_name):
    """Records backend settings and locates the newest local APK, if any.

    apk_name may be None, meaning there is never a locally built APK for
    this browser type and only an installed package can be used.
    """
    super(PossibleAndroidBrowser, self).__init__(browser_type, 'android',
        finder_options, backend_settings.supports_tab_control)
    assert browser_type in FindAllBrowserTypes(finder_options), \
        ('Please add %s to android_browser_finder.FindAllBrowserTypes' %
         browser_type)
    self._backend_settings = backend_settings
    self._local_apk = None
    chrome_root = util.GetChromiumSrcDir()
    if apk_name:
      # Scan every build output directory for the named APK and keep the
      # most recently modified one.
      candidate_apks = []
      for build_dir, build_type in util.GetBuildDirectories():
        apk_full_name = os.path.join(chrome_root, build_dir, build_type, 'apks',
                                     apk_name)
        if os.path.exists(apk_full_name):
          last_changed = os.path.getmtime(apk_full_name)
          candidate_apks.append((last_changed, apk_full_name))
      if candidate_apks:
        # Find the candidate .apk with the latest modification time.
        newest_apk_path = sorted(candidate_apks)[-1][1]
        self._local_apk = newest_apk_path
  def __repr__(self):
    return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type
  def _InitPlatformIfNeeded(self):
    # Lazily construct the platform backend; repeated calls are no-ops.
    if self._platform:
      return
    self._platform_backend = android_platform_backend.AndroidPlatformBackend(
        self._backend_settings.adb.device(),
        self.finder_options.no_performance_mode)
    self._platform = platform.Platform(self._platform_backend)
  def Create(self):
    """Creates a Browser instance driven by this Android browser's backend."""
    self._InitPlatformIfNeeded()
    # RNDIS forwarding is used when explicitly requested, when network
    # simulation is enabled, or when the host is not Linux.
    use_rndis_forwarder = (self.finder_options.android_rndis or
                           self.finder_options.browser_options.netsim or
                           platform.GetHostPlatform().GetOSName() != 'linux')
    backend = android_browser_backend.AndroidBrowserBackend(
        self.finder_options.browser_options, self._backend_settings,
        use_rndis_forwarder,
        output_profile_path=self.finder_options.output_profile_path,
        extensions_to_load=self.finder_options.extensions_to_load,
        target_arch=self.finder_options.target_arch)
    b = browser.Browser(backend, self._platform_backend)
    return b
  def SupportsOptions(self, finder_options):
    # Browser extensions are not supported on Android.
    if len(finder_options.extensions_to_load) != 0:
      return False
    return True
  def HaveLocalAPK(self):
    """Returns True if a locally built APK exists for this browser type."""
    return self._local_apk and os.path.exists(self._local_apk)
  @decorators.Cache
  def UpdateExecutableIfNeeded(self):
    # Installs the local APK onto the device.  Wrapped in decorators.Cache —
    # presumably so the install is attempted at most once; confirm against
    # the decorator's semantics.
    if self.HaveLocalAPK():
      real_logging.warn(
          'Refreshing %s on device if needed.' % self._local_apk)
      self._backend_settings.adb.Install(self._local_apk)
  def last_modification_time(self):
    """Returns the local APK's mtime, or -1 when there is no local APK."""
    if self.HaveLocalAPK():
      return os.path.getmtime(self._local_apk)
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Returns the browser with the most recently built local APK, or None.

  Browsers without a local APK report a modification time of -1 (see
  PossibleAndroidBrowser.last_modification_time), so any locally built
  browser is preferred over installed-only ones.
  """
  # Single O(n) pass instead of sorting the whole list just to take its
  # maximum.  '>=' preserves the previous stable-sort behavior of picking
  # the later entry when modification times tie.
  newest_browser = None
  newest_time = None
  for candidate in possible_browsers:
    candidate_time = candidate.last_modification_time()
    if newest_browser is None or candidate_time >= newest_time:
      newest_browser = candidate
      newest_time = candidate_time
  return newest_browser
@decorators.Cache
def CanFindAvailableBrowsers(logging=real_logging):
  """Returns True if adb is usable, extending PATH to a bundled adb if needed.

  Logs (but does not fail on) the well-known 'no permissions' adb state so
  the user can restart adb as root.
  """
  if not adb_commands.IsAndroidSupported():
    logging.info('Android build commands unavailable on this machine. Have '
                 'you installed Android build dependencies?')
    return False
  try:
    with open(os.devnull, 'w') as devnull:
      proc = subprocess.Popen(
          ['adb', 'devices'],
          stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=devnull)
      stdout, _ = proc.communicate()
    # Plain substring test: re.search(re.escape(...)) on a fixed string was a
    # needlessly indirect way to spell it, and the '!= None' comparison was
    # unidiomatic.
    if '????????????\tno permissions' in stdout:
      logging.warn('adb devices reported a permissions error. Consider '
                   'restarting adb as root:')
      logging.warn(' adb kill-server')
      logging.warn(' sudo `which adb` devices\n\n')
    return True
  except OSError:
    # adb is not on PATH; fall back to the copy bundled with android_tools.
    platform_tools_path = os.path.join(util.GetChromiumSrcDir(),
        'third_party', 'android_tools', 'sdk', 'platform-tools')
    if (sys.platform.startswith('linux') and
        os.path.exists(os.path.join(platform_tools_path, 'adb'))):
      os.environ['PATH'] = os.pathsep.join([platform_tools_path,
                                            os.environ['PATH']])
      return True
  return False
def FindAllBrowserTypes(_):
  """Returns every Android browser-type name this finder knows about.

  The ignored argument is presumably finder_options, to match the other
  browser-finder modules' interface — confirm against the callers.
  """
  return CHROME_PACKAGE_NAMES.keys()
def FindAllAvailableBrowsers(finder_options, logging=real_logging):
  """Finds all the desktop browsers available on this machine."""
  if not CanFindAvailableBrowsers(logging=logging):
    logging.info('No adb command found. ' +
                 'Will not try searching for Android browsers.')
    return []
  def _GetDevices():
    # An explicitly requested device takes precedence over enumeration.
    if finder_options.android_device:
      return [finder_options.android_device]
    else:
      return adb_commands.GetAttachedDevices()
  devices = _GetDevices()
  if not devices:
    # No device visible: if a Monsoon power monitor is attached, enable its
    # power output and wait up to 10 minutes for a device to appear.
    try:
      m = monsoon.Monsoon(wait=False)
      m.SetUsbPassthrough(1)
      m.SetVoltage(3.8)
      m.SetMaxCurrent(8)
      logging.warn("""
Monsoon power monitor detected, but no Android devices.
The Monsoon's power output has been enabled. Please now ensure that:
1. The Monsoon's front and back USB are connected to the host.
2. The Device is connected to the Monsoon's main and USB channels.
3. The Device is turned on.
Waiting for device...
""")
      util.WaitFor(_GetDevices, 600)
      devices = _GetDevices()
      if not devices:
        raise IOError()
    except IOError:
      logging.info('No android devices found.')
      return []
  if len(devices) > 1:
    # Refuse to guess which device to use; tell the user how to pick one.
    logging.warn(
        'Multiple devices attached. Please specify one of the following:\n' +
        '\n'.join([' --device=%s' % d for d in devices]))
    return []
  device = devices[0]
  adb = adb_commands.AdbCommands(device=device)
  # Trying to root the device, if possible.
  if not adb.IsRootEnabled():
    # Ignore result.
    adb.EnableAdbRoot()
  if psutil:
    # Host side workaround for crbug.com/268450 (adb instability).
    # The adb server has a race which is mitigated by binding to a single core.
    for proc in psutil.process_iter():
      try:
        if 'adb' in proc.name:
          if 'cpu_affinity' in dir(proc):
            proc.cpu_affinity([0]) # New versions of psutil.
          elif 'set_cpu_affinity' in dir(proc):
            proc.set_cpu_affinity([0]) # Older versions.
          else:
            logging.warn(
                'Cannot set CPU affinity due to stale psutil version: %s',
                '.'.join(str(x) for x in psutil.version_info))
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        logging.warn('Failed to set adb process CPU affinity')
  if not os.environ.get('BUILDBOT_BUILDERNAME'):
    # Killing adbd before running tests has proven to make them less likely to
    # flake out during the test. We skip this if Telemetry is running under a
    # buildbot because build/android/test_runner.py wrapper already took care
    # of it before starting the shards.
    adb.RestartAdbdOnDevice()
  packages = adb.RunShellCommand('pm list packages')
  possible_browsers = []
  for name, package_info in CHROME_PACKAGE_NAMES.iteritems():
    [package, backend_settings, local_apk] = package_info
    b = PossibleAndroidBrowser(
        name,
        finder_options,
        backend_settings(adb, package),
        local_apk)
    # A browser is possible when its package is installed on the device or a
    # locally built APK is available to install.
    if 'package:' + package in packages or b.HaveLocalAPK():
      possible_browsers.append(b)
  if possible_browsers:
    installed_prebuilt_tools = adb_commands.SetupPrebuiltTools(adb)
    if not installed_prebuilt_tools:
      logging.error(
          'Android device detected, however prebuilt android tools could not '
          'be used. To run on Android you must build them first:\n'
          '  $ ninja -C out/Release android_tools')
      return []
  return possible_browsers
| 7kbird/chrome | tools/telemetry/telemetry/core/backends/chrome/android_browser_finder.py | Python | bsd-3-clause | 9,858 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb import errors
from edb.common import parsing
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from . import delta as sd
from . import name as sn
from . import objects as so
from . import scalars as s_scalars
from . import types as s_types
if TYPE_CHECKING:
from . import schema as s_schema
# Type variable bound to PseudoType, for generic helpers parameterized on it.
PseudoType_T = TypeVar("PseudoType_T", bound="PseudoType")
class PseudoType(
    so.InheritingObject,
    s_types.Type,
    qlkind=qltypes.SchemaObjectClass.PSEUDO_TYPE,
):
    """Schema representation of the polymorphic pseudo-types
    (``anytype`` and ``anytuple``)."""

    @classmethod
    def get(
        cls,
        schema: s_schema.Schema,
        name: Union[str, sn.Name],
    ) -> PseudoType:
        """Look up the pseudo-type *name* among the schema globals."""
        return schema.get_global(PseudoType, name)
    def as_shell(self, schema: s_schema.Schema) -> PseudoTypeShell:
        """Return an unresolved reference (shell) to this pseudo-type."""
        return PseudoTypeShell(name=self.get_name(schema))
    def get_bases(
        self,
        schema: s_schema.Schema,
    ) -> so.ObjectList[PseudoType]:
        # Pseudo-types have no bases.
        return so.ObjectList[PseudoType].create_empty()  # type: ignore
    def get_ancestors(
        self,
        schema: s_schema.Schema,
    ) -> so.ObjectList[PseudoType]:
        # Pseudo-types have no ancestors.
        return so.ObjectList[PseudoType].create_empty()  # type: ignore
    def get_abstract(self, schema: s_schema.Schema) -> bool:
        # Pseudo-types are always abstract.
        return True
    def is_polymorphic(self, schema: s_schema.Schema) -> bool:
        # Pseudo-types are polymorphic by definition.
        return True
    def material_type(
        self,
        schema: s_schema.Schema,
    ) -> Tuple[s_schema.Schema, PseudoType]:
        # A pseudo-type is its own material type.
        return schema, self
    def is_any(self, schema: s_schema.Schema) -> bool:
        """True if this is the ``anytype`` pseudo-type."""
        return str(self.get_name(schema)) == 'anytype'
    def is_anytuple(self, schema: s_schema.Schema) -> bool:
        """True if this is the ``anytuple`` pseudo-type."""
        return str(self.get_name(schema)) == 'anytuple'
    def is_tuple(self, schema: s_schema.Schema) -> bool:
        # Only ``anytuple`` is tuple-like.
        return self.is_anytuple(schema)
    def implicitly_castable_to(
        self,
        other: s_types.Type,
        schema: s_schema.Schema
    ) -> bool:
        # A pseudo-type is implicitly castable only to itself.
        return self == other
    def find_common_implicitly_castable_type(
        self,
        other: s_types.Type,
        schema: s_schema.Schema,
    ) -> Tuple[s_schema.Schema, Optional[PseudoType]]:
        # Common type exists only when both sides are the same pseudo-type.
        if self == other:
            return schema, self
        else:
            return schema, None
    def get_common_parent_type_distance(
        self,
        other: s_types.Type,
        schema: s_schema.Schema
    ) -> int:
        # Distance is 0 to itself, "infinite" to anything else.
        if self == other:
            return 0
        else:
            return s_types.MAX_TYPE_DISTANCE
    def _test_polymorphic(
        self,
        schema: s_schema.Schema,
        other: s_types.Type
    ) -> bool:
        return self == other
    def _to_nonpolymorphic(
        self,
        schema: s_schema.Schema,
        concrete_type: s_types.Type
    ) -> Tuple[s_schema.Schema, s_types.Type]:
        # Substituting a concrete type for a pseudo-type yields that type.
        return schema, concrete_type
    def _resolve_polymorphic(
        self,
        schema: s_schema.Schema,
        concrete_type: s_types.Type
    ) -> Optional[s_types.Type]:
        if self.is_any(schema):
            # ``anytype``: scalars resolve to their topmost concrete base so
            # e.g. subtypes unify; anything else resolves to itself.
            if isinstance(concrete_type, s_scalars.ScalarType):
                return concrete_type.get_topmost_concrete_base(schema)
            return concrete_type
        elif self.is_anytuple(schema):
            # ``anytuple`` only matches concrete (non-polymorphic) tuples.
            if (not concrete_type.is_tuple(schema) or
                    concrete_type.is_polymorphic(schema)):
                return None
            else:
                return concrete_type
        else:
            raise ValueError(
                f'unexpected pseudo type: {self.get_name(schema)}')
class PseudoTypeShell(s_types.TypeShell[PseudoType]):
    """Unresolved, by-name reference to a PseudoType."""

    def __init__(
        self,
        *,
        name: sn.Name,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> None:
        super().__init__(
            name=name, schemaclass=PseudoType, sourcectx=sourcectx)
    def is_polymorphic(self, schema: s_schema.Schema) -> bool:
        # Pseudo-types are polymorphic by definition.
        return True
    def resolve(self, schema: s_schema.Schema) -> PseudoType:
        """Resolve this shell to the actual PseudoType object in *schema*."""
        return PseudoType.get(schema, self.name)
class PseudoTypeCommandContext(sd.ObjectCommandContext[PseudoType]):
    """Command-context frame for operations on PseudoType objects."""
    pass
class PseudoTypeCommand(
    s_types.TypeCommand[PseudoType],
    context_class=PseudoTypeCommandContext,
):
    """Base class for schema-delta commands operating on pseudo-types."""
    pass
class CreatePseudoType(PseudoTypeCommand, sd.CreateObject[PseudoType]):
    """CREATE PSEUDO TYPE command handler."""

    astnode = qlast.CreatePseudoType

    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> sd.Command:
        # Pseudo-types are built-in: only standard-library compilation
        # (stdmode) or the test harness (testmode) may create them.
        if not (context.stdmode or context.testmode):
            raise errors.UnsupportedFeatureError(
                'user-defined pseudotypes are not supported',
                context=astnode.context
            )
        return super()._cmd_tree_from_ast(schema, astnode, context)
| edgedb/edgedb | edb/schema/pseudo.py | Python | apache-2.0 | 5,525 |
#!/usr/bin/python
from pisi.actionsapi import shelltools, get, autotools, pisitools
# Point HOME at the sandboxed work directory so the build cannot write to
# the real user's home directory.
shelltools.export("HOME", get.workDIR())
def setup():
    """Configure the source tree; static libraries are not built."""
    autotools.configure("--disable-static")
def build():
    """Compile using the default make targets."""
    autotools.make()
def install():
    """Install into the package staging directory and ship the docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING")
| richard-fisher/repository | desktop/gnome/core/libgtop/actions.py | Python | gpl-2.0 | 351 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from desktop.lib.conf import Config, coerce_bool
# Boolean .ini flags choosing which Sentry API generation the Security app
# uses for each backing service; the help strings are shown in the admin UI.
HIVE_V1 = Config(
  key="hive_v1",
  help=_("Use Sentry API V1 for Hive."),
  default=True,
  type=coerce_bool)
HIVE_V2 = Config(
  key="hive_v2",
  help=_("Use Sentry generic API V2 for Hive."),
  default=False,
  type=coerce_bool)
SOLR_V2 = Config(
  key="solr_v2",
  help=_("Use Sentry generic API V2 for Solr."),
  default=True,
  type=coerce_bool)
| jayceyxc/hue | apps/security/src/security/conf.py | Python | apache-2.0 | 1,255 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
from mxnet.ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
from common import (setup_module, with_seed, assertRaises, teardown,
assert_raises_cudnn_not_satisfied)
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import raises, assert_raises
from copy import deepcopy
import warnings
import json
import unittest
@with_seed()
def test_parameter():
    """A Parameter replicated on two contexts exposes per-context data/grad
    and can be moved to new contexts with reset_ctx."""
    p = gluon.Parameter('weight', shape=(10, 10))
    p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
    assert len(p.list_data()) == 2
    assert len(p.list_grad()) == 2
    assert p.data(mx.cpu(1)).context == mx.cpu(1)
    assert p.data(mx.cpu(0)).shape == (10, 10)
    assert p.var().name == 'weight'
    assert p.grad(mx.cpu(0)).stype == 'default'
    assert p.data(mx.cpu(0)).stype == 'default'
    p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])
    assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]
@with_seed()
@raises(AssertionError)
def test_invalid_parameter_stype():
    """An unknown storage type must be rejected at Parameter construction."""
    p = gluon.Parameter('weight', shape=(10, 10), stype='invalid')
@with_seed()
@raises(AssertionError)
def test_invalid_parameter_grad_stype():
    """An unknown gradient storage type must be rejected at construction."""
    p = gluon.Parameter('weight', shape=(10, 10), grad_stype='invalid')
@with_seed()
def test_sparse_parameter():
    """row_sparse Parameter: row_sparse_data access requires a Trainer and
    returns per-context row-sparse slices."""
    p = gluon.Parameter('weight', shape=(10, 10), stype='row_sparse', grad_stype='row_sparse')
    p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
    row_id = mx.nd.arange(0, 10, ctx=mx.cpu(1))
    assert len(p.list_grad()) == 2
    # getting row_sparse data without trainer throws an exception
    assertRaises(RuntimeError, p.list_row_sparse_data, row_id)
    trainer = mx.gluon.Trainer([p], 'sgd')
    assert len(p.list_row_sparse_data(row_id)) == 2
    weight = p.row_sparse_data(row_id)
    assert weight.context == mx.cpu(1)
    assert weight.shape == (10, 10)
    assert weight.stype == 'row_sparse'
    assert p.var().name == 'weight'
    assert p.var().attr('__storage_type__') == str(_STORAGE_TYPE_STR_TO_ID['row_sparse'])
    assert p.grad(mx.cpu(0)).stype == 'row_sparse'
    p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])
    assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]
@with_seed()
def test_parameter_invalid_access():
    """Dense accessors on sparse parameters (and vice versa) must raise."""
    # cannot call data on row_sparse parameters
    p0 = gluon.Parameter('weight', shape=(10, 10), stype='row_sparse', grad_stype='row_sparse')
    p0.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
    assertRaises(RuntimeError, p0.data)
    assertRaises(RuntimeError, p0.list_data)
    row_id = mx.nd.arange(0, 10)
    # cannot call row_sparse_data on dense parameters
    p1 = gluon.Parameter('weight', shape=(10, 10))
    p1.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
    assertRaises(RuntimeError, p1.row_sparse_data, row_id.copyto(mx.cpu(0)))
    assertRaises(RuntimeError, p1.list_row_sparse_data, row_id)
@with_seed()
def test_paramdict():
    """ParameterDict save/load round-trips dense and row_sparse parameters,
    including loading a sparse checkpoint into a dense ParameterDict."""
    ctx = mx.cpu(1)
    params0 = gluon.ParameterDict('net_')
    params0.get('w0', shape=(10, 10))
    params0.get('w1', shape=(10, 10), stype='row_sparse')
    all_row_ids = mx.nd.arange(0, 10, ctx=ctx)
    # check param names
    assert list(params0.keys()) == ['net_w0', 'net_w1']
    params0.initialize(ctx=ctx)
    trainer0 = mx.gluon.Trainer(params0, 'sgd')
    prev_w0 = params0.get('w0').data(ctx)
    prev_w1 = params0.get('w1').row_sparse_data(all_row_ids)
    # Save into a private temp directory instead of the CWD so repeated or
    # parallel test runs cannot clobber each other's checkpoint files.
    param_path = os.path.join(tempfile.mkdtemp(), 'test_paramdict.params')
    params0.save(param_path)
    # load params
    params1 = gluon.ParameterDict('net_')
    params1.get('w0', shape=(10, 10))
    params1.get('w1', shape=(10, 10), stype='row_sparse')
    params1.load(param_path, ctx)
    trainer1 = mx.gluon.Trainer(params1, 'sgd')
    # compare the values before and after save/load
    cur_w0 = params1.get('w0').data(ctx)
    cur_w1 = params1.get('w1').row_sparse_data(all_row_ids)
    mx.test_utils.assert_almost_equal(prev_w0.asnumpy(), cur_w0.asnumpy())
    mx.test_utils.assert_almost_equal(prev_w1.asnumpy(), cur_w1.asnumpy())
    # create a new param dict with dense params, and load from the checkpoint
    # of sparse & dense params
    params2 = gluon.ParameterDict('net_')
    params2.get('w0', shape=(10, 10))
    params2.get('w1', shape=(10, 10))
    params2.load(param_path, ctx)
    # compare the values before and after save/load
    cur_w0 = params2.get('w0').data(ctx)
    cur_w1 = params2.get('w1').data(ctx)
    mx.test_utils.assert_almost_equal(prev_w0.asnumpy(), cur_w0.asnumpy())
    mx.test_utils.assert_almost_equal(prev_w1.asnumpy(), cur_w1.asnumpy())
@with_seed()
def test_parameter_row_sparse_data():
    """row_sparse_data returns rows retained by the given row-id set, on the
    row ids' context."""
    ctx0 = mx.cpu(1)
    ctx1 = mx.cpu(2)
    dim0 = 4
    x = gluon.Parameter('x', shape=(dim0, 2), stype='row_sparse')
    x.initialize(init='xavier', ctx=[ctx0, ctx1])
    trainer = gluon.Trainer([x], 'sgd')
    x_param = x._data[0].copy()
    assert x_param.stype == 'row_sparse'
    # Subset of rows on ctx0.
    row_id_0 = mx.nd.array([0,1], ctx=ctx0)
    retained_0 = x.row_sparse_data(row_id_0)
    retained_target_0 = mx.nd.sparse.retain(x_param, row_id_0.as_in_context(ctx0))
    mx.test_utils.assert_almost_equal(retained_0.asnumpy(), retained_target_0.asnumpy())
    assert retained_0.context == ctx0
    # All rows on ctx1 should reproduce the full parameter.
    row_id_1 = mx.nd.arange(0, dim0, ctx=ctx1)
    retained_1 = x.row_sparse_data(row_id_1)
    retained_target_1 = x_param
    mx.test_utils.assert_almost_equal(retained_1.asnumpy(), retained_target_1.asnumpy())
    assert retained_1.context == ctx1
    # list_row_sparse_data returns one slice per context.
    row_id_2 = mx.nd.array([0,1,2])
    retained_2 = x.list_row_sparse_data(row_id_2)
    retained_target_2 = mx.nd.sparse.retain(x_param, row_id_2.as_in_context(ctx0))
    mx.test_utils.assert_almost_equal(retained_2[0].asnumpy(), retained_target_2.asnumpy())
@with_seed()
def test_constant():
    """get_constant parameters stay fixed under training while gradients
    still flow to the inputs."""
    class Test(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Test, self).__init__(**kwargs)
            self.value = np.asarray([[1,2], [3,4]])
            self.const = self.params.get_constant('const', self.value)
        def hybrid_forward(self, F, x, const):
            return x + const
    test = Test()
    test.initialize()
    trainer = gluon.Trainer(test.collect_params(), 'sgd',
                            {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        x = mx.nd.ones((2,2))
        x.attach_grad()
        y = test(x)
        y.backward()
    trainer.step(1)
    # The constant is untouched by the optimizer step; x receives the
    # identity gradient from the addition.
    assert (test.const.data().asnumpy() == test.value).all()
    assert (x.grad.asnumpy() == 1).all()
@with_seed()
def test_parameter_sharing():
    """Blocks can share ParameterDicts across prefixes and save/load the
    shared parameters, with and without fixed in_units (shape inference)."""
    class Net(gluon.Block):
        def __init__(self, in_units=0, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(5, in_units=in_units)
                self.dense1 = nn.Dense(5, in_units=in_units)
        def forward(self, x):
            return self.dense1(self.dense0(x))
    # Write checkpoints into a temp directory instead of the CWD so the test
    # leaves no artifacts behind and parallel runs cannot collide.
    tmpdir = tempfile.mkdtemp()
    net1_path = os.path.join(tmpdir, 'net1.params')
    net4_path = os.path.join(tmpdir, 'net4.params')
    net1 = Net(prefix='net1_', in_units=5)
    net2 = Net(prefix='net2_', params=net1.collect_params())
    net1.collect_params().initialize()
    net2(mx.nd.zeros((3, 5)))
    net1.save_parameters(net1_path)
    net3 = Net(prefix='net3_')
    net3.load_parameters(net1_path, mx.cpu())
    # Same again, but with deferred shape inference (in_units=0 default).
    net4 = Net(prefix='net4_')
    net5 = Net(prefix='net5_', in_units=5, params=net4.collect_params())
    net4.collect_params().initialize()
    net5(mx.nd.zeros((3, 5)))
    net4.save_parameters(net4_path)
    net6 = Net(prefix='net6_')
    net6.load_parameters(net4_path, mx.cpu())
@with_seed()
def test_parameter_str():
    """str(ParameterDict) lists prefix, parameter names, shapes and dtypes."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(10, in_units=5, use_bias=False)
    net = Net(prefix='net1_')
    lines = str(net.collect_params()).splitlines()
    assert lines[0] == 'net1_ ('
    assert 'net1_dense0_weight' in lines[1]
    assert '(10, 5)' in lines[1]
    assert 'float32' in lines[1]
    assert lines[2] == ')'
@with_seed()
def test_collect_paramters():
    # NOTE(review): the name misspells 'parameters'; kept as-is since test
    # runners/CI may select it by name.
    """collect_params supports regex and alternation filters on param names."""
    net = nn.HybridSequential(prefix="test_")
    with net.name_scope():
        net.add(nn.Conv2D(10, 3))
        net.add(nn.Dense(10, activation='relu'))
    assert set(net.collect_params().keys()) == \
        set(['test_conv0_weight', 'test_conv0_bias','test_dense0_weight','test_dense0_bias'])
    assert set(net.collect_params('.*weight').keys()) == \
        set(['test_conv0_weight', 'test_dense0_weight'])
    assert set(net.collect_params('test_conv0_bias|test_dense0_bias').keys()) == \
        set(['test_conv0_bias', 'test_dense0_bias'])
@with_seed()
def test_basic():
    """Sequential works symbolically and imperatively; grad_req toggling
    allocates/frees gradient buffers."""
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))
    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7
    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()
    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None
@with_seed()
def test_dense():
    """Dense: flatten=False keeps leading axes; flatten=True collapses them."""
    model = nn.Dense(128, activation='tanh', in_units=10, flatten=False, prefix='test_')
    inputs = mx.sym.Variable('data')
    outputs = model(inputs)
    assert set(model.collect_params().keys()) == set(['test_weight', 'test_bias'])
    assert outputs.list_outputs() == ['test_tanh_fwd_output']
    args, outs, auxs = outputs.infer_shape(data=(2, 3, 10))
    assert outs == [(2, 3, 128)]
    model = nn.Dense(128, activation='relu', in_units=30, flatten=True, prefix='test2_')
    inputs = mx.sym.Variable('data')
    outputs = model(inputs)
    assert set(model.collect_params().keys()) == set(['test2_weight', 'test2_bias'])
    assert outputs.list_outputs() == ['test2_relu_fwd_output']
    args, outs, auxs = outputs.infer_shape(data=(17, 2, 5, 3))
    assert outs == [(17, 128)]
@with_seed()
def test_symbol_block():
    """SymbolBlock wraps symbolic outputs, supports hybridization, and
    handles non-fp32 parameter dtypes (fp64 export/import/cast)."""
    model = nn.HybridSequential()
    model.add(nn.Dense(128, activation='tanh'))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh'),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))
    model.initialize()
    # Wrapping get_internals() exposes every intermediate output.
    inputs = mx.sym.var('data')
    outputs = model(inputs).get_internals()
    smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
    assert len(smodel(mx.nd.zeros((16, 10)))) == 14
    out = smodel(mx.sym.var('in'))
    assert len(out) == len(outputs.list_outputs())
    class Net(nn.HybridBlock):
        def __init__(self, model):
            super(Net, self).__init__()
            self.model = model
        def hybrid_forward(self, F, x):
            out = self.model(x)
            return F.add_n(*[i.sum() for i in out])
    net = Net(smodel)
    net.hybridize()
    assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
    inputs = mx.sym.var('data')
    outputs = model(inputs)
    smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
    net = Net(smodel)
    net.hybridize()
    assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
    # Test case to verify if initializing the SymbolBlock from a model with params
    # other than fp32 param dtype.
    # 1. Load a resnet model, cast it to fp64 and export
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'resnet34_fp64')
    ctx = mx.cpu(0)
    net_fp32 = mx.gluon.model_zoo.vision.resnet34_v2(pretrained=True, ctx=ctx, root=tmp)
    net_fp32.cast('float64')
    net_fp32.hybridize()
    data = mx.nd.zeros((1,3,224,224), dtype='float64', ctx=ctx)
    net_fp32.forward(data)
    net_fp32.export(tmpfile, 0)
    # 2. Load the saved model and verify if all the params are loaded correctly.
    # and choose one of the param to verify the type if fp64.
    sm = mx.sym.load(tmpfile + '-symbol.json')
    inputs = mx.sym.var('data', dtype='float64')
    net_fp64 = mx.gluon.SymbolBlock(sm, inputs)
    net_fp64.collect_params().load(tmpfile + '-0000.params', ctx=ctx)
    # 3. Get a conv layer's weight parameter name. Conv layer's weight param is
    # expected to be of dtype casted, fp64.
    for param_name in net_fp64.params.keys():
        if 'conv' in param_name and 'weight' in param_name:
            break
    assert np.dtype(net_fp64.params[param_name].dtype) == np.dtype(np.float64)
    # Cast the symbol block to FP32 and try to forward a FP32 data.
    # This will verify SymbolBlock.cast() functionality.
    net_fp64.cast('float32')
    fp32_data = mx.nd.zeros((1,3,224,224), dtype='float32', ctx=ctx)
    prediction = net_fp64.forward(fp32_data)
    assert np.dtype(prediction.dtype) == np.dtype(np.float32)
@with_seed()
@raises(AssertionError)
def test_sparse_symbol_block():
    """SymbolBlock must refuse graphs containing row_sparse parameters."""
    data = mx.sym.var('data')
    weight = mx.sym.var('weight', stype='row_sparse')
    bias = mx.sym.var('bias')
    out = mx.sym.broadcast_add(mx.sym.dot(data, weight), bias)
    # an exception is expected when creating a SparseBlock w/ sparse param
    net = gluon.SymbolBlock(out, data)
@with_seed()
@raises(RuntimeError)
def test_sparse_hybrid_block():
    """Forwarding a HybridBlock that holds a row_sparse parameter must raise."""
    params = gluon.ParameterDict('net_')
    params.get('weight', shape=(5,5), stype='row_sparse', dtype='float32')
    params.get('bias', shape=(5), dtype='float32')
    net = gluon.nn.Dense(5, params=params)
    net.initialize()
    x = mx.nd.ones((2,5))
    # an exception is expected when forwarding a HybridBlock w/ sparse param
    y = net(x)
@with_seed()
def check_layer_forward(layer, dshape):
    """Forward/backward *layer* imperatively, then hybridized, and check the
    outputs and input gradients agree between the two execution modes."""
    print("checking layer {}\nshape: {}.".format(layer, dshape))
    layer.collect_params().initialize()
    x = mx.nd.ones(shape=dshape)
    x.attach_grad()
    with mx.autograd.record():
        out = layer(x)
        out.backward()
    np_out = out.asnumpy()
    np_dx = x.grad.asnumpy()
    # Repeat in hybridized (symbolic) mode with a fresh input.
    layer.hybridize()
    x = mx.nd.ones(shape=dshape)
    x.attach_grad()
    with mx.autograd.record():
        out = layer(x)
        out.backward()
    mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(np_dx, x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
@with_seed()
def test_conv():
    """Conv1D/2D/3D variants (groups, strides, dilation, padding) agree
    between imperative and hybridized execution."""
    layers1d = [
        nn.Conv1D(16, 3, in_channels=4),
        nn.Conv1D(16, 3, groups=2, in_channels=4),
        nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4),
        ]
    for layer in layers1d:
        check_layer_forward(layer, (1, 4, 10))
    layers2d = [
        nn.Conv2D(16, (3, 4), in_channels=4),
        nn.Conv2D(16, (5, 4), in_channels=4),
        nn.Conv2D(16, (3, 4), groups=2, in_channels=4),
        nn.Conv2D(16, (3, 4), strides=4, in_channels=4),
        nn.Conv2D(16, (3, 4), dilation=4, in_channels=4),
        nn.Conv2D(16, (3, 4), padding=4, in_channels=4),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 4, 20, 20))
    layers3d = [
        nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'),
        nn.Conv3D(16, (5, 4, 3), in_channels=4),
        nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4),
        nn.Conv3D(16, 4, strides=4, in_channels=4),
        nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4),
        ]
    for layer in layers3d:
        check_layer_forward(layer, (1, 4, 10, 10, 10))
    # NHWC/NDHWC layouts are constructed but their forward checks are
    # disabled below (layout support is limited on CPU — TODO confirm).
    layer = nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4)
    # check_layer_forward(layer, (1, 10, 10, 4))
    layer = nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4)
    # check_layer_forward(layer, (1, 10, 10, 10, 4))
@with_seed()
def test_deconv():
    """Imperative/hybrid parity for Conv2DTranspose layers.

    The 1D and 3D transpose variants (and some 2D configurations) are
    deliberately left commented out below.
    """
    # layers1d = [
    #     nn.Conv1DTranspose(16, 3, in_channels=4),
    #     nn.Conv1DTranspose(16, 3, groups=2, in_channels=4),
    #     nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4),
    #     ]
    # for layer in layers1d:
    #     check_layer_forward(layer, (1, 4, 10))

    layers2d = [
        nn.Conv2DTranspose(16, (3, 4), in_channels=4),
        nn.Conv2DTranspose(16, (5, 4), in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),
        # nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 4, 20, 20))

    # layers3d = [
    #     nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4),
    #     nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4),
    #     nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4),
    #     nn.Conv3DTranspose(16, 4, strides=4, in_channels=4),
    #     nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4),
    #     ]
    # for layer in layers3d:
    #     check_layer_forward(layer, (1, 4, 10, 10, 10))
    #
    #
    # layer = nn.Conv2DTranspose(16, (3, 3), layout='NHWC', in_channels=4)
    # # check_layer_forward(layer, (1, 10, 10, 4))
    #
    # layer = nn.Conv3DTranspose(16, (3, 3, 3), layout='NDHWC', in_channels=4)
    # # check_layer_forward(layer, (1, 10, 10, 10, 4))
@with_seed()
def test_pool():
    """Imperative/hybrid parity for 1D/2D/3D max/avg/global pooling and an
    explicit output-shape check of ``ceil_mode``."""
    layers1d = [
        nn.MaxPool1D(),
        nn.MaxPool1D(3),
        nn.MaxPool1D(3, 2),
        nn.AvgPool1D(),
        nn.AvgPool1D(count_include_pad=False),
        nn.GlobalAvgPool1D(),
        ]
    for layer in layers1d:
        check_layer_forward(layer, (1, 2, 10))

    layers2d = [
        nn.MaxPool2D(),
        nn.MaxPool2D((3, 3)),
        nn.MaxPool2D(3, 2),
        nn.AvgPool2D(),
        nn.AvgPool2D(count_include_pad=False),
        nn.GlobalAvgPool2D(),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 2, 10, 10))

    layers3d = [
        nn.MaxPool3D(),
        nn.MaxPool3D((3, 3, 3)),
        nn.MaxPool3D(3, 2),
        nn.AvgPool3D(),
        nn.AvgPool3D(count_include_pad=False),
        nn.GlobalAvgPool3D(),
        ]
    for layer in layers3d:
        check_layer_forward(layer, (1, 2, 10, 10, 10))

    # test ceil_mode: with a 10x10 input and 3x3 window (stride 3),
    # floor mode yields 3x3 output while ceil mode yields 4x4.
    x = mx.nd.zeros((2, 2, 10, 10))

    layer = nn.MaxPool2D(3, ceil_mode=False)
    layer.collect_params().initialize()
    assert (layer(x).shape==(2, 2, 3, 3))

    layer = nn.MaxPool2D(3, ceil_mode=True)
    layer.collect_params().initialize()
    assert (layer(x).shape==(2, 2, 4, 4))
@with_seed()
def test_batchnorm():
    """BatchNorm: imperative vs hybridized forward/backward must agree."""
    check_layer_forward(nn.BatchNorm(in_channels=10), (2, 10, 10, 10))
@with_seed()
def test_instancenorm():
    """InstanceNorm: imperative vs hybridized forward/backward must agree."""
    check_layer_forward(nn.InstanceNorm(in_channels=10), (2, 10, 10, 10))
@with_seed()
def test_layernorm():
    """LayerNorm: imperative vs hybridized forward/backward must agree."""
    check_layer_forward(nn.LayerNorm(in_channels=10), (2, 10, 10, 10))
@with_seed()
def test_reflectionpad():
    """ReflectionPad2D: imperative vs hybridized forward/backward must agree."""
    check_layer_forward(nn.ReflectionPad2D(3), (2, 3, 24, 24))
@with_seed()
def test_reshape():
    """Backward through a flat reshape of a conv output must not error."""
    x = mx.nd.ones((2, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.collect_params().initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x.reshape((-1,))
        x = x + 10
    x.backward()
@with_seed()
def test_slice():
    """Backward through a batch-dim slice of a conv output must not error."""
    x = mx.nd.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.collect_params().initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x[1:3]
        x = x + 10
    x.backward()
@with_seed()
def test_at():
    """Backward through single-element indexing of a conv output must not error."""
    x = mx.nd.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.collect_params().initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x[1]
        x = x + 10
    x.backward()
@with_seed()
def test_deferred_init():
    """Conv2D without in_channels must defer shape inference to first call."""
    layer = nn.Conv2D(10, 2)
    layer.collect_params().initialize()
    layer(mx.nd.ones((5, 4, 10, 10)))
def check_split_data(x, num_slice, batch_axis, **kwargs):
    """Split ``x`` into ``num_slice`` parts along ``batch_axis`` and verify
    the slice count and that concatenation restores the original array."""
    slices = gluon.utils.split_data(x, num_slice, batch_axis, **kwargs)
    assert len(slices) == num_slice
    restored = mx.nd.concat(*slices, dim=batch_axis)
    mx.test_utils.assert_almost_equal(restored.asnumpy(), x.asnumpy())
@with_seed()
def test_split_data():
    """split_data: even splits, uneven splits with even_split=False, and a
    ValueError when an even split is requested but impossible (33 % 4 != 0)."""
    x = mx.nd.random.uniform(shape=(128, 33, 64))

    check_split_data(x, 8, 0)
    check_split_data(x, 3, 1)
    check_split_data(x, 4, 1, even_split=False)
    check_split_data(x, 15, 1, even_split=False)
    try:
        check_split_data(x, 4, 1)
    except ValueError:
        return
    assert False, "Should have failed"
@with_seed()
def test_flatten():
    """nn.Flatten collapses all non-batch dims; 1-D input gains a unit axis."""
    flatten = nn.Flatten()
    cases = [((3, 4, 5, 6), (3, 4*5*6)),
             ((3, 6), (3, 6)),
             ((3,), (3, 1))]
    for in_shape, expected in cases:
        assert flatten(mx.nd.zeros(in_shape)).shape == expected
@with_seed()
def test_block_attr_hidden():
    """Plain (non-Block, non-Parameter) attributes may freely change type."""
    b = gluon.Block()

    # regular attributes can change types
    b.a = None
    b.a = 1
@raises(TypeError)
@with_seed()
def test_block_attr_block():
    """Rebinding a Block-typed attribute to a non-Block must raise TypeError."""
    b = gluon.Block()

    # regular variables can't change types
    b.b = gluon.Block()
    b.b = (2,)
@raises(TypeError)
@with_seed()
def test_block_attr_param():
    """Rebinding a Parameter-typed attribute to a non-Parameter must raise TypeError."""
    b = gluon.Block()

    # regular variables can't change types
    b.b = gluon.Parameter()
    b.b = (2,)
@with_seed()
def test_block_attr_regular():
    """Assigning a child Block as an attribute must register it in _children,
    and reassignment must replace the registered child."""
    b = gluon.Block()

    # set block attribute also sets _children
    b.c = gluon.Block()
    c2 = gluon.Block()
    b.c = c2
    assert b.c is c2 and list(b._children.values())[0] is c2
@with_seed()
def test_block_attr_list_of_block():
    """Blocks stored in raw lists/dicts are invisible to collect_params() and
    must trigger a warning; Sequential children and plain-data dicts must not."""
    class Model1(gluon.Block):
        def __init__(self, **kwargs):
            super(Model1, self).__init__(**kwargs)
            with self.name_scope():
                # Blocks hidden inside a plain list -> expect a warning.
                self.layers = [nn.Dense(i * 10) for i in range(6)]

    class Model2(gluon.Block):
        def __init__(self, **kwargs):
            super(Model2, self).__init__(**kwargs)
            with self.name_scope():
                # Blocks hidden inside a dict of lists -> expect a warning.
                self.layers = dict()
                self.layers['a'] = [nn.Dense(10), nn.Dense(10)]

    class Model3(gluon.Block):
        def __init__(self, **kwargs):
            super(Model3, self).__init__(**kwargs)
            with self.name_scope():
                # Blocks registered via Sequential -> no warning expected.
                self.layers = nn.Sequential()
                self.layers.add(*[nn.Dense(i * 10) for i in range(6)])

    class Model4(gluon.Block):
        def __init__(self, **kwargs):
            super(Model4, self).__init__(**kwargs)
            with self.name_scope():
                # Dict of plain data (no Blocks) -> no warning expected.
                self.data = {'a': '4', 'b': 123}

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model1()
        model.collect_params()
        assert len(w) > 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model2()
        model.collect_params()
        assert len(w) > 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model3()
        model.collect_params()
        assert len(w) == 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model4()
        model.collect_params()
        assert len(w) == 0
def check_sequential(net):
    """Verify Sequential indexing, negative indexing and slicing semantics."""
    blocks = [gluon.nn.Dense(10) for _ in range(3)]
    for blk in blocks:
        net.add(blk)

    assert net[1] is blocks[1]
    assert net[-1] is blocks[2]
    sub = net[1:3]
    assert len(sub) == 2 and sub[0] is blocks[1] and sub[1] is blocks[2]
    # A slice must be a container of the same concrete type.
    assert isinstance(sub, type(net))
@with_seed()
def test_sequential():
    """Run the shared Sequential checks on both container flavours."""
    for container in (gluon.nn.Sequential, gluon.nn.HybridSequential):
        check_sequential(container())
@with_seed()
def test_sequential_warning():
    """Hybridizing a non-hybrid Sequential must emit exactly one warning."""
    with warnings.catch_warnings(record=True) as w:
        # The following line permits the test to pass if run multiple times
        warnings.simplefilter('always')
        b = gluon.nn.Sequential()
        b.add(gluon.nn.Dense(20))
        b.hybridize()
        assert len(w) == 1
@with_seed()
def test_global_norm_clip():
    """clip_global_norm on dense and row-sparse arrays: the returned global
    norm (sqrt(9+16)=5) scales arrays to max_norm, and non-finite values
    warn only when check_isfinite=True."""
    stypes = ['default', 'row_sparse']
    def check_global_norm_clip(stype, check_isfinite):
        x1 = mx.nd.ones((3,3)).tostype(stype)
        x2 = mx.nd.ones((4,4)).tostype(stype)
        norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite)
        assert norm == 5.0
        # Arrays are rescaled in place by max_norm / global_norm = 1/5.
        assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
        assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)

        x3 = mx.nd.array([1.0, 2.0, float('nan')]).tostype(stype)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            gluon.utils.clip_global_norm([x1, x3], 2.0, check_isfinite=check_isfinite)
            # One warning iff the finiteness check is enabled.
            assert len(w) == check_isfinite

    for stype in stypes:
        for check_isfinite in [True, False]:
            check_global_norm_clip(stype, check_isfinite)
@with_seed()
def test_embedding():
    """Embedding gradient checks for dense and row-sparse gradients.

    Verifies that only looked-up rows receive gradient, and that a large
    input (20481 lookups) accumulates the expected gradient sum.
    """
    def check_embedding(sparse_grad):
        # 10-token vocabulary, 100-dim embeddings.
        layer = gluon.nn.Embedding(10, 100, sparse_grad=sparse_grad)
        layer.initialize()
        x = mx.nd.array([3,4,2,0,1])
        with mx.autograd.record():
            y = layer(x)
            y.backward()
        # Rows 0-4 were each looked up exactly once; rows 5-9 never.
        assert (layer.weight.grad().asnumpy()[:5] == 1).all()
        assert (layer.weight.grad().asnumpy()[5:] == 0).all()

    def check_embedding_large_input(sparse_grad):
        # BUG FIX: ``sparse_grad`` was previously hard-coded to True, so the
        # sparse_grad=False call below never exercised the dense-grad path.
        embedding = mx.gluon.nn.Embedding(10, 1, sparse_grad=sparse_grad)
        embedding.initialize()
        embedding.hybridize()
        shape = (20481,)
        with mx.autograd.record():
            emb_in = embedding(mx.nd.ones(shape))
            loss = emb_in.sum()
            loss.backward()
        grad = embedding.weight.grad()
        # Row-sparse gradients expose their values via ``.data``; dense
        # gradients can be summed directly.
        total = grad.data.sum() if sparse_grad else grad.sum()
        assert total.asscalar() == 20481

    check_embedding(True)
    check_embedding(False)
    check_embedding_large_input(True)
    check_embedding_large_input(False)
@with_seed()
def test_export():
    """HybridBlock.export: the exported symbol+params must reproduce the
    block's output via both the Module API and a fresh parameter load."""
    ctx = mx.context.current_context()
    model = gluon.model_zoo.vision.resnet18_v1(
        prefix='resnet', ctx=ctx, pretrained=True)
    model.hybridize()
    data = mx.nd.random.normal(shape=(1, 3, 32, 32))
    out = model(data)

    model.export('gluon')

    # Reload through the legacy Module API and compare outputs.
    module = mx.mod.Module.load('gluon', 0, label_names=None, context=ctx)
    module.bind(data_shapes=[('data', data.shape)])
    module.forward(mx.io.DataBatch([data], None), is_train=False)
    mod_out, = module.get_outputs()

    assert_almost_equal(out.asnumpy(), mod_out.asnumpy())

    # Reload the exported params into a fresh gluon model and compare.
    model2 = gluon.model_zoo.vision.resnet18_v1(prefix='resnet', ctx=ctx)
    model2.collect_params().load('gluon-0000.params', ctx)
    out2 = model2(data)

    assert_almost_equal(out.asnumpy(), out2.asnumpy())
@with_seed()
def test_import():
    """SymbolBlock.imports must round-trip an exported network exactly."""
    ctx = mx.context.current_context()
    net1 = gluon.model_zoo.vision.resnet18_v1(
        prefix='resnet', ctx=ctx, pretrained=True)
    net1.hybridize()
    data = mx.nd.random.normal(shape=(1, 3, 32, 32))
    out1 = net1(data)

    net1.export('net1', epoch=1)

    net2 = gluon.SymbolBlock.imports(
        'net1-symbol.json', ['data'], 'net1-0001.params', ctx)
    out2 = net2(data)

    assert_almost_equal(out1.asnumpy(), out2.asnumpy())
@with_seed()
def test_hybrid_stale_cache():
    """Mutating a hybridized net (adding/replacing children) must invalidate
    the cached graph so the next forward reflects the new structure."""
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False))

    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5)))

    # Adding a child after hybridization must not reuse the stale cache.
    net.add(mx.gluon.nn.Flatten())
    assert net(mx.nd.ones((2,3,5))).shape == (2, 30)

    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
        net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5)))

    # Replacing a child (now with flatten=True) must also refresh the cache.
    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=True)
    net.initialize()
    assert net(mx.nd.ones((2,3,5))).shape == (2, 10)
@with_seed()
def test_lambda():
    """Lambda/HybridLambda (by operator name and by callable) must match the
    equivalent built-in activation layers."""
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3)
@with_seed()
def test_fill_shape_deferred():
    """Deferred shape inference must fill in_channels/feature dims on the
    first forward pass of a hybridized net."""
    net = nn.HybridSequential()
    with net.name_scope():
        net.add(nn.Conv2D(64, kernel_size=2, padding=1),
                nn.BatchNorm(),
                nn.Dense(10))
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5,7)))
    # Inferred from the (2,3,5,7) input: conv in_channels=3, bn channels=64,
    # dense input = 64*6*8 = 3072.
    assert net[0].weight.shape[1] == 3, net[0].weight.shape[1]
    assert net[1].gamma.shape[0] == 64, net[1].gamma.shape[0]
    assert net[2].weight.shape[1] == 3072, net[2].weight.shape[1]
@with_seed()
def test_dtype():
    """Block.cast: float64 training, re-casting a hybridized net, and dtype
    propagation through Embedding/Dense in a custom Block."""
    net = mx.gluon.model_zoo.vision.resnet18_v1()
    net.initialize()
    net.cast('float64')
    with mx.autograd.record():
        y = net(mx.nd.ones((16, 3, 32, 32), dtype='float64'))
        y.backward()

    # Casting after a hybridized forward must rebuild the cached graph.
    net = mx.gluon.model_zoo.vision.resnet18_v1()
    net.initialize()
    net.hybridize()
    net(mx.nd.ones((16, 3, 32, 32), dtype='float32'))

    net.cast('float64')
    net(mx.nd.ones((16, 3, 32, 32), dtype='float64'))

    mx.nd.waitall()

    class Net(gluon.Block):
        def __init__(self, in_dim, output_dim):
            super(Net, self).__init__()
            with self.name_scope():
                self.embed = gluon.nn.Embedding(input_dim=in_dim, output_dim=output_dim,dtype=np.float64)
                self.dense = gluon.nn.Dense(2, dtype=np.float64)

        def forward(self, x):
            e = self.embed(x)
            assert(e.dtype == np.float64)
            y = self.dense(e)
            assert(y.dtype == np.float64)
            return y

    net = Net(5, 10)
    net.initialize()
    out = net(mx.nd.ones((3,), dtype=np.float64))
    mx.nd.waitall()
@with_seed()
def test_fill_shape_load():
    """Loading saved parameters into a shape-undetermined net must fill in
    the deferred shapes, matching those inferred by a forward pass."""
    ctx = mx.context.current_context()
    net1 = nn.HybridSequential()
    with net1.name_scope():
        net1.add(nn.Conv2D(64, kernel_size=2, padding=1),
                 nn.BatchNorm(),
                 nn.Dense(10))
    net1.hybridize()
    net1.initialize(ctx=ctx)
    net1(mx.nd.ones((2,3,5,7), ctx))
    net1.save_parameters('net_fill.params')

    net2 = nn.HybridSequential()
    with net2.name_scope():
        net2.add(nn.Conv2D(64, kernel_size=2, padding=1),
                 nn.BatchNorm(),
                 nn.Dense(10))
    net2.hybridize()
    net2.initialize()
    # Shapes come from the loaded parameters, not from a forward pass.
    net2.load_parameters('net_fill.params', ctx)
    assert net2[0].weight.shape[1] == 3, net2[0].weight.shape[1]
    assert net2[1].gamma.shape[0] == 64, net2[1].gamma.shape[0]
    assert net2[2].weight.shape[1] == 3072, net2[2].weight.shape[1]
@with_seed()
def test_inline():
    """hybridize(inline_limit=...): compare recorded graph sizes with
    inlining enabled (limit=3) vs disabled (limit=0)."""
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))

    net.initialize()
    net.hybridize(inline_limit=3)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    net.hybridize(inline_limit=0)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    # The non-inlined graph has two fewer nodes than the inlined one.
    assert len_1 == len_2 + 2
@with_seed()
def test_activations():
    """Check Swish, ELU, SELU and PReLU layers against point-wise reference
    implementations on a small set of positive/negative inputs."""
    point_to_validate = mx.nd.array([-0.1, 0.1] * 3)

    swish = mx.gluon.nn.Swish()
    def swish_test(x):
        # swish(x) = x * sigmoid(x)
        return x * mx.nd.sigmoid(x)

    for test_point, ref_point in zip(swish_test(point_to_validate), swish(point_to_validate)):
        assert test_point == ref_point

    elu = mx.gluon.nn.ELU()
    def elu_test(x):
        def elu(x):
            # elu(x) = alpha * (exp(x) - 1) for x < 0, x otherwise (alpha=1)
            return 1.0 * (mx.nd.exp(x) - 1) if x < 0 else x
        return [elu(x_i) for x_i in x]

    for test_point, ref_point in zip(elu_test(point_to_validate), elu(point_to_validate)):
        assert test_point == ref_point

    selu = mx.gluon.nn.SELU()
    def selu_test(x):
        def selu(x):
            scale, alpha = 1.0507009873554804934193349852946, 1.6732632423543772848170429916717
            # BUG FIX: the negative branch must also be multiplied by
            # ``scale``: selu(x) = scale * alpha * (exp(x) - 1) for x < 0.
            return scale * x if x >= 0 else scale * alpha * (mx.nd.exp(x) - 1)
        return [selu(x_i) for x_i in x]

    # BUG FIX: previously this zipped ``selu(...)`` against itself, so the
    # layer output was never compared with the reference implementation.
    for test_point, ref_point in zip(selu_test(point_to_validate), selu(point_to_validate)):
        assert test_point == ref_point

    prelu = mx.gluon.nn.PReLU()
    prelu.initialize()
    x = point_to_validate.reshape((1, 3, 2))
    # Default PReLU slope parameter is initialized to 0.25.
    assert_almost_equal(prelu(x).asnumpy(), mx.nd.where(x >= 0, x, 0.25 * x).asnumpy())
@with_seed()
def test_dropout():
    """Dropout with ``axes`` (broadcast dropout): the mask must be constant
    along every dropped axis, checked for all axis combinations."""
    def get_slice(x, axis, idx):
        """Index ``idx`` along ``axis``, keeping all other axes whole."""
        ix = ()
        for i in range(x.ndim):
            if i == axis:
                ix += (idx,)
            else:
                ix += (slice(None, None, None),)
        return x[ix]

    def check_dropout_axes(ratio, shape, axes):
        # Build an input that is constant along the dropped axes so equal
        # outputs imply an identical dropout mask along those axes.
        compactshape = list(shape)
        for axis in axes:
            compactshape[axis] = 1
        compactx = mx.random.uniform(shape=tuple(compactshape))
        broadcastx = compactx.broadcast_to(shape)
        dropouty = mx.gluon.nn.Dropout(rate=ratio, axes=axes)(broadcastx)
        for axis in axes:
            target = get_slice(dropouty, axis, 0).asnumpy()
            for i in range(1, shape[axis]):
                assert(get_slice(dropouty, axis, i).asnumpy() == target).all()

    nshape = (10, 10, 10, 10)
    with mx.autograd.train_mode():
        check_dropout_axes(0.25, nshape, axes = (0,))
        check_dropout_axes(0.25, nshape, axes = (1,))
        check_dropout_axes(0.25, nshape, axes = (2,))
        check_dropout_axes(0.25, nshape, axes = (3,))
        check_dropout_axes(0.25, nshape, axes = (0, 1))
        check_dropout_axes(0.25, nshape, axes = (0, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2))
        check_dropout_axes(0.25, nshape, axes = (1, 3))
        check_dropout_axes(0.25, nshape, axes = (2, 3))
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
@with_seed()
def test_req():
    """grad_req='add' must accumulate gradients: two backward passes yield
    exactly twice the gradient of one pass."""
    data = mx.nd.random.uniform(shape=(1,3,224,224))
    label = mx.nd.random.uniform(shape=(1))
    label[:] = 1
    loss = gluon.loss.SoftmaxCrossEntropyLoss()

    net = nn.HybridSequential()
    net1 = nn.HybridSequential()
    net1.add(nn.Dense(4))
    net2 = nn.HybridSequential()
    net2.add(nn.Dense(3))
    net2.add(nn.Dense(2))
    net.add(net1)
    net.add(net2)
    net.initialize()

    net.hybridize()

    for v in net.collect_params().values():
        v.grad_req = 'add'

    net.collect_params().zero_grad()
    with mx.autograd.record():
        pred = net(data)
        l = loss(pred, label)
        l.backward()
        grad = net[0][0].weight.grad().mean().asnumpy()
        # run twice to check req = add
        pred = net(data)
        l = loss(pred, label)
        l.backward()

    grad_double = net[0][0].weight.grad().mean().asnumpy()
    assert_almost_equal(grad * 2, grad_double)
@with_seed()
def test_save_load():
    """save_parameters/load_parameters round trips: a model whose output
    layer was replaced, and a hybridized custom Block with stacked LSTMs."""
    net = mx.gluon.model_zoo.vision.get_resnet(1, 18, pretrained=True)
    net.save_parameters('test_save_load.params')

    net = mx.gluon.model_zoo.vision.get_resnet(1, 18)
    net.output = mx.gluon.nn.Dense(1000)

    net.load_parameters('test_save_load.params')

    class Network(gluon.Block):
        def __init__(self, **kwargs):
            super(Network, self).__init__(**kwargs)
            with self.name_scope():
                self.encoders = gluon.nn.Sequential()
                with self.encoders.name_scope():
                    for _ in range(2):
                        lstm = mx.gluon.rnn.LSTM(200, 1, bidirectional=True)
                        self.encoders.add(lstm)

        def forward(self, x):
            for i in range(2):
                x = self.encoders[i](x)
            return x
    net = Network()
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    net.hybridize()
    x = np.random.rand(32, 10, 10)
    x = mx.nd.array(x).as_in_context(mx.cpu())
    net(x)
    net.save_parameters('tmp.params')
    net2 = Network()
    net2.load_parameters('tmp.params')
@with_seed()
def test_symbol_block_save_load():
    """Parameter save/load for a HybridBlock that embeds a SymbolBlock built
    from resnet18 internal feature outputs."""
    class Net(gluon.HybridBlock):
        def __init__(self):
            super(Net, self).__init__()
            with self.name_scope():
                backbone = gluon.model_zoo.vision.resnet18_v1()
                data = mx.sym.var('data')
                featnames = ['stage1_activation0', 'stage2_activation0', 'stage3_activation0']
                out_names = ['_'.join([backbone.name, featname, 'output']) for featname in featnames]
                internals = backbone(data).get_internals()
                outs = [internals[out_name] for out_name in out_names]
                self.backbone = gluon.SymbolBlock(outs, data, params=backbone.collect_params())
                self.body = nn.Conv2D(3, 1)

        def hybrid_forward(self, F, x):
            x = self.body(x)
            return self.backbone(x)

    net1 = Net()
    net1.initialize(mx.init.Normal())
    net1.hybridize()
    net1(mx.nd.random.normal(shape=(1, 3, 32, 32)))
    net1.save_parameters('./test_symbol_block_save_load.params')

    net2 = Net()
    net2.load_parameters('./test_symbol_block_save_load.params', ctx=mx.cpu())
@with_seed()
def test_hybrid_multi_context():
    """A net initialized on multiple contexts must run hybridized on one of them."""
    net = mx.gluon.model_zoo.vision.get_resnet(1, 18)
    net.initialize(ctx=[mx.cpu(0), mx.cpu(1)])
    net.hybridize()
    out = net(mx.nd.zeros((1, 3, 32, 32), ctx=mx.cpu(0)))
    out.asnumpy()
@with_seed()
def test_zero_grad():
    """zero_grad must clear gradients, including row-sparse ones."""
    data = mx.nd.random.uniform(shape=(3,3))
    net = nn.Embedding(3, 4, sparse_grad=True, prefix='test_zero_grad_')
    net.initialize()
    with mx.autograd.record():
        l = net(data)
        l.backward()
    net.collect_params().zero_grad()
    grad = net.collect_params()['test_zero_grad_weight'].grad()
    assert_almost_equal(grad.asnumpy(), grad.asnumpy() * 0)
def check_hybrid_static_memory(**kwargs):
    """Compare outputs and gradients of an imperative resnet against the
    same net hybridized with the given static-memory kwargs.

    Parameters
    ----------
    **kwargs
        Forwarded to ``net2.hybridize`` (e.g. static_alloc, static_shape).
    """
    x = mx.nd.random.uniform(shape=(2, 3, 32, 32))
    x.attach_grad()

    # Same prefix -> both nets share identical pretrained parameter names.
    net1 = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, prefix='net_', ctx=mx.context.current_context())
    net2 = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, prefix='net_', ctx=mx.context.current_context())
    net2.hybridize(**kwargs)
    net1(x)
    net2(x)

    def test(net, x):
        with mx.autograd.record():
            y = net(x) + net(x)
            y.backward()

        grads = {k: v.grad() for k, v in net.collect_params().items() if v.grad_req != 'null'}

        return y, grads

    y1, grads1 = test(net1, x)
    y2, grads2 = test(net2, x)

    assert_almost_equal(y1.asnumpy(), y2.asnumpy(), rtol=1e-3, atol=1e-5)
    for key in grads1:
        assert_almost_equal(grads1[key].asnumpy(), grads2[key].asnumpy(), rtol=1e-3, atol=2e-5)
@with_seed()
def test_hybrid_static_memory():
    """Static-alloc/static-shape hybridization must match dynamic execution."""
    for kwargs in ({}, {'static_alloc': True},
                   {'static_alloc': True, 'static_shape': True}):
        check_hybrid_static_memory(**kwargs)
def check_hybrid_static_memory_switching(**kwargs):
    """Run forward/backward with two different batch sizes on one hybridized
    net to exercise cached-graph re-binding under the given kwargs."""
    net = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, ctx=mx.context.current_context())
    net.hybridize(**kwargs)

    x = mx.nd.random.uniform(shape=(4, 3, 32, 32))
    net(x)
    with mx.autograd.record():
        y = net(x)
        y.backward()
    # Switch to a smaller batch; static allocations must adapt.
    x = mx.nd.random.uniform(shape=(2, 3, 32, 32))
    net(x)
    with mx.autograd.record():
        y = net(x)
        y.backward()
    mx.nd.waitall()
@with_seed()
def test_hybrid_static_memory_switching():
    """Batch-size switching must work under every static-memory mode."""
    for kwargs in ({}, {'static_alloc': True},
                   {'static_alloc': True, 'static_shape': True}):
        check_hybrid_static_memory_switching(**kwargs)
@with_seed()
def test_hook():
    """Forward hooks and pre-hooks fire once per call and stop after detach."""
    global hook_call_count
    hook_call_count = 0
    global pre_hook_call_count
    pre_hook_call_count = 0

    def call_hook(block, x, y):
        global hook_call_count
        hook_call_count += 1

    def call_pre_hook(block, x):
        global pre_hook_call_count
        pre_hook_call_count += 1

    block = nn.Dense(10)
    block.initialize()
    handle = block.register_forward_hook(call_hook)
    pre_handle = block.register_forward_pre_hook(call_pre_hook)
    block(mx.nd.ones((3, 5)))

    assert hook_call_count == 1
    assert pre_hook_call_count == 1

    # After detaching the forward hook only the pre-hook keeps firing.
    handle.detach()
    block(mx.nd.ones((3, 5)))

    assert hook_call_count == 1
    assert pre_hook_call_count == 2

    # After detaching both, neither counter advances.
    pre_handle.detach()
    block(mx.nd.ones((3, 5)))

    assert hook_call_count == 1
    assert pre_hook_call_count == 2
@with_seed()
def test_apply():
    """Block.apply visits children first, then the block itself (post-order)."""
    global called_blocks
    called_blocks = []

    def record_name(block):
        global called_blocks
        called_blocks.append(block.name)

    block = nn.HybridSequential(prefix='test_')
    with block.name_scope():
        block.add(nn.Dense(10))
        block.add(nn.Dropout(0.5))
    block.apply(record_name)

    assert called_blocks == ['test_dense0', 'test_dropout0', 'test']
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_summary():
    """Block.summary on CNN/RNN nets, with extra begin_state args, and its
    rejection of already-hybridized (active) blocks."""
    net = gluon.model_zoo.vision.resnet50_v1()
    net.initialize()
    net.summary(mx.nd.ones((32, 3, 224, 224)))

    net2 = nn.Sequential()
    with net2.name_scope():
        net2.add(nn.Embedding(40, 30))
        net2.add(gluon.rnn.LSTM(30))
        # Shares parameters with the embedding layer (tied weights).
        net2.add(nn.Dense(40, flatten=False, params=net2[0].params))
    net2.initialize()
    net2.summary(mx.nd.ones((80, 32)))

    net3 = gluon.rnn.LSTM(30)
    net3.initialize()
    begin_state = net3.begin_state(32)
    net3.summary(mx.nd.ones((80, 32, 5)), begin_state)

    # summary() must refuse to run on a hybridized (cached-graph) net.
    net.hybridize()
    assert_raises(AssertionError, net.summary, mx.nd.ones((32, 3, 224, 224)))
@with_seed()
def test_legacy_save_params():
    """Deprecated save_params/load_params must still round-trip through a
    SymbolBlock built from an exported symbol JSON."""
    net = gluon.nn.HybridSequential(prefix='')
    with net.name_scope():
        net.add(gluon.nn.Conv2D(10, (3, 3)))
        net.add(gluon.nn.Dense(50))
    net.initialize()
    net(mx.nd.ones((1,1,50,50)))
    a = net(mx.sym.var('data'))
    a.save('test.json')
    net.save_params('test.params')
    model = gluon.nn.SymbolBlock(outputs=mx.sym.load_json(open('test.json', 'r').read()),
                                 inputs=mx.sym.var('data'))
    model.load_params('test.params', ctx=mx.cpu())
@with_seed()
def test_sparse_hybrid_block_grad():
    """Row-sparse Embedding gradients must accumulate correctly across two
    forward passes inside one recording scope of a hybridized block."""
    class Embedding(mx.gluon.HybridBlock):
        def __init__(self, num_tokens, embedding_size):
            super(Embedding, self).__init__()
            self.num_tokens = num_tokens

            with self.name_scope():
                self.embedding = mx.gluon.nn.Embedding(
                    num_tokens, embedding_size, sparse_grad=True)

        def hybrid_forward(self, F, words):
            emb = self.embedding(words)
            return emb + F.ones_like(emb)

    embedding = Embedding(20, 3)
    embedding.initialize()
    embedding.hybridize()

    with mx.autograd.record():
        emb0 = embedding(mx.nd.arange(10)).sum()
        emb1 = embedding(mx.nd.arange(10)).sum()
        loss = emb0 + emb1
    loss.backward()
    grad = embedding.embedding.weight.grad().asnumpy()
    # Rows 0-9 were looked up twice each; rows 10-19 never.
    assert (grad[:10] == 2).all()
    assert (grad[10:] == 0).all()
@with_seed()
def test_sparse_hybrid_block():
    """Gradient accumulation through a hybridized block fed CSR input.

    NOTE(review): this definition shadows an earlier function of the same
    name in this file — consider renaming one of them.
    """
    class Linear(mx.gluon.HybridBlock):
        def __init__(self, units):
            super(Linear, self).__init__()
            with self.name_scope():
                self.w = self.params.get('w', shape=(units, units))

        def hybrid_forward(self, F, x, w):
            return F.dot(x, w)

    class SparseBlock(mx.gluon.HybridBlock):
        def __init__(self, units):
            super(SparseBlock, self).__init__()
            with self.name_scope():
                self.net = Linear(units)

        def hybrid_forward(self, F, x):
            return self.net(x) * x

    block = SparseBlock(2)
    block.initialize()
    block.hybridize()
    x = mx.nd.ones((2,2)).tostype('csr')
    with mx.autograd.record():
        z = block(x) + block(x)
    z.backward()
    assert (block.net.w.grad().asnumpy() == 4).all()
def test_hybrid_static_memory_recording():
    """static_alloc hybridization must work both inside and outside a
    train-mode autograd recording scope.

    NOTE(review): unlike neighbouring tests this one lacks ``@with_seed()``
    — possibly intentional, confirm.
    """
    net = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, ctx=mx.context.current_context())
    net.hybridize(static_alloc=True)

    x = mx.nd.random.uniform(shape=(1, 3, 32, 32))
    with mx.autograd.record(True):
        net(x)
    net(x)
def test_share_inputs_outputs():
    """Cached-graph execution when graph inputs and outputs share NDArrays,
    under inline_limit/static_alloc/static_shape combinations."""
    class TestIOBackward(gluon.HybridBlock):
        def __init__(self, prefix=None, params=None):
            super(TestIOBackward, self).__init__(prefix=prefix, params=params)

        def hybrid_forward(self, F, in1, in2):
            return in1 + in2

    class TestIOForward(gluon.HybridBlock):
        def __init__(self, prefix=None, params=None):
            super(TestIOForward, self).__init__(prefix=prefix, params=params)

        def hybrid_forward(self, F, in1):
            return in1

    d1 = mx.nd.arange(10)
    d2 = mx.nd.arange(10)

    params=[{'inline_limit':0},
            {'inline_limit':0, 'static_alloc':True},
            {'inline_limit':0, 'static_alloc':True, 'static_shape':True}]
    # Test the case that inputs and outputs of a forward graph share NDArrays.
    for param in params:
        t = TestIOForward()
        t.hybridize(**param)
        for i in range(5):
            d1.attach_grad()
            out_grad = mx.nd.random.uniform(shape=(10))
            res = t(d1)
            assert_almost_equal(res.asnumpy(), d1.asnumpy())

    param = deepcopy(params[2])
    # NOTE(review): ``(1)`` / ``(0)`` are plain ints, not 1-tuples — if index
    # sequences were intended these should be ``(1,)`` / ``(0,)``; confirm.
    param['param_indices'] = (1)
    param['data_indices'] = (0)
    params.append(param)
    # Test the case that inputs and outputs of a backward graph share NDArrays.
    for param in params:
        t = TestIOBackward()
        t.hybridize(**param)
        for i in range(5):
            d1.attach_grad()
            d2.attach_grad()
            out_grad = mx.nd.random.uniform(shape=(10))
            with mx.autograd.record():
                res = t(d1, d2)
            res.backward(out_grad=out_grad)
            assert_almost_equal(out_grad.asnumpy(), d1.grad.asnumpy())
            assert_almost_equal(out_grad.asnumpy(), d2.grad.asnumpy())
def test_grad_graph_change():
    """Backward must work when the set of outputs participating in the
    gradient changes between calls (index output is not differentiated)."""
    class Model(mx.gluon.HybridBlock):
        def hybrid_forward(self, F, array, index):
            row = array.take(index)
            return row, index
    array = mx.nd.arange(3)
    index = mx.nd.array([2])
    array.attach_grad()
    model = Model()
    model.hybridize(inline_limit=0)
    with mx.autograd.record(train_mode=True):
        row, _ = model(array, index)
    row.backward()
def check_layer_forward_withinput(net, x):
    """Forward/backward ``net`` on ``x`` imperatively and hybridized, and
    assert outputs and input gradients agree.

    Parameters
    ----------
    net : gluon Block
        Network under test; parameters are initialized in place.
    x : NDArray
        Input; a copy is used for the hybridized pass so both runs see
        identical data.
    """
    x_hybrid = x.copy()
    x.attach_grad()
    x_hybrid.attach_grad()
    net.collect_params().initialize()
    with mx.autograd.record():
        out1 = net(x)
    out1.backward()
    net.hybridize()
    with mx.autograd.record():
        out2 = net(x_hybrid)
    out2.backward()
    mx.test_utils.assert_almost_equal(x.grad.asnumpy(), x_hybrid.grad.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-5, atol=1e-6)
@with_seed()
def test_conv2d_16c():
    """Conv2D parity checks over several channel counts and kernel sizes,
    including a kernel as large as the input (224)."""
    chn_list = [16, 256]
    kernel_list = [1, 3]
    kernel_list.append(224)
    batch_size = 4
    class Net(gluon.HybridBlock):
        def __init__(self,
                     chn_num,
                     kernel,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = gluon.nn.Conv2D(chn_num, (kernel, kernel))

        def hybrid_forward(self, F, x):
            out = self.conv0(x)
            return out

    x = mx.nd.random.uniform(-1.0, 1.0, shape=(batch_size, 3, 224, 224))
    for i in range(len(chn_list)):
        for j in range(len(kernel_list)):
            net = Net(chn_list[i], kernel_list[j])
            check_layer_forward_withinput(net, x)
@with_seed()
def test_group_conv2d_16c():
    """Depthwise (groups == channels) Conv2D parity over random input sizes."""
    grp_list = [16]
    input_size_list = np.random.randint(low=3, high=65, size=10).tolist()
    kernel_list = [1, 3]
    batch_size = 4
    class Net(gluon.HybridBlock):
        def __init__(self,
                     chn_num,
                     kernel,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # 1x1 conv lifts 3 input channels to chn_num so the grouped
                # conv can use groups == chn_num.
                self.conv0 = gluon.nn.Conv2D(chn_num, (1, 1))
                self.conv1 = gluon.nn.Conv2D(chn_num, (kernel, kernel), groups=chn_num)

        def hybrid_forward(self, F, x):
            y = self.conv0(x)
            out = self.conv1(y)
            return out

    for i in range(len(input_size_list)):
        x = mx.nd.random.uniform(-1.0, 1.0, shape=(batch_size, 3, input_size_list[i], input_size_list[i]))
        for j in range(len(grp_list)):
            for k in range(len(kernel_list)):
                net = Net(grp_list[j], kernel_list[k])
                check_layer_forward_withinput(net, x)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_deconv2d_16c():
    """Conv2DTranspose parity across paired in/out channel counts and sizes
    (currently skipped, see linked issue)."""
    in_chn_list = [1024, 512, 256, 128, 64, 32, 16]
    out_chn_list = [512, 256, 128, 64, 32, 16, 3]
    kernel_list = [1, 3, 5, 7]
    in_shape = [4, 8, 16, 32, 64, 224]
    batch_size = 4
    class Net(gluon.HybridBlock):
        def __init__(self, chn_num, kernel, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.deconv0 = gluon.nn.Conv2DTranspose(chn_num, (kernel, kernel))

        def hybrid_forward(self, F, x):
            out = self.deconv0(x)
            return out
    for i in range(len(in_shape)):
        x = mx.nd.random.uniform(-1.0, 1.0, shape=(batch_size, in_chn_list[i], in_shape[i], in_shape[i]))
        for j in range(len(kernel_list)):
            net = Net(out_chn_list[i], kernel_list[j])
            check_layer_forward_withinput(net, x)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_batchnorm_16c():
    """Conv2D + BatchNorm parity over random spatial sizes and channel
    counts (currently skipped, see linked issue)."""
    chn_list = [16, 1024]
    shape = np.random.randint(low=1, high=300, size=10)
    shape_list = []
    for i in range(len(shape)):
        shape_list.append((shape[i], shape[i]))
    batch_size = 4
    class Net(gluon.HybridBlock):
        def __init__(self,
                     chn_num,
                     kernel,
                     axis,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = gluon.nn.Conv2D(chn_num, (kernel, kernel))
                self.bn0   = gluon.nn.BatchNorm(axis=axis)

        def hybrid_forward(self, F, x):
            conv = self.conv0(x)
            out = self.bn0(conv)
            return out

    for i in range(len(chn_list)):
        for j in range(len(shape_list)):
            shape = (batch_size, ) + (3,) + shape_list[j]
            x = mx.nd.random.uniform(-1.0, 1.0, shape=shape)
            net = Net(chn_list[i], 1, 1)
            check_layer_forward_withinput(net, x)
@with_seed()
def test_concat():
    """HybridConcurrent: concatenate the outputs of several 1x1 conv
    branches along every axis and check imperative/hybrid parity."""
    chn_list = [16, 64]
    shapes = [1, 3, 5]
    input_num = np.random.randint(low=2, high=11)
    shape_list = []
    for i in range(len(shapes)):
        shape_list.append((shapes[i], shapes[i]))
    batch_size = 4
    class Net(gluon.HybridBlock):
        def __init__(self,
                     check_dim,
                     input_num,
                     chn_num,
                     kernel,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                from mxnet.gluon.contrib.nn import HybridConcurrent
                self.concat = HybridConcurrent(axis=check_dim)
                for i in range(input_num):
                    self.concat.add(gluon.nn.Conv2D(chn_num, (kernel, kernel)))

        def hybrid_forward(self, F, x):
            return self.concat(x)

    for s in range(len(shape_list)):
        # BUG FIX: the spatial shape must be indexed by the outer loop
        # variable ``s``; ``shape_list[i]`` was previously read before ``i``
        # was defined (NameError on the first iteration).
        shape = (batch_size,) + (3,) + shape_list[s]
        x = mx.nd.random.uniform(-1.0, 1.0, shape=shape)
        for i in range(len(chn_list)):
            for axis in range(4):
                net = Net(axis, input_num, chn_list[i], 1)
                check_layer_forward_withinput(net, x)
@with_seed()
def test_reshape_conv():
    """Conv2D fed a reshaped input must match imperative execution."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(64, (3, 3))

        def hybrid_forward(self, F, x):
            # (0, 0) keeps batch and channel dims; spatial dims become 128x32.
            return self.conv0(x.reshape((0, 0, 128, 32)))

    data = mx.nd.random.uniform(shape=(4, 3, 64, 64))
    check_layer_forward_withinput(Net(), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_conv_reshape_conv():
    """Two Conv2D layers, each preceded by a reshape of its input."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(64, (3, 3))
                self.conv1 = nn.Conv2D(128, (3, 3))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.reshape((0, 0, 128, 32)))
            # spatial shape of first is (62, 62)
            return self.conv1(first.reshape((0, 0, 124, 31)))

    data = mx.nd.random.uniform(shape=(4, 3, 64, 64))
    check_layer_forward_withinput(Net(), data)
@with_seed()
def test_slice_conv():
    """Conv2D fed a sliced view of the input."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(16, (3, 3))

        def hybrid_forward(self, F, x):
            return self.conv0(x.slice(begin=(0, 2, 0, 0), end=(4, 5, 32, 32)))

    data = mx.nd.random.uniform(shape=(8, 6, 32, 32))
    check_layer_forward_withinput(Net(), data)
@with_seed()
def test_slice_conv_slice_conv():
    """Two Conv2D layers, each fed a sliced view of its input."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(32, (3, 3))
                self.conv1 = nn.Conv2D(16, (1, 1))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.slice(begin=(0, 0, 0, 0), end=(4, 16, 16, 16)))
            # shape of first is (4, 32, 14, 14)
            return self.conv1(first.slice(begin=(0, 0, 0, 0), end=(4, 16, 3, 3)))

    data = mx.nd.random.uniform(shape=(4, 32, 32, 32))
    check_layer_forward_withinput(Net(), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_slice_conv_reshape_conv():
    """Conv2D on a slice, then Conv2D on a reshape of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(64, (3, 3))
                self.conv1 = nn.Conv2D(128, (3, 3))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.slice(begin=(0, 0, 1, 1), end=(4, 16, 33, 33)))
            # shape of first is (4, 64, 30, 30)
            return self.conv1(first.reshape((0, 0, 60, 15)))

    data = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    check_layer_forward_withinput(Net(), data)
@with_seed()
def test_reshape_conv_slice_conv():
    """
    This test will test gluon Conv2d computation with ndarray reshape and slice
    """
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(16, (3, 3))
                self.conv1 = nn.Conv2D(32, (3, 3))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.reshape((0, 0, 64, 16)))
            # shape of first is (4, 16, 62, 14)
            return self.conv1(first.slice(begin=(0, 0, 0, 0), end=(2, 16, 14, 14)))

    data = mx.nd.random.uniform(shape=(4, 3, 32, 32))
    check_layer_forward_withinput(Net(), data)
@with_seed()
def test_reshape_dense():
    """Dense layer applied to a reshaped 4-D input."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # Random output width keeps the test exercising varied shapes.
                self.dense0 = nn.Dense(np.random.randint(1, 17))

        def hybrid_forward(self, F, x):
            return self.dense0(x.reshape((8, 64, 128, -1)))

    data = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    check_layer_forward_withinput(Net(), data)
@with_seed()
def test_slice_dense():
    """Dense layer applied to a sliced 4-D input."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(np.random.randint(1, 17))
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            begin, end = self.slice_spec
            return self.dense0(x.slice(begin=tuple(begin), end=tuple(end)))

    data = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    check_layer_forward_withinput(Net([[0, 16, 0, 0], [4, 32, 32, 32]]), data)
@with_seed()
def test_slice_dense_slice_dense():
    """Two Dense layers with a slice before each."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(32)
                self.dense1 = nn.Dense(np.random.randint(1, 17))
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            first = self.dense0(x.slice(begin=tuple(self.slice_spec[0]),
                                        end=tuple(self.slice_spec[1])))
            return self.dense1(first.slice(begin=(1, 0), end=(3, 10)))

    data = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    check_layer_forward_withinput(Net([[0, 16, 0, 0], [4, 32, 32, 32]]), data)
@with_seed()
def test_reshape_dense_reshape_dense():
    """Two Dense layers with a reshape before each."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                out0 = np.random.randint(1, 17)
                out1 = np.random.randint(1, 33)
                self.dense0 = nn.Dense(out0)
                self.dense1 = nn.Dense(out1)

        def hybrid_forward(self, F, x):
            first = self.dense0(x.reshape((4, 16, 128, 32)))
            return self.dense1(first.reshape((1, -1)))

    data = mx.nd.random.uniform(shape=(4, 16, 64, 64))
    check_layer_forward_withinput(Net(), data)
@with_seed()
def test_slice_dense_reshape_dense():
    """Dense on a slice, then Dense on a flattened reshape of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                out0 = np.random.randint(1, 17)
                out1 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(out0)
                self.dense1 = nn.Dense(out1)
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            first = self.dense0(x.slice(begin=tuple(self.slice_spec[0]),
                                        end=tuple(self.slice_spec[1])))
            return self.dense1(first.reshape((1, -1)))

    data = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    check_layer_forward_withinput(Net([[0, 16, 0, 0], [4, 32, 32, 32]]), data)
@with_seed()
def test_reshape_dense_slice_dense():
    """Dense on a reshape, then Dense on a slice of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(64)
                self.dense1 = nn.Dense(np.random.randint(1, 17))

        def hybrid_forward(self, F, x):
            first = self.dense0(x.reshape((4, 16, 128, 32)))
            return self.dense1(first.slice(begin=(1, 32), end=(3, 64)))

    data = mx.nd.random.uniform(shape=(4, 16, 64, 64))
    check_layer_forward_withinput(Net(), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_batchnorm():
    """BatchNorm applied to a reshaped convolution output."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(96, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.reshape = shape

        def hybrid_forward(self, F, x):
            features = self.conv0(x)
            return self.bn0(features.reshape(self.reshape))

    data = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    check_layer_forward_withinput(Net((4, 64, 64, -1)), data)
@with_seed()
def test_slice_batchnorm():
    """BatchNorm applied to a sliced convolution output."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            features = self.conv0(x)
            begin, end = self.slice_spec
            return self.bn0(features.slice(begin=tuple(begin), end=tuple(end)))

    data = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    check_layer_forward_withinput(Net([[0, 0, 0, 0], [4, 32, 32, 32]]), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_slice_batchnorm_slice_batchnorm():
    """Two BatchNorm layers with a slice before each."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            features = self.conv0(x)
            first = self.bn0(features.slice(begin=tuple(self.slice_spec[0][0]),
                                            end=tuple(self.slice_spec[0][1])))
            return self.bn1(first.slice(begin=tuple(self.slice_spec[1][0]),
                                        end=tuple(self.slice_spec[1][1])))

    data = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    spec = [[[0, 0, 0, 0], [4, 32, 32, 32]], [[0, 0, 0, 0], [2, 64, 16, 16]]]
    check_layer_forward_withinput(Net(spec), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_batchnorm_reshape_batchnorm():
    """Two BatchNorm layers with a reshape before each."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape

        def hybrid_forward(self, F, x):
            features = self.conv0(x)
            first = self.bn0(features.reshape(self.reshape[0]))
            return self.bn1(first.reshape(self.reshape[1]))

    data = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    check_layer_forward_withinput(Net([(4, 64, 64, -1), (4, 128, -1, 32)]), data)
@with_seed()
def test_slice_batchnorm_reshape_batchnorm():
    """BatchNorm on a slice, then BatchNorm on a reshape of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            features = self.conv0(x)
            first = self.bn0(features.slice(begin=tuple(self.slice_spec[0]),
                                            end=tuple(self.slice_spec[1])))
            return self.bn1(first.reshape(self.reshape))

    data = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    net = Net((1, 128, 64, -1), [[0, 0, 0, 0], [4, 32, 32, 32]])
    check_layer_forward_withinput(net, data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_batchnorm_slice_batchnorm():
    """BatchNorm on a reshape, then BatchNorm on a slice of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            features = self.conv0(x)
            first = self.bn0(features.reshape(self.reshape))
            return self.bn1(first.slice(begin=tuple(self.slice_spec[0]),
                                        end=tuple(self.slice_spec[1])))

    data = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    net = Net((4, 64, 64, -1), [[0, 0, 0, 0], [2, 64, 32, 32]])
    check_layer_forward_withinput(net, data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_pooling2d():
    """Each pooling variant applied to a reshaped input."""
    pooling_layers = [nn.MaxPool2D(strides=(2, 3), padding=(1, 1)),
                      nn.AvgPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.GlobalMaxPool2D(),
                      nn.GlobalAvgPool2D()]

    class Net(gluon.HybridBlock):
        def __init__(self, shape, pooling_layer, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.pool0 = pooling_layer

        def hybrid_forward(self, F, x):
            return self.pool0(x.reshape(self.reshape))

    data = mx.nd.random.uniform(shape=(4, 32, 32, 32))
    for layer in pooling_layers:
        check_layer_forward_withinput(Net((4, 64, 64, -1), layer), data)
@with_seed()
def test_slice_pooling2d():
    """Each pooling variant applied to a sliced input."""
    pooling_layers = [nn.MaxPool2D(strides=(2, 3), padding=(1, 1)),
                      nn.AvgPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.GlobalMaxPool2D(),
                      nn.GlobalAvgPool2D()]

    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, pooling_layer, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice_spec = slice_spec
                self.pool0 = pooling_layer

        def hybrid_forward(self, F, x):
            return self.pool0(x.slice(begin=self.slice_spec[0],
                                      end=self.slice_spec[1]))

    data = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    for layer in pooling_layers:
        check_layer_forward_withinput(Net([(0, 0, 0, 0), (4, 16, 32, 64)], layer),
                                      data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_pooling2d_reshape_pooling2d():
    """Two pooling layers with a reshape before each, over all layer pairs.

    The second reshape is now chosen per combination instead of mutating the
    shared ``shape`` list in place; the old in-place assignment leaked into all
    later iterations and only happened to be harmless because the global
    pooling layers were last in ``pooling_layers``.
    """
    pooling_layers = [nn.MaxPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.AvgPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.GlobalMaxPool2D(),
                      nn.GlobalAvgPool2D()]

    class Net(gluon.HybridBlock):
        def __init__(self, shape, pooling_layer1, pooling_layer2, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            y = self.pool0(x.reshape(self.reshape[0]))
            return self.pool1(y.reshape(self.reshape[1]))

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    for pool0 in pooling_layers:
        for pool1 in pooling_layers:
            if isinstance(pool0, (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                # Global pooling collapses spatial dims to 1x1, so the second
                # reshape must match its (N, C, 1, 1) output.
                second_shape = (256, 128, 1, 1)
            else:
                second_shape = (128, 256, 11, -1)
            net = Net([(128, 256, 64, -1), second_shape], pool0, pool1)
            check_layer_forward_withinput(net, x)
@with_seed()
def test_slice_pooling2d_slice_pooling2d():
    """Two pooling layers with a slice before each, over all layer pairs.

    The second slice is now chosen per combination instead of mutating the
    shared ``slice`` list in place; the old in-place assignment leaked into
    all later iterations and only happened to be harmless because the global
    pooling layers were last in ``pooling_layers``.
    """
    pooling_layers = [nn.MaxPool2D(strides=(2, 3), padding=(1, 1)),
                      nn.AvgPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.GlobalMaxPool2D(),
                      nn.GlobalAvgPool2D()]

    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, pooling_layer1, pooling_layer2, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice_spec = slice_spec
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            y = self.pool0(x.slice(begin=self.slice_spec[0][0],
                                   end=self.slice_spec[0][1]))
            return self.pool1(y.slice(begin=self.slice_spec[1][0],
                                      end=self.slice_spec[1][1]))

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    first_slice = [(8, 0, 100, 50), (16, -1, -1, -1)]
    for pool0 in pooling_layers:
        for pool1 in pooling_layers:
            if isinstance(pool0, (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                # Global pooling emits (N, C, 1, 1); slice within that output.
                second_slice = [(0, 64, 0, 0), (2, -1, 1, 1)]
            else:
                second_slice = [(0, 64, 0, 50), (2, -1, -1, -1)]
            net = Net([first_slice, second_slice], pool0, pool1)
            check_layer_forward_withinput(net, x)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_slice_pooling2d_reshape_pooling2d():
    """Pooling on a slice, then pooling on a reshape, over all layer pairs."""
    pooling_layers = [nn.MaxPool2D(strides=(2, 3), padding=(1, 1)),
                      nn.AvgPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.GlobalMaxPool2D(),
                      nn.GlobalAvgPool2D()]

    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_spec, pooling_layer1, pooling_layer2,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice_spec = slice_spec
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            y = self.pool0(x.slice(begin=self.slice_spec[0],
                                   end=self.slice_spec[1]))
            return self.pool1(y.reshape(self.reshape))

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice_spec = [(8, 0, 100, 50), (16, 128, 256, 256)]
    shape = (32, -1, 0, 0)
    for pool0 in pooling_layers:
        for pool1 in pooling_layers:
            net = Net(shape, slice_spec, pool0, pool1)
            check_layer_forward_withinput(net, x)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_pooling2d_slice_pooling2d():
    """Pooling on a reshape, then pooling on a slice, over all layer pairs.

    The slice is now chosen per combination instead of rebinding the shared
    ``slice`` variable inside the loop; the old rebinding leaked into all
    later iterations and only happened to be harmless because the global
    pooling layers were last in ``pooling_layers``.
    """
    pooling_layers = [nn.MaxPool2D(strides=(2, 3), padding=(1, 1)),
                      nn.AvgPool2D(strides=(2, 2), padding=(1, 1)),
                      nn.GlobalMaxPool2D(),
                      nn.GlobalAvgPool2D()]

    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_spec, pooling_layer1, pooling_layer2,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice_spec = slice_spec
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            y = self.pool0(x.reshape(self.reshape))
            return self.pool1(y.slice(begin=self.slice_spec[0],
                                      end=self.slice_spec[1]))

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    shape = (0, 512, 64, -1)
    for pool0 in pooling_layers:
        for pool1 in pooling_layers:
            if isinstance(pool0, (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                # Global pooling emits (N, C, 1, 1); slice within that output.
                slice_spec = [(8, 256, 0, 0), (-1, -1, 1, 1)]
            else:
                slice_spec = [(8, 256, 10, 20), (-1, -1, -1, 70)]
            net = Net(shape, slice_spec, pool0, pool1)
            check_layer_forward_withinput(net, x)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_deconv():
    """Conv2DTranspose applied to a reshaped input."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.conv0 = nn.Conv2DTranspose(64, (3, 3))

        def hybrid_forward(self, F, x):
            return self.conv0(x.reshape(self.reshape))

    data = mx.nd.random.uniform(shape=(4, 16, 32, 32))
    check_layer_forward_withinput(Net((4, 16, 64, -1)), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_slice_deconv():
    """Conv2DTranspose applied to a sliced input."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice_spec = slice_spec
                self.conv0 = nn.Conv2DTranspose(64, (3, 3))

        def hybrid_forward(self, F, x):
            return self.conv0(x.slice(begin=self.slice_spec[0],
                                      end=self.slice_spec[1]))

    data = mx.nd.random.uniform(shape=(8, 32, 64, 64))
    check_layer_forward_withinput(Net([(0, 16, 0, 0), (4, 32, 32, 32)]), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_deconv_reshape_deconv():
    """Two Conv2DTranspose layers with a reshape before each."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.reshape(self.reshape[0]))
            # shape of first is (4, 32, 66, 18)
            return self.conv1(first.reshape(self.reshape[1]))

    data = mx.nd.random.uniform(shape=(4, 16, 32, 32))
    check_layer_forward_withinput(Net([(4, 16, 64, -1), (4, 32, 33, -1)]), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_slice_deconv_slice_deconv():
    """Two Conv2DTranspose layers with a slice before each."""
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice_spec = slice_spec
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.slice(begin=self.slice_spec[0][0],
                                       end=self.slice_spec[0][1]))
            # shape of first is (4, 32, 66, 18)
            return self.conv1(first.slice(begin=self.slice_spec[1][0],
                                          end=self.slice_spec[1][1]))

    data = mx.nd.random.uniform(shape=(8, 32, 64, 64))
    spec = [[(0, 0, 0, 0), (4, 16, 32, 32)], [(0, 0, 0, 0), (2, 16, 16, 16)]]
    check_layer_forward_withinput(Net(spec), data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_reshape_deconv_slice_deconv():
    """Conv2DTranspose on a reshape, then on a slice of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice_spec = slice_spec
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.reshape(self.reshape))
            # shape of first is (4, 32, 66, 18)
            return self.conv1(first.slice(begin=self.slice_spec[0],
                                          end=self.slice_spec[1]))

    data = mx.nd.random.uniform(shape=(4, 16, 32, 32))
    net = Net((4, 16, 64, -1), [(0, 0, 0, 0), (2, 16, 16, 16)])
    check_layer_forward_withinput(net, data)
@with_seed()
@unittest.skip('skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')
def test_slice_deconv_reshape_deconv():
    """Conv2DTranspose on a slice, then on a reshape of that output."""
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice_spec = slice_spec
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(96, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            first = self.conv0(x.slice(begin=self.slice_spec[0],
                                       end=self.slice_spec[1]))
            # shape of first is (4, 32, 34, 34)
            return self.conv1(first.reshape(self.reshape))

    data = mx.nd.random.uniform(shape=(8, 32, 64, 64))
    net = Net((4, 64, 34, -1), [(4, 0, 0, 0), (8, 16, 32, 32)])
    check_layer_forward_withinput(net, data)
@with_seed()
def test_reshape_activation():
    """Each activation type applied to a reshaped input."""
    class Net(gluon.HybridBlock):
        def __init__(self, act, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.act = nn.Activation(act)

        def hybrid_forward(self, F, x):
            return self.act(x.reshape(self.reshape))

    for act in ["relu", "sigmoid", "tanh", "softrelu"]:
        data = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
        check_layer_forward_withinput(Net(act, (4, 32, 32, -1)), data)
@with_seed()
def test_slice_activation():
    """Each activation type applied to a sliced input."""
    class Net(gluon.HybridBlock):
        def __init__(self, act, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice_spec = slice_spec
                self.act = nn.Activation(act)

        def hybrid_forward(self, F, x):
            return self.act(x.slice(begin=self.slice_spec[0],
                                    end=self.slice_spec[1]))

    for act in ["relu", "sigmoid", "tanh", "softrelu"]:
        data = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
        check_layer_forward_withinput(
            Net(act, [(0, 16, 32, 32), (4, 32, 64, 64)]), data)
@with_seed()
def test_reshape_activation_reshape_activation():
    """Pairs of distinct activations, each preceded by a reshape."""
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            first = self.act0(x.reshape(self.reshape[0]))
            return self.act1(first.reshape(self.reshape[1]))

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act0 in acts:
        for act1 in acts:
            if act0 == act1:
                continue
            data = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
            net = Net(act0, act1, [(4, 32, 32, -1), (4, 32, 16, -1)])
            check_layer_forward_withinput(net, data)
@with_seed()
def test_slice_activation_slice_activation():
    """Pairs of distinct activations, each preceded by a slice."""
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice_spec = slice_spec
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            first = self.act0(x.slice(begin=self.slice_spec[0][0],
                                      end=self.slice_spec[0][1]))
            return self.act1(first.slice(begin=self.slice_spec[1][0],
                                         end=self.slice_spec[1][1]))

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act0 in acts:
        for act1 in acts:
            if act0 == act1:
                continue
            data = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
            spec = [[(0, 16, 32, 32), (4, 32, 64, 64)],
                    [(2, 0, 16, 16), (4, 16, 32, 32)]]
            check_layer_forward_withinput(Net(act0, act1, spec), data)
@with_seed()
def test_reshape_activation_slice_activation():
    """Activation on a reshape, then a different activation on a slice."""
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, shape, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice_spec = slice_spec
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            first = self.act0(x.reshape(self.reshape))
            return self.act1(first.slice(begin=self.slice_spec[0],
                                         end=self.slice_spec[1]))

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act0 in acts:
        for act1 in acts:
            if act0 == act1:
                continue
            data = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
            net = Net(act0, act1, (4, 32, 32, -1),
                      [(0, 0, 0, 0), (2, 16, 16, 16)])
            check_layer_forward_withinput(net, data)
@with_seed()
def test_slice_activation_reshape_activation():
    """Activation on a slice, then a different activation on a reshape."""
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, shape, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice_spec = slice_spec
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            first = self.act0(x.slice(begin=self.slice_spec[0],
                                      end=self.slice_spec[1]))
            return self.act1(first.reshape(self.reshape))

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act0 in acts:
        for act1 in acts:
            if act0 == act1:
                continue
            data = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
            net = Net(act0, act1, (4, 32, 32, -1),
                      [(0, 16, 32, 32), (4, 32, 64, 64)])
            check_layer_forward_withinput(net, data)
if __name__ == '__main__':
    # Run the whole module's tests under nose when executed directly.
    import nose
    nose.runmodule()
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/tests/python/unittest/test_gluon.py | Python | apache-2.0 | 88,352 |
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import logging
import threading
# Guards one-time root-logger initialization across threads.
initLock = threading.Lock()
# Set to True once the root logger has been configured.
rootLoggerInitialized = False
# Module-level configuration; override before the first getLogger() call.
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
level = logging.INFO
file_log = None  # File name
console_log = True
def init_handler(handler):
    """Attach the module's shared log format to *handler*."""
    formatter = Formatter(log_format)
    handler.setFormatter(formatter)
def init_logger(logger):
    """Apply the module-level settings to *logger*.

    Sets the log level, then attaches a file handler (when ``file_log`` names
    a file) and/or a console handler (when ``console_log`` is true).
    """
    logger.setLevel(level)
    if file_log is not None:
        file_handler = logging.FileHandler(file_log)
        init_handler(file_handler)
        logger.addHandler(file_handler)
    if console_log:
        console_handler = logging.StreamHandler()
        init_handler(console_handler)
        logger.addHandler(console_handler)
def initialize():
    """Configure the root logger exactly once (thread-safe)."""
    global rootLoggerInitialized
    with initLock:
        if rootLoggerInitialized:
            return
        init_logger(logging.getLogger())
        rootLoggerInitialized = True
def getLogger(name=None):
    """Return ``logging.getLogger(name)``, ensuring root logging is configured first."""
    initialize()
    return logging.getLogger(name)
# This formatter provides a way to hook in formatTime.
class Formatter(logging.Formatter):
    """logging.Formatter whose record timestamps can be overridden via a hook.

    Set ``Formatter.DATETIME_HOOK`` to a zero-argument callable; when it
    returns a non-None value, ``str()`` of that value replaces the normal
    formatted time (useful for backtesting with simulated clocks).
    """
    DATETIME_HOOK = None

    def formatTime(self, record, datefmt=None):
        hook = Formatter.DATETIME_HOOK
        override = hook() if hook is not None else None
        if override is None:
            return super(Formatter, self).formatTime(record, datefmt)
        return str(override)
| Yam-cn/potato | engine/logger.py | Python | apache-2.0 | 2,104 |
#!/usr/bin/python
# hitstats.py
# originall by cshields
# heavily redone by marineam
# watches an apache log and provides a funky rss feed
# of the city/country location of the latest hit
import re, time, os, sys, socket, getopt, GeoIP
import BaseHTTPServer
# The master mirror round-robins between serving locally and redirecting
# to the other mirror; every other host only serves locally.
if socket.gethostname() == "ftp-osl":
    HOSTS = [ 'localhost',
              'http://ftp-chi.example.org:8000/' ]
else:
    HOSTS = [ 'localhost' ]
# Index of the next entry in HOSTS to answer with (rotated per request).
host_index = 0;
# location of the apache log to watch
LOGDIR = '/var/log/apache2/transfer'
# the location of the GeoLiteCity.dat (or equiv) file
GEODATA = "/usr/local/share/GeoIP/GeoLiteCity.dat"
gi = GeoIP.open(GEODATA, GeoIP.GEOIP_STANDARD)
# Most recent geo lookup results, updated by RssHandler.update_geo().
current_locale = ""
current_lat = ""
current_long = ""
# stat() result and open handle for the currently-tailed log file.
st_old = None
log = None
# Matches the first dotted-quad IPv4 address in a log line.
re_ip = re.compile(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b')
class RssHandler (BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves an RSS feed describing the geo-location of the latest log hit.

    Requests are round-robined across HOSTS: the 'localhost' entry is served
    directly, other entries get an HTTP redirect so mirrors share the load.
    """
    def do_HEAD(self):
        # Same headers as a successful GET, body omitted.
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.end_headers()
    def do_GET(self):
        """Respond to a GET request."""
        global host_index;
        if HOSTS[host_index] == "localhost":
            self.send_xml()
        else:
            self.send_redirect(HOSTS[host_index])
        # Rotate to the next host for the following request.
        host_index = (host_index + 1) % len(HOSTS)
    def send_redirect(self, host):
        # 302 to a mirror; Content-type kept for clients that ignore redirects.
        self.send_response(302)
        self.send_header("Location", host)
        self.send_header("Content-type", "text/xml")
        self.end_headers()
    def send_xml(self):
        """Build and send the RSS body from the current_* module globals."""
        if self.update_ip() == 1:
            # Try again one more time
            self.update_ip()
        # NOTE(review): `port` is a global set in main() — confirm main() ran.
        rss = """<rss version="2.0"><channel>
<title>Latest download locale</title>
<link>http://%s:%d/</link>
<pubDate>%s</pubDate>
<item><title>%s</title></item>
<item><title>%s</title></item>
<item><title>%s</title></item>\n</channel></rss>\n""" % (
            socket.getfqdn(), port, time.asctime(),
            current_locale, current_lat, current_long)
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.end_headers()
        self.wfile.write(rss)
    def update_geo(self, ip):
        """Look up *ip* in the GeoIP database and update the current_* globals."""
        global current_locale
        global current_lat
        global current_long
        try:
            gir = gi.record_by_addr(ip)
        except:
            self.log_error("Failed to get geo data for ip '%s'", ip)
        else:
            # Records without both city and country are skipped (globals keep
            # their previous values).
            if type(gir['city']) == str and type(gir['country_name']) == str:
                current_locale = gir['city'] + ", " + gir['country_name']
                current_lat = str(gir['latitude'])
                current_long = str(gir['longitude'])
            else:
                self.log_error("Failed to get city/coutnry info for ip '%s'",
                               ip)
    def update_ip(self):
        """Tail today's access log and geo-locate the last complete line.

        Returns 0 on success or no-change, 1 if no IP was found in the line.
        """
        global st_old
        global log
        # Logs rotate daily by date-stamped filename.
        logfile = "%s/%s.log" % (LOGDIR, time.strftime("%Y%m%d"));
        st_new = os.stat(logfile)
        if not log or not st_old or st_new.st_ino != st_old.st_ino:
            # First call, or the log rotated (new inode): (re)open it.
            if log:
                log.close()
            log = open(logfile, 'r')
            st_old = st_new
        elif st_new.st_size == st_old.st_size:
            # log has not updated, don't bother reading
            return 0
        # seek to 1 byte before the end of the file
        try:
            log.seek(-1, 2)
        except IOError:
            self.log_error("Empty logfile!")
            return 0
        # Walk backwards until the newline that terminates the previous line,
        # leaving the file position at the start of the final line.
        c = None
        while c != '\n':
            # Seek two chars back and read 1
            log.seek(-2, 1)
            c = log.read(1)
        line = log.readline()
        match = re_ip.search(line)
        if match:
            ip = match.group()
            self.update_geo(ip)
            return 0
        else:
            self.log_error("Failed to find ip address in line '%s'",line)
            return 1
def run_server(port):
    """Serve RSS requests on *port* until interrupted with Ctrl-C."""
    server = BaseHTTPServer.HTTPServer(("", port), RssHandler)
    sys.stderr.write("Server Start: %s\n" % time.asctime())
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop; fall through to the stop banner.
        pass
    sys.stderr.write("Server Stop: %s\n" % time.asctime())
def usage():
    """Print the command line synopsis to stdout."""
    # Parenthesized form prints identically under Python 2 while also being
    # valid Python 3 syntax.
    print("hitstats.py --port <port number> [--log <log file>]")
def main():
    """Parse command line options and start the stats server.

    Recognized options: -p/--port (required), -l/--log (redirect stderr to
    the given file), -h/--help.
    """
    global port
    port = None
    log = None
    try:
        # BUG FIX: getopt long option names must not include the leading
        # "--"; the previous "--help" entry registered an unusable option.
        opts, args = getopt.getopt(sys.argv[1:], "p:l:h",
                                   ["port=", "log=", "help"])
    except getopt.GetoptError:
        usage()
        sys.exit(1)
    for (opt, arg) in opts:
        if opt in ("-p", "--port"):
            port = int(arg)
        if opt in ("-l", "--log"):
            log = arg
        if opt in ("-h", "--help"):
            usage()
            sys.exit(0)
    if not port:
        sys.stderr.write("No port given\n")
        usage()
        sys.exit(1)
    if log:
        # Redirect stderr into the (unbuffered) log file so the
        # start/stop banners and request errors are captured.
        logfd = open(log, 'a+', 0)
        sys.stderr.flush()
        os.dup2(logfd.fileno(), sys.stderr.fileno())
    run_server(port)
# Standard script entry-point guard: only run the server when executed
# directly, not when imported.
if __name__ == '__main__':
    main()
| osuosl/osuosl-mirror-sync | sbin.mirror/hitstats.py | Python | gpl-2.0 | 5,062 |
"""Tests for scripts/pbounds.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import pbounds
class PBoundsTest(unittest.TestCase):
    """Integration test for the |pbounds| lldb command.

    Requires a running process named "TestApp" containing a view tagged 19.
    """

    def __init__(self, *args, **kwargs):
        super(PBoundsTest, self).__init__(*args, **kwargs)
        # Created lazily by testPFrame; tearDown only cleans up what exists.
        self.debugger = None
        self.target = None

    def tearDown(self):
        if self.debugger and self.target:
            self.debugger.DeleteTarget(self.target)

    def testPFrame(self):
        """Tests the expected output of the |pbounds <instance>| command."""
        self.debugger = lldb.SBDebugger.Create()
        self.debugger.SetAsync(False)
        self.target = self.debugger.CreateTarget('')
        error = lldb.SBError()
        process = self.target.AttachToProcessWithName(self.debugger.GetListener(),
                                                      'TestApp', False, error)
        if not process:
            # BUG FIX: use fail() instead of assertTrue(False, ...) --
            # clearer intent and proper unconditional failure reporting.
            self.fail('Could not attach to process "TestApp"')
        self.debugger.SetSelectedTarget(self.target)
        result = lldb.SBCommandReturnObject()
        # Get the test view, which has an arbitrary tag of 19.
        self.debugger.GetCommandInterpreter().HandleCommand(
            'po [[UIWindow keyWindow] viewWithTag:19]', result)
        self.assertTrue(result.Succeeded())
        output = result.GetOutput()
        # Extract the view's address ("0x....") from the po output.
        start_index = output.find('0x')
        # BUG FIX: assertNotEqual gives a useful failure message, unlike
        # assertTrue(x != -1).
        self.assertNotEqual(start_index, -1)
        end_index = output.find(';')
        self.assertNotEqual(end_index, -1)
        view = output[start_index:end_index]
        pbounds.pbounds(self.debugger, view, result, None)
        self.assertTrue(result.Succeeded())
        expected_output_regex = r'\(origin = \(x = 0, y = 0\), size = \(width = 100, height = 100\)\)'
        self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
| mrhappyasthma/HappyDebugging | tests/pbounds_test.py | Python | mit | 1,813 |
__author__ = 'tri'
class Waiter:
    """Registry mapping keywords to waiting objects.

    Subclasses are expected to override run() to drive the registered
    objects; the base implementation is abstract.
    """

    def __init__(self):
        # keyword -> registered object
        self.objects = {}

    def register_waiter(self, keyword, ob):
        """Store *ob* under *keyword*, replacing any previous entry."""
        self.objects[keyword] = ob

    def unregister_waiter(self, keyword):
        """Remove the entry for *keyword* (KeyError if not registered)."""
        self.objects.pop(keyword)

    def run(self):
        """Abstract: subclasses must implement the wait loop."""
        raise NotImplementedError
| ductri/game_programming-ass1 | utils/customer_waiter_pattern/waiter.py | Python | apache-2.0 | 360 |
# coding=utf-8
"""
ItemNSVD1 Collaborative Filtering Recommender
[Rating Prediction]
Literature:
István Pilászy and Domonkos Tikk:
Recommending new movies: even a few ratings are more valuable than metadata
RecSys 2009
https://dl.acm.org/citation.cfm?id=1639731
"""
# © 2019. Case Recommender (MIT License)
import numpy as np
from caserec.recommenders.rating_prediction.base_nsvd1 import BaseNSVD1
from caserec.utils.extra_functions import timed
from caserec.utils.process_data import ReadFile
__author__ = 'Arthur Fortes <fortes.arthur@gmail.com>'
class ItemNSVD1(BaseNSVD1):
    # Item-attribute-aware NSVD1 matrix factorization: item factors are a
    # linear combination of per-metadata weight vectors (q_i = x_i . W).
    def __init__(self, train_file=None, test_file=None, metadata_file=None, output_file=None, epochs=30,
                 learn_rate=0.01, delta=0.015, factors=10, init_mean=0, init_stdev=0.1, stop_criteria=0.001,
                 batch=False, n2=10, learn_rate2=0.01, delta2=0.015, sep='\t', output_sep='\t', metadata_sep='\t',
                 metadata_as_binary=False, random_seed=None):
        """
        ItemNSVD1 for rating prediction

        Usage::

            >> ItemNSVD1(train, test, metadata_file='user_metadata.dat').compute()
            >> ItemNSVD1(train, test, metadata_file='user_metadata.dat', batch=True).compute()

        :param train_file: File which contains the train set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type train_file: str

        :param test_file: File which contains the test set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type test_file: str, default None

        :param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns
        (user metadata).
        :type metadata_file: str

        :param output_file: File with dir to write the final predictions
        :type output_file: str, default None

        :param epochs: Number of epochs over the training data
        :type epochs: int, default 10

        :param learn_rate: Learning rate (alpha)
        :type learn_rate: float, default 0.05

        :param delta: Regularization value
        :type delta: float, default 0.015

        :param factors: Number of latent factors per user/item
        :type factors: int, default 10

        :param init_mean: Mean of the normal distribution used to initialize the latent factors
        :type init_mean: float, default 0

        :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
        :type init_stdev: float, default 0.1

        :param stop_criteria: Difference between errors for stopping criteria
        :type stop_criteria: float, default 0.001

        :param batch: Tf True, use batch model to train the model
        :type batch: bool, default False

        :param n2: Number of interactions in batch step
        :type n2: int, default 10

        :param learn_rate2: Learning rate in batch step
        :type learn_rate2: float, default 0.01

        :param delta2: Regularization value in Batch step
        :type delta2: float, default 0.015

        :param sep: Delimiter for input files
        :type sep: str, default '\t'

        :param output_sep: Delimiter for output file
        :type output_sep: str, default '\t'

        :param metadata_sep: Delimiter for similarity or metadata file
        :type metadata_sep: str, default '\t'

        :param metadata_as_binary: f True, the explicit value will be transform to binary
        :type metadata_as_binary: bool, default False

        :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
        :type random_seed: int, default None
        """
        super(ItemNSVD1, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
                                        factors=factors, init_mean=init_mean, init_stdev=init_stdev, sep=sep,
                                        output_sep=output_sep, random_seed=random_seed)

        self.recommender_name = 'ItemNSVD1'
        self.metadata_file = metadata_file
        self.batch = batch
        self.epochs = epochs
        self.learn_rate = learn_rate
        self.delta = delta
        self.stop_criteria = stop_criteria
        self.n2 = n2
        self.learn_rate2 = learn_rate2
        self.delta2 = delta2
        self.metadata_sep = metadata_sep
        self.metadata_as_binary = metadata_as_binary

        # internal vars
        # x: item x metadata matrix; non_zero_x: per-item list of non-zero
        # metadata columns; d: per-item 1 / (x_i . x_i) normalizer.
        self.x = None
        self.non_zero_x = None
        self.d = None

    def init_model(self):
        """
        Method to treat and initialize the model. Extends init_model from BaseNSVD1
        """
        super(ItemNSVD1, self).init_model()

        self.non_zero_x = []
        self.d = []

        self.metadata = ReadFile(self.metadata_file, sep=self.metadata_sep, as_binary=self.metadata_as_binary
                                 ).read_metadata_or_similarity()

        # create metadata matrix (user x metadata)
        self.x = np.zeros((self.number_items, len(self.metadata['col_2'])))

        meta_to_meta_id = {}
        for m, data in enumerate(self.metadata['col_2']):
            meta_to_meta_id[data] = m

        for item in self.metadata['col_1']:
            for m in self.metadata['dict'][item]:
                self.x[self.item_to_item_id[item], meta_to_meta_id[m]] = self.metadata['dict'][item][m]

        # create header info for metadata
        sparsity = (1 - (self.metadata['number_interactions'] /
                         (len(self.metadata['col_1']) * len(self.metadata['col_2'])))) * 100

        self.extra_info_header = ">> metadata:: %d items and %d metadata (%d interactions) | sparsity:: %.2f%%" % \
                                 (len(self.metadata['col_1']), len(self.metadata['col_2']),
                                  self.metadata['number_interactions'], sparsity)

        self.number_metadata = len(self.metadata['col_2'])

        for i in range(self.number_items):
            self.non_zero_x.append(list(np.where(self.x[i] != 0)[0]))
            # Items without metadata yield a division by zero here; the
            # errstate guard silences the warning (d becomes inf).
            with np.errstate(divide='ignore'):
                self.d.append(1 / np.dot(self.x[i].T, self.x[i]))

        # Create Factors
        self.create_factors()

    def fit(self):
        """
        This method performs iterations of stochastic gradient ascent over the training data.
        """
        for k in range(self.epochs):
            rmse = 0
            count_error = 0

            if self.batch:
                # Batch mode: update p/q first, then refit the metadata
                # weights w for n2 passes so that x.w tracks q.
                self.q = np.dot(self.x, self.w)

                for i, item in enumerate(self.items):
                    c, e = self.update_factors(item, i)
                    rmse += e
                    count_error += c

                for _ in range(self.n2):
                    for i, item in enumerate(self.items):
                        e = self.q[i] - (np.dot(self.x[i], self.w))
                        for l in self.non_zero_x[i]:
                            self.w[l] += self.learn_rate2 * (self.d[i] * np.dot(self.x[i][l], e.T) -
                                                             (self.w[l] * self.delta2))
                    self.q = np.dot(self.x, self.w)

            else:
                # Online mode: refresh q_i from the metadata weights, do the
                # SGD step, then push the change in q_i back into w.
                for i, item in enumerate(self.items):
                    self.q[i] = np.dot(self.x[i], self.w)
                    a = np.array(self.q[i])
                    c, e = self.update_factors(item, i)
                    rmse += e
                    count_error += c

                    for l in self.non_zero_x[i]:
                        self.w[l] += self.d[i] * self.x[i][l] * (self.q[i] - a)

            rmse = np.sqrt(rmse / float(count_error))

            # Early stop as soon as the RMSE improvement drops below the
            # configured threshold.
            if (np.fabs(rmse - self.last_rmse)) <= self.stop_criteria:
                break
            else:
                self.last_rmse = rmse

    def update_factors(self, item, i):
        # One SGD pass over all users who rated *item*; returns the number
        # of updates performed and the accumulated squared error.
        c, e = 0, 0
        for user in self.train_set['users_viewed_item'].get(item, []):
            u = self.user_to_user_id[user]
            rui = self._predict(u, i)
            error = self.train_set['feedback'][user][item] - rui
            b = np.array(self.p[u])

            # update factors (user/item vectors plus both bias terms)
            self.p[u] += self.learn_rate * (error * self.q[i] - self.delta * self.p[u])
            self.q[i] += self.learn_rate * (error * b - self.delta * self.q[i])
            self.b[u] += self.learn_rate * (error - self.delta * self.b[u])
            self.c[i] += self.learn_rate * (error - self.delta * self.c[i])

            c += 1
            e += error ** 2
        return c, e

    def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
        """
        Extends compute method from BaseRatingPrediction. Method to run recommender algorithm

        :param verbose: Print recommender and database information
        :type verbose: bool, default True

        :param metrics: List of evaluation measures
        :type metrics: list, default None

        :param verbose_evaluation: Print the evaluation results
        :type verbose_evaluation: bool, default True

        :param as_table: Print the evaluation results as table
        :type as_table: bool, default False

        :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
        :type table_sep: str, default '\t'
        """
        super(ItemNSVD1, self).compute(verbose=verbose)

        if verbose:
            self.init_model()
            if self.extra_info_header is not None:
                print(self.extra_info_header)
            print("training_time:: %4f sec" % timed(self.fit))
            print("prediction_time:: %4f sec" % timed(self.predict))
            print('\n')
        else:
            # Execute all in silence without prints
            self.init_model()
            self.fit()
            self.predict()

        self.write_predictions()

        if self.test_file is not None:
            self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
| ArthurFortes/CaseRecommender | caserec/recommenders/rating_prediction/item_nsvd1.py | Python | mit | 10,046 |
# Copyright (C) 2010,2011 Chris Lalancette <clalance@redhat.com>
# Copyright (C) 2012-2014 Chris Lalancette <clalancette@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
Miscellaneous utility functions.
"""
import os
import random
import subprocess
import tempfile
import errno
import stat
import shutil
import pycurl
import gzip
import time
import select
try:
import configparser
except ImportError:
import ConfigParser as configparser
import collections
import ftplib
import struct
def generate_full_auto_path(relative):
    """
    Function to find the absolute path to an unattended installation file.
    """
    # The automated installation files are installed under <package>/auto,
    # so the result is always <dir of this module>/auto/<relative>.
    if relative is None:
        raise Exception("The relative path cannot be None")
    auto_dir = os.path.join(os.path.dirname(__file__), "auto")
    return os.path.abspath(os.path.join(auto_dir, relative))
def executable_exists(program):
    """
    Function to find out whether an executable exists in the PATH
    of the user.  If so, the absolute path to the executable is returned.
    If not, an exception is raised.
    """
    def _runnable(candidate):
        # A usable executable must both exist and carry the execute bit.
        return os.path.exists(candidate) and os.access(candidate, os.X_OK)

    if program is None:
        raise Exception("Invalid program name passed")

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was supplied; check it directly.
        if _runnable(program):
            return program
    else:
        # Bare command name; search every entry of $PATH.
        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry, program)
            if _runnable(candidate):
                return candidate
    raise Exception("Could not find %s" % (program))
def write_bytes_to_fd(fd, buf):
    """
    Function to write all bytes in "buf" to "fd".  This handles both EINTR
    and short writes, and returns the total number of bytes written.
    """
    total = len(buf)
    written = 0
    while written < total:
        try:
            written += os.write(fd, buf[written:])
        except OSError as err:
            # python's os.write() can raise an exception on EINTR, which
            # according to the man page can happen if a signal was received
            # before any data was written; nothing was consumed, so retry.
            if err.errno != errno.EINTR:
                raise
    return written
def read_bytes_from_fd(fd, num):
    """
    Function to read and return up to "num" bytes from "fd".  This handles
    the EINTR situation where no bytes were read before a signal happened,
    in which case the read is simply retried.
    """
    while True:
        try:
            return os.read(fd, num)
        except OSError as err:
            # Interrupted before any data was read; anything else is fatal.
            if err.errno != errno.EINTR:
                raise
def copyfile_sparse(src, dest):
    """
    Function to copy a file sparsely if possible.  The logic here is
    all taken from coreutils cp, specifically the 'sparse_copy' function.

    :param src: path of the file to copy from (must exist)
    :param dest: path to copy to; parent directories are created as needed
    :raises Exception: if src/dest is None, src is missing, or src and dest
        are the same file
    """
    if src is None:
        raise Exception("Source of copy cannot be None")
    if dest is None:
        raise Exception("Destination of copy cannot be None")
    if not os.path.exists(src):
        raise Exception("Source '%s' does not exist" % (src))
    if os.path.exists(dest) and os.path.samefile(src, dest):
        raise Exception("Source '%s' and dest '%s' are the same file" % (src, dest))
    base = os.path.dirname(dest)
    if not os.path.exists(base):
        mkdir_p(base)
    src_fd = os.open(src, os.O_RDONLY)
    try:
        dest_fd = os.open(dest, os.O_WRONLY|os.O_CREAT|os.O_TRUNC)
        try:
            sb = os.fstat(src_fd)
            # See io_blksize() in coreutils for an explanation of why 32*1024
            buf_size = max(32*1024, sb.st_blksize)
            size = sb.st_size
            destlen = 0
            while size != 0:
                buf = read_bytes_from_fd(src_fd, min(buf_size, size))
                if len(buf) == 0:
                    break
                buflen = len(buf)
                if buf == '\0'*buflen:
                    # All-zero block: seek forward instead of writing, which
                    # leaves a hole (sparse region) in the destination.
                    # NOTE(review): this comparison assumes str buffers
                    # (Python 2); under Python 3 os.read() returns bytes and
                    # this branch would never match -- confirm target runtime.
                    os.lseek(dest_fd, buflen, os.SEEK_CUR)
                else:
                    write_bytes_to_fd(dest_fd, buf)
                destlen += buflen
                size -= buflen
            # A trailing hole does not extend the file by itself; fix up the
            # destination size explicitly.
            os.ftruncate(dest_fd, destlen)
        finally:
            os.close(dest_fd)
    finally:
        os.close(src_fd)
def bsd_split(line, digest_type):
    """
    Function to split a BSD-style checksum line into a filename and
    checksum.  Returns (digest, filename), or (None, None) if the line does
    not match the "TYPE (filename) = digest" layout.
    """
    pos = len(digest_type)
    # An optional single space may follow the digest type name.
    if line[pos] == ' ':
        pos += 1
    if line[pos] != '(':
        return None, None
    pos += 1
    # find end of filename.  The BSD 'md5' and 'sha1' commands do not escape
    # filenames, so search backwards for the last ')'
    close_paren = line.rfind(')')
    if close_paren == -1:
        # could not find the ending ), fail
        return None, None
    filename = line[pos:close_paren]
    rest = line[(close_paren + 1):].lstrip()
    if rest[0] != '=':
        return None, None
    digest = rest[1:].lstrip()
    if digest[-1] == '\n':
        digest = digest[:-1]
    return digest, filename
def sum_split(line, digest_bits):
    """
    Function to split a normal Linux checksum line into a filename and
    checksum.  Returns (digest, filename), or (None, None) for a malformed
    or too-short line.

    :param line: one line from a *SUM file ("<hexdigest> [ *]<filename>")
    :param digest_bits: digest size in bits (128 for MD5, 160 for SHA1,
        256 for SHA256)
    """
    # BUG FIX: use integer (floor) division; plain "/" yields a float on
    # Python 3 and every subsequent slice raises TypeError.
    digest_hex_bytes = digest_bits // 4
    # length of hex message digest + blank and binary indicator (2 bytes) +
    # minimum file length (1 byte)
    min_length = digest_hex_bytes + 2 + 1
    if line[0] == '\\':
        min_length += 1
    if len(line) < min_length:
        # if the line is too short, skip it
        return None, None

    if line[0] == '\\':
        current = digest_hex_bytes + 1
        hex_digest = line[1:current]
        escaped_filename = True
    else:
        current = digest_hex_bytes
        hex_digest = line[0:current]
        escaped_filename = False

    # if the digest is not immediately followed by a white space, it is an
    # error
    if line[current] != ' ' and line[current] != '\t':
        return None, None
    current += 1

    # the next byte is the mode indicator: another space means text mode,
    # '*' means binary mode; anything else is an error
    if line[current] != ' ' and line[current] != '*':
        return None, None
    # BUG FIX: always skip over the mode indicator byte; previously a
    # text-mode (' ') entry left the indicator attached to the filename, so
    # lookups by filename could never match.
    current += 1

    filename = line[current:]
    if filename.endswith('\n'):
        filename = filename[:-1]

    if escaped_filename:
        # FIXME: a \0 is not allowed in the sum file format, but
        # string_escape allows it.  We'd probably have to implement our
        # own codec to fix this.
        # NOTE(review): 'string_escape' is a Python 2 only codec; this
        # branch needs porting for Python 3.
        filename = filename.decode('string_escape')

    return hex_digest, filename
def get_sum_from_file(sumfile, file_to_find, digest_bits, digest_type):
    """
    Function to get a checksum digest out of a checksum file given a
    filename.  Both BSD-style ("TYPE (file) = digest") and regular
    ("digest  file") lines are understood.

    :returns: the hex digest string, or None if file_to_find is not listed
    """
    retval = None
    # BUG FIX: use a context manager so the sumfile is closed even when
    # parsing raises (previously f.close() was skipped on exceptions).
    with open(sumfile, 'r') as f:
        for line in f:
            # remove any leading whitespace
            line = line.lstrip()

            # ignore blank lines
            if len(line) == 0:
                continue

            # ignore comment lines
            if line[0] == '#':
                continue

            if line.startswith(digest_type):
                # OK, if it starts with a string of ["MD5", "SHA1", "SHA256"],
                # then this is a BSD-style sumfile
                hex_digest, filename = bsd_split(line, digest_type)
            else:
                # regular sumfile
                hex_digest, filename = sum_split(line, digest_bits)

            if hex_digest is None or filename is None:
                continue

            if filename == file_to_find:
                retval = hex_digest
                break
    return retval
def get_md5sum_from_file(sumfile, file_to_find):
    """
    Function to get an MD5 checksum out of a checksum file given a filename.

    :param sumfile: path to the MD5SUM-style file to search
    :param file_to_find: filename whose digest is wanted
    :returns: the hex digest string, or None if not listed
    """
    return get_sum_from_file(sumfile, file_to_find, 128, "MD5")
def get_sha1sum_from_file(sumfile, file_to_find):
    """
    Function to get a SHA1 checksum out of a checksum file given a filename.

    :param sumfile: path to the SHA1SUM-style file to search
    :param file_to_find: filename whose digest is wanted
    :returns: the hex digest string, or None if not listed
    """
    return get_sum_from_file(sumfile, file_to_find, 160, "SHA1")
def get_sha256sum_from_file(sumfile, file_to_find):
    """
    Function to get a SHA256 checksum out of a checksum file given a
    filename.

    :param sumfile: path to the SHA256SUM-style file to search
    :param file_to_find: filename whose digest is wanted
    :returns: the hex digest string, or None if not listed
    """
    return get_sum_from_file(sumfile, file_to_find, 256, "SHA256")
def string_to_bool(instr):
    """
    Function to take a string and determine whether it is True, Yes, False,
    or No.  It takes a single argument, which is the string to examine.

    Returns True if instr is "Yes" or "True", False if instr is "No"
    or "False", and None otherwise.
    """
    if instr is None:
        raise Exception("Input string was None!")
    normalized = instr.lower()
    if normalized in ('no', 'false'):
        return False
    if normalized in ('yes', 'true'):
        return True
    return None
def generate_macaddress():
    """
    Function to generate a random MAC address.  The first three octets are
    fixed to the 52:54:00 prefix; the last three are random.
    """
    octets = [0x52, 0x54, 0x00]
    octets.extend(random.randint(0x00, 0xff) for _ in range(3))
    return ':'.join("%02x" % octet for octet in octets)
class SubprocessException(Exception):
    """
    Class for subprocess exceptions.  In addition to a error message, it
    also has a retcode member that has the returncode from the command.
    """
    def __init__(self, msg, retcode):
        # Let the base Exception carry the message (so str() works), and
        # keep the command's exit status for callers to inspect.
        super(SubprocessException, self).__init__(msg)
        self.retcode = retcode
def subprocess_check_output(*popenargs, **kwargs):
    """
    Function to call a subprocess and gather the output.

    Returns a (stdout, stderr, returncode) tuple and raises
    SubprocessException when the command exits non-zero.  An optional
    'printfn' keyword argument is called with each chunk of output as it
    arrives, for live logging.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    if 'stderr' in kwargs:
        raise ValueError('stderr argument not allowed, it will be overridden.')
    printfn = None
    # NOTE(review): dict.has_key() and the "except select.error, e" syntax
    # below are Python 2 only.
    if kwargs.has_key('printfn'):
        printfn = kwargs['printfn']
        del kwargs['printfn']
    # Raises up front if the program is not present in PATH.
    executable_exists(popenargs[0][0])
    process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               *popenargs, **kwargs)
    poller = select.poll()
    select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
    poller.register(process.stdout.fileno(), select_POLLIN_POLLPRI)
    poller.register(process.stderr.fileno(), select_POLLIN_POLLPRI)
    stdout = ''
    stderr = ''
    retcode = process.poll()
    # Drain both pipes until the child exits, throttling the loop to about
    # one iteration per second.
    while retcode is None:
        start = time.time()
        try:
            ready = poller.poll(1000)
        except select.error, e:
            if e.args[0] == errno.EINTR:
                continue
            raise
        for fd, mode in ready:
            if mode & select_POLLIN_POLLPRI:
                data = os.read(fd, 4096)
                if not data:
                    # EOF on this pipe; stop watching it.
                    poller.unregister(fd)
                else:
                    if printfn is not None:
                        printfn(data)
                    if fd == process.stdout.fileno():
                        stdout += data
                    else:
                        stderr += data
            else:
                # Ignore hang up or errors.
                poller.unregister(fd)
        end = time.time()
        if (end - start) < 1:
            time.sleep(1 - (end - start))
        retcode = process.poll()
    # Collect anything still buffered after the child exited.
    tmpout, tmperr = process.communicate()
    stdout += tmpout
    stderr += tmperr
    if printfn is not None:
        printfn(tmperr)
        printfn(tmpout)
    if retcode:
        cmd = ' '.join(*popenargs)
        raise SubprocessException("'%s' failed(%d): %s" % (cmd, retcode, stderr), retcode)
    return (stdout, stderr, retcode)
def mkdir_p(path):
    """
    Function to make a directory and all intermediate directories as
    necessary.  The functionality differs from os.makedirs slightly, in
    that this function does *not* raise an error if the directory already
    exists.
    """
    if path is None:
        raise Exception("Path cannot be None")
    if path == '':
        # this can happen if the user did something like call os.path.dirname()
        # on a file without directories.  Since os.makedirs throws an exception
        # in that case, check for it here and allow it.
        return
    try:
        os.makedirs(path)
    except OSError as err:
        # Tolerate only "already exists and is a directory"; anything else
        # (permissions, a file in the way, ...) is a real error.
        already_a_dir = (err.errno == errno.EEXIST) and os.path.isdir(path)
        if not already_a_dir:
            raise
def copytree_merge(src, dst, symlinks=False, ignore=None):
    """
    Function to copy an entire directory recursively.  The functionality
    differs from shutil.copytree, in that this function does *not* raise
    an exception if the directory already exists.

    It is based on: http://docs.python.org/2.7/library/shutil.html#copytree-example

    :param src: source directory (must exist)
    :param dst: destination directory; created or merged into as needed
    :param symlinks: if True, recreate symlinks instead of copying targets
    :param ignore: optional callable(src, names) returning names to skip
    :raises shutil.Error: list of (srcname, dstname, why) for all failures
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    # Unlike shutil.copytree, tolerate a pre-existing destination.
    mkdir_p(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree_merge(srcname, dstname, symlinks, ignore)
            else:
                shutil.copy2(srcname, dstname)
            # FIXME: What about devices, sockets etc.?
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except shutil.Error as err:
            errors.extend(err.args[0])
    try:
        shutil.copystat(src, dst)
    except shutil.WindowsError:
        # can't copy file access times on Windows
        # NOTE(review): shutil.WindowsError exists only on Python 2 (and is
        # None on non-Windows platforms there) -- confirm target runtime.
        pass
    except OSError as why:
        # NOTE(review): extend() with a tuple flattens it into three
        # separate items; append() was probably intended.  Left as-is to
        # preserve behavior.
        errors.extend((src, dst, str(why)))
    if errors:
        raise shutil.Error(errors)
def copy_modify_file(inname, outname, subfunc):
    """
    Function to copy a file from inname to outname, passing each line
    through subfunc first.  subfunc is expected to be a method that takes
    a single argument in (the next line), and returns a string to be
    written to the output file after modification (if any).
    """
    if inname is None:
        raise Exception("input filename is None")
    if outname is None:
        raise Exception("output filename is None")
    if subfunc is None:
        raise Exception("subfunction is None")
    # BUG FIX: use the callable() builtin; collections.Callable was removed
    # from the collections top level in Python 3.10 (and callable() works on
    # Python 2.6+ as well).
    if not callable(subfunc):
        raise Exception("subfunction is not callable")

    # BUG FIX: context managers guarantee both files are closed even when
    # subfunc raises part way through the copy.
    with open(inname, 'r') as infile:
        with open(outname, 'w') as outfile:
            for line in infile:
                outfile.write(subfunc(line))
def write_cpio(inputdict, outputfile):
    """
    Function to write a CPIO archive in the "New ASCII Format".  The
    inputlist is a dictionary of files to put in the archive, where the
    dictionary key is the path to the file on the local filesystem and the
    dictionary value is the location that the file should have in the cpio
    archive.  The outputfile is the location of the final cpio archive that
    will be written.

    On any failure the partially-written outputfile is removed and the
    exception re-raised.

    NOTE(review): header fields and file contents are written as str --
    this assumes Python 2 byte-string semantics; the file would need 'wb'
    mode and bytes on Python 3.
    """
    if inputdict is None:
        raise Exception("input dictionary was None")
    if outputfile is None:
        raise Exception("output file was None")
    outf = open(outputfile, "w")
    try:
        for inputfile, destfile in list(inputdict.items()):
            inf = open(inputfile, 'r')
            st = os.fstat(inf.fileno())
            # 070701 is the magic for new CPIO (newc in cpio parlance)
            outf.write("070701")
            # inode (really just needs to be unique)
            outf.write("%08x" % (st[stat.ST_INO]))
            # mode
            outf.write("%08x" % (st[stat.ST_MODE]))
            # uid is 0
            outf.write("00000000")
            # gid is 0
            outf.write("00000000")
            # nlink (always a single link for a single file)
            outf.write("00000001")
            # mtime
            outf.write("%08x" % (st[stat.ST_MTIME]))
            # filesize
            outf.write("%08x" % (st[stat.ST_SIZE]))
            # devmajor
            outf.write("%08x" % (os.major(st[stat.ST_DEV])))
            # dev minor
            outf.write("%08x" % (os.minor(st[stat.ST_DEV])))
            # rdevmajor (always 0)
            outf.write("00000000")
            # rdevminor (always 0)
            outf.write("00000000")
            # namesize (the length of the name plus 1 for the NUL padding)
            outf.write("%08x" % (len(destfile) + 1))
            # check (always 0)
            outf.write("00000000")
            # write the name of the inputfile minus the leading /
            stripped = destfile.lstrip('/')
            outf.write(stripped)
            # we now need to write sentinel NUL byte(s).  We need to make the
            # header (110 bytes) plus the filename, plus the sentinel a
            # multiple of 4 bytes.  Note that we always need at *least* one NUL,
            # so if it is exactly a multiple of 4 we need to write 4 NULs
            outf.write("\x00"*(4 - ((110+len(stripped)) % 4)))
            # now write the data from the input file
            outf.writelines(inf)
            inf.close()
            # we now need to write out NUL byte(s) to make it a multiple of 4.
            # note that unlike the name, we do *not* have to have any NUL bytes,
            # so if it is already aligned on 4 bytes do nothing
            remainder = st[stat.ST_SIZE] % 4
            if remainder != 0:
                outf.write("\x00"*(4 - remainder))
        # now that we have written all of the file entries, write the trailer
        outf.write("070701")
        # zero inode
        outf.write("00000000")
        # zero mode
        outf.write("00000000")
        # zero uid
        outf.write("00000000")
        # zero gid
        outf.write("00000000")
        # one nlink
        outf.write("00000001")
        # zero mtime
        outf.write("00000000")
        # zero filesize
        outf.write("00000000")
        # zero devmajor
        outf.write("00000000")
        # zero devminor
        outf.write("00000000")
        # zero rdevmajor
        outf.write("00000000")
        # zero rdevminor
        outf.write("00000000")
        # 0xB namesize
        outf.write("0000000B")
        # zero check
        outf.write("00000000")
        # trailer
        outf.write("TRAILER!!!")
        # finally, we need to pad to the closest 512 bytes
        outf.write("\x00"*(512 - (outf.tell() % 512)))
    except:
        # Best-effort cleanup of the partial archive, then re-raise the
        # original exception (bare except is deliberate here: it always
        # re-raises).
        os.unlink(outputfile)
        raise
    outf.close()
def config_get_key(config, section, key, default):
    """
    Function to retrieve config parameters out of the config file.
    Returns the configured value, or *default* when the config object is
    None or the section/key is missing.
    """
    if config is None:
        return default
    if not config.has_section(section):
        return default
    if not config.has_option(section, key):
        return default
    return config.get(section, key)
def config_get_boolean_key(config, section, key, default):
    """
    Function to retrieve boolean config parameters out of the config file.
    Returns *default* when the key is absent; raises if the configured
    value is not a recognized boolean string.
    """
    raw = config_get_key(config, section, key, None)
    if raw is None:
        return default

    parsed = string_to_bool(raw)
    if parsed is None:
        raise Exception("Configuration parameter '%s' must be True, Yes, False, or No" % (key))
    return parsed
def config_get_path(config, section, key, default):
    """
    Function to get an user-expanded path out of the config file at
    the passed in section and key.  If the value is not in the config
    file, then the default value is returned.  If the expanded path is
    not absolute, an error is raised.
    """
    raw = config_get_key(config, section, key, default)
    expanded = os.path.expanduser(raw)
    if not os.path.isabs(expanded):
        raise Exception("Config key '%s' must have an absolute path" % (key))
    return expanded
def rmtree_and_sync(directory):
    """
    Function to remove a directory tree and do an fsync afterwards.  Because
    the removal of the directory tree can cause a lot of metadata updates, it
    can cause a lot of disk activity.  By doing the fsync, we ensure that any
    metadata updates caused by us will not cause subsequent steps to fail.  This
    cannot help if the system is otherwise very busy, but it does ensure that
    the problem is not self-inflicted.
    """
    shutil.rmtree(directory)
    # fsync the parent directory so the unlink metadata reaches disk.
    parent_fd = os.open(os.path.dirname(directory), os.O_RDONLY)
    try:
        os.fsync(parent_fd)
    finally:
        os.close(parent_fd)
def parse_config(config_file):
    """
    Function to parse the configuration file.  If the passed in config_file is
    None, then the default configuration file is used.

    :returns: the populated parser object (possibly empty when no config
        file was found -- callers fall back to internal defaults)
    """
    # NOTE(review): SafeConfigParser and readfp() are deprecated in Python 3
    # and removed in 3.12; fine for the Python 2 runtime this file targets.
    config = configparser.SafeConfigParser()
    if config_file is not None:
        # If the config_file passed in is not None, then we want to try to read
        # that config file (after expanding it).  If that config file doesn't
        # exist, we want to throw an error (which is why we use readfp here).
        config.readfp(open(os.path.expanduser(config_file)))
    else:
        # The config file was not passed in, so we want to use one of the
        # defaults.  First we check to see if a ~/.oz/oz.cfg exists; if it does,
        # we use that.  Otherwise we fall back to the system-wide version in
        # /etc/oz/oz.cfg.  If neither of those exist, we don't throw an error
        # but instead let Oz pick sane defaults internally.
        parsed = config.read(os.path.expanduser("~/.oz/oz.cfg"))
        if len(parsed) == 0:
            config.read("/etc/oz/oz.cfg")
    return config
def default_output_dir():
    """
    Function to get the default path to the output directory.  Root gets
    the system libvirt image store; other users get a per-user directory.
    """
    return "/var/lib/libvirt/images" if os.geteuid() == 0 else "~/.oz/images"
def default_data_dir():
    """
    Function to get the default path to the data directory.  Root gets the
    system-wide location; other users get a per-user directory.
    """
    return "/var/lib/oz" if os.geteuid() == 0 else "~/.oz"
def default_sshprivkey():
    """
    Function to get the default path to the SSH private key.  Root uses the
    system-wide key; other users get a per-user key path.
    """
    if os.geteuid() != 0:
        return "~/.oz/id_rsa-icicle-gen"
    return "/etc/oz/id_rsa-icicle-gen"
def default_screenshot_dir():
    """
    Function to get the default path to the screenshot directory.  The directory
    is generated relative to the default data directory.

    :returns: "<default data dir>/screenshots" (may contain an unexpanded ~)
    """
    return os.path.join(default_data_dir(), "screenshots")
def http_get_header(url, redirect=True):
    """
    Function to get the HTTP headers from a URL.  The available headers will be
    returned in a dictionary.  If redirect=True (the default), then this
    function will automatically follow http redirects through to the final
    destination, entirely transparently to the caller.  If redirect=False, then
    this function will follow http redirects through to the final destination,
    and also store that information in the 'Redirect-URL' key.  Note that
    'Redirect-URL' will always be None in the redirect=True case, and may be
    None in the redirect=False case if no redirects were required.
    """
    info = {}
    def _header(buf):
        """
        Internal function that is called back from pycurl perform() for
        header data.
        """
        buf = buf.strip()
        if len(buf) == 0:
            return
        # BUG FIX: split on the first ':' only; header values such as
        # "Location: http://..." contain colons themselves and were being
        # truncated by the unbounded split.
        split = buf.split(':', 1)
        if len(split) < 2:
            # not a valid header; skip
            return
        key = split[0].strip()
        value = split[1].strip()
        info[key] = value

    def _data(buf):
        """
        Empty function that is called back from pycurl perform() for body data.
        """
        pass

    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(c.NOBODY, True)
    c.setopt(c.HEADERFUNCTION, _header)
    c.setopt(c.HEADER, True)
    c.setopt(c.WRITEFUNCTION, _data)
    if redirect:
        c.setopt(c.FOLLOWLOCATION, True)
    c.perform()
    info['HTTP-Code'] = c.getinfo(c.HTTP_CODE)
    if info['HTTP-Code'] == 0:
        # if this was a file:/// URL, then the HTTP_CODE returned 0.
        # set it to 200 to be compatible with http
        info['HTTP-Code'] = 200
    if not redirect:
        info['Redirect-URL'] = c.getinfo(c.REDIRECT_URL)

    c.close()
    return info
def http_download_file(url, fd, show_progress, logger):
    """
    Function to download a file from url to file descriptor fd.

    If show_progress is true, progress messages are emitted through
    logger.debug().  fd must be a file descriptor open for writing.
    """
    class Progress(object):
        """
        Internal class to represent progress on the connection.  This is only
        required so that we have somewhere to store the "last_mb" variable
        that is not global.
        """
        def __init__(self):
            self.last_mb = -1
        def progress(self, down_total, down_current, up_total, up_current):
            """
            Function that is called back from the pycurl perform() method to
            update the progress information.
            """
            if down_total == 0:
                return
            # NOTE(review): 10485760 is 10 MiB, so despite the "mb" names a
            # message is logged only every 10 MiB (and it prints kB).
            current_mb = int(down_current) / 10485760
            # also log once at the very end, when the download completes
            if current_mb > self.last_mb or down_current == down_total:
                self.last_mb = current_mb
                logger.debug("%dkB of %dkB" % (down_current/1024, down_total/1024))
    def _data(buf):
        """
        Function that is called back from the pycurl perform() method to
        actually write data to disk.
        """
        write_bytes_to_fd(fd, buf)
    progress = Progress()
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(c.CONNECTTIMEOUT, 5)
    c.setopt(c.WRITEFUNCTION, _data)
    c.setopt(c.FOLLOWLOCATION, 1)
    if show_progress:
        # NOPROGRESS=0 enables the progress callback below
        c.setopt(c.NOPROGRESS, 0)
        c.setopt(c.PROGRESSFUNCTION, progress.progress)
    c.perform()
    c.close()
def ftp_download_directory(server, username, password, basepath, destination):
    """
    Function to recursively download an entire directory structure over FTP.

    server/username/password identify the FTP account, basepath is the remote
    directory to mirror, and destination is the local directory that receives
    the tree.
    """
    ftp = ftplib.FTP(server)
    ftp.login(username, password)
    def _recursive_ftp_download(sourcepath):
        """
        Function to iterate and download a remote ftp folder
        """
        original_dir = ftp.pwd()
        try:
            ftp.cwd(sourcepath)
        except ftplib.error_perm:
            # cwd failed, so sourcepath is a plain file; download it
            relativesourcepath = os.path.relpath(sourcepath, basepath)
            destinationpath = os.path.join(destination, relativesourcepath)
            if not os.path.exists(os.path.dirname(destinationpath)):
                os.makedirs(os.path.dirname(destinationpath))
            # use a context manager so the local file is closed (and its
            # buffers flushed) even if the transfer fails; the original
            # leaked the file handle
            with open(destinationpath, "wb") as f:
                ftp.retrbinary("RETR " + sourcepath, f.write)
            return
        # sourcepath is a directory; recurse into each entry
        names = ftp.nlst()
        for name in names:
            _recursive_ftp_download(os.path.join(sourcepath, name))
        ftp.cwd(original_dir)
    _recursive_ftp_download(basepath)
    ftp.close()
def _gzip_file(inputfile, outputfile, outputmode):
    """
    Internal function to gzip the input file and place it in the outputfile.
    If the outputmode is 'ab', then the input file will be appended to the
    output file, and if the outputmode is 'wb' then the input file will be
    written over the output file.
    """
    with open(inputfile, 'rb') as f:
        # close the GzipFile even if writelines() raises; the original
        # leaked the handle (and its unflushed buffer) on error
        gzf = gzip.GzipFile(outputfile, mode=outputmode)
        try:
            gzf.writelines(f)
        finally:
            gzf.close()
def gzip_append(inputfile, outputfile):
    """
    Function to gzip and append the data from inputfile onto output file.

    Thin wrapper around _gzip_file in append ('ab') mode.
    """
    _gzip_file(inputfile, outputfile, outputmode='ab')
def gzip_create(inputfile, outputfile):
    """
    Function to gzip the data from inputfile and place it into outputfile,
    overwriting any existing data in outputfile.
    """
    try:
        _gzip_file(inputfile, outputfile, outputmode='wb')
    except:
        # this function created outputfile, so remove the partial result
        # before letting the original exception propagate
        if os.access(outputfile, os.F_OK):
            os.unlink(outputfile)
        raise
def check_qcow_size(filename):
    """
    Function to detect if an image is in qcow format.  If it is, return the size
    of the underlying disk image.  If it isn't, return None.
    """
    # For interested parties, this is the QCOW header struct in C
    # struct qcow_header {
    #    uint32_t magic;
    #    uint32_t version;
    #    uint64_t backing_file_offset;
    #    uint32_t backing_file_size;
    #    uint32_t cluster_bits;
    #    uint64_t size; /* in bytes */
    #    uint32_t crypt_method;
    #    uint32_t l1_size;
    #    uint64_t l1_table_offset;
    #    uint64_t refcount_table_offset;
    #    uint32_t refcount_table_clusters;
    #    uint32_t nb_snapshots;
    #    uint64_t snapshots_offset;
    # };
    # And in Python struct format string-ese
    qcow_struct = ">IIQIIQIIQQIIQ"  # > means big-endian
    qcow_magic = 0x514649FB  # 'Q' 'F' 'I' 0xFB
    header_size = struct.calcsize(qcow_struct)
    # open in binary mode; text mode would decode/translate the raw header
    # bytes (and fails outright for arbitrary binary data on Python 3)
    with open(filename, "rb") as f:
        pack = f.read(header_size)
    if len(pack) < header_size:
        # file is too small to even hold a qcow header, so it cannot
        # be a qcow image
        return None
    unpack = struct.unpack(qcow_struct, pack)
    if unpack[0] == qcow_magic:
        # field 5 is the size of the underlying disk image in bytes
        return unpack[5]
    return None
def recursively_add_write_bit(inputdir):
    """
    Function to walk a directory tree, adding the write bit to every file
    and directory.  This is mostly useful right before deleting a tree of
    files extracted from an ISO, since those were all read-only to begin
    with.
    """
    for dirpath, dirnames, filenames in os.walk(inputdir):
        # If the path is a symlink, and it is an absolute symlink, this would
        # attempt to change the permissions of the *host* file, not the
        # file that is relative to here.  That is no good, and could be a
        # security problem if Oz is being run as root.  We skip all paths that
        # are symlinks; what they point to will be changed later on.
        if os.path.islink(dirpath):
            continue
        # add the owner-write bit while keeping all other mode bits intact
        os.chmod(dirpath, os.stat(dirpath).st_mode|stat.S_IWUSR)
        for name in filenames:
            fullpath = os.path.join(dirpath, name)
            # we have the same guard for symlinks as above, for the same reason
            if os.path.islink(fullpath):
                continue
            try:
                # if there are broken symlinks in the ISO,
                # then the below might fail.  This probably
                # isn't fatal, so just allow it and go on
                os.chmod(fullpath, os.stat(fullpath).st_mode|stat.S_IWUSR)
            except OSError as err:
                # ENOENT means the file vanished (e.g. a dangling link);
                # anything else is a real error and is propagated
                if err.errno != errno.ENOENT:
                    raise
| moofrank/oz | oz/ozutil.py | Python | lgpl-2.1 | 31,713 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for testing the API service.
"""
import datetime
import pytz
from magnum.api.controllers.v1 import bay as bay_controller
from magnum.api.controllers.v1 import baymodel as baymodel_controller
from magnum.api.controllers.v1 import cluster as cluster_controller
from magnum.api.controllers.v1 import cluster_template as cluster_tmp_ctrl
from magnum.api.controllers.v1 import federation as federation_controller
from magnum.tests.unit.db import utils
def remove_internal(values, internal):
    """Drop internal-only attributes from a sample body.

    internal is a list of JSON-pointer style names ('/attr'); every
    matching key is removed from values.  uuid is kept even if listed,
    since it may be posted.
    """
    # NOTE(yuriyz): internal attributes should not be posted, except uuid
    blocked = set()
    for attr in internal:
        if attr != '/uuid':
            blocked.add(attr.lstrip('/'))
    return {key: value for (key, value) in values.items()
            if key not in blocked}
def baymodel_post_data(**kw):
    """Return a baymodel body suitable for POSTing to the API."""
    sample = utils.get_test_cluster_template(**kw)
    hidden = baymodel_controller.BayModelPatchType.internal_attrs()
    return remove_internal(sample, hidden)
def cluster_template_post_data(**kw):
    """Return a cluster template body suitable for POSTing to the API."""
    sample = utils.get_test_cluster_template(**kw)
    hidden = cluster_tmp_ctrl.ClusterTemplatePatchType.internal_attrs()
    return remove_internal(sample, hidden)
def bay_post_data(**kw):
    """Return a bay body suitable for POSTing to the API.

    Bays use the older field names, so the cluster fields from the db
    test utils are renamed to their bay equivalents.
    """
    kw.update({'for_api_use': True})
    data = utils.get_test_cluster(**kw)
    # map the cluster-era field names onto the bay API names
    data['baymodel_id'] = kw.get('baymodel_id', data['cluster_template_id'])
    data['bay_create_timeout'] = kw.get('bay_create_timeout', 15)
    del data['cluster_template_id']
    del data['create_timeout']
    hidden = bay_controller.BayPatchType.internal_attrs()
    return remove_internal(data, hidden)
def cluster_post_data(**kw):
    """Return a cluster body suitable for POSTing to the API."""
    kw.update({'for_api_use': True})
    data = utils.get_test_cluster(**kw)
    data['create_timeout'] = kw.get('create_timeout', 15)
    data['merge_labels'] = kw.get('merge_labels', False)
    hidden = cluster_controller.ClusterPatchType.internal_attrs()
    return remove_internal(data, hidden)
def cert_post_data(**kw):
    """Return a certificate body suitable for POSTing to the API."""
    defaults = {
        'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
        'csr': 'fake-csr',
        'pem': 'fake-pem',
    }
    return {field: kw.get(field, default)
            for field, default in defaults.items()}
def quota_post_data(**kw):
    """Return a quota body suitable for POSTing to the API."""
    # the db test utils already produce an API-shaped quota body, so no
    # internal attributes need stripping here
    return utils.get_test_quota(**kw)
def mservice_get_data(**kw):
    """Simulate what the RPC layer will get from DB """
    # fixed timezone-aware timestamp so results are deterministic
    faketime = datetime.datetime(2001, 1, 1, tzinfo=pytz.UTC)
    defaults = {
        'binary': 'magnum-conductor',
        'host': 'fake-host',
        'id': 13,
        'report_count': 13,
        'disabled': False,
        'disabled_reason': None,
        'forced_down': False,
        'last_seen_at': faketime,
        'created_at': faketime,
        'updated_at': faketime,
    }
    return {field: kw.get(field, default)
            for field, default in defaults.items()}
def federation_post_data(**kw):
    """Return a federation body suitable for POSTing to the API."""
    sample = utils.get_test_federation(**kw)
    hidden = federation_controller.FederationPatchType.internal_attrs()
    return remove_internal(sample, hidden)
def nodegroup_post_data(**kw):
    """Return a nodegroup body suitable for POSTing to the API."""
    # internal attributes are listed explicitly for nodegroups rather
    # than taken from a PatchType
    hidden = ['/cluster_id', '/project_id', '/node_addresses', '/is_default',
              '/created_at', '/updated_at', '/status', '/status_reason',
              '/version', '/stack_id']
    sample = utils.get_test_nodegroup(**kw)
    sample['merge_labels'] = kw.get('merge_labels', False)
    return remove_internal(sample, hidden)
| ArchiFleKs/magnum | magnum/tests/unit/api/utils.py | Python | apache-2.0 | 4,063 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for relative positional embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
def ConvertToBlocks(x, block_size, padding_val=0.0):
  """Turns a sequence into non-overlapping blocks.

  Args:
    x: a tensor of [batch, time, ...].
    block_size: int. Number of time frames in a block.
    padding_val: float. value on the padded frames.

  Returns:
    A tensor of [batch, num_blocks, block_size, ...], with necessary paddings,
    where output[:, i, ...] are x[:, i*block_size:(i+1)*block_size, ...].
  """
  shape = py_utils.GetShape(x)
  batch, timesteps = shape[:2]
  if block_size < 1:
    raise ValueError('block_size must be at least 1, got {}'.format(block_size))
  # Round the time axis up to a whole number of blocks, pad the tail with
  # padding_val, then fold time into (num_blocks, block_size).
  num_blocks = (timesteps + block_size - 1) // block_size
  padded_len = num_blocks * block_size
  padded = py_utils.PadSequenceDimension(x, padded_len, padding_val)
  return tf.reshape(padded, [batch, num_blocks, block_size] + shape[2:])
def ExtractBlockContext(x,
                        block_size,
                        left_context,
                        right_context,
                        padding_val=0.0):
  """Extracts temporal context for every block.

  Args:
    x: a tensor of [batch, time, ...].
    block_size: int. Number of time frames in a block.
    left_context: int. Left context size.
    right_context: int. Right context size.
    padding_val: float. value on the padded frames.

  Returns:
    A tensor of [batch, num_blocks, context_size, ...], with necessary paddings,
    where context_size = block_size + (left_context - 1) + right_context,
    and output[:, i, ...] are x[:, start-left_context+1:end+right_context, ...],
    start = i * block_size, end = (i + 1) * block_size.

  Raises:
    ValueError: if block_size, left_context or right_context are outside
      their documented ranges.
  """
  if block_size < 1:
    raise ValueError('block_size must be at least 1, got {}'.format(block_size))
  if left_context < 1 or left_context > block_size + 1:
    raise ValueError(
        'left_context must be at least 1 and at most block_size + 1 = {}, '
        'got {}'.format(block_size + 1, left_context))
  if right_context < 0 or right_context > block_size:
    raise ValueError(
        'right_context must be at least 0 and at most block_size = {}, '
        'got {}'.format(block_size, right_context))
  block = ConvertToBlocks(x, block_size, padding_val)
  concat_list = [block]
  # NOTE(review): tf.roll wraps around, so the left context of the first
  # block (and the right context of the last) comes from the opposite end
  # of the sequence; presumably masked out by MakeCausalPadding -- confirm.
  if left_context > 1:
    if block_size == left_context - 1:
      # left context is exactly one whole block: shift blocks right by one
      left_block = tf.roll(block, shift=1, axis=1)
    else:
      # shift the sequence right and re-block; the first left_context - 1
      # frames of shifted block i are the last frames of original block i-1
      x_shift = tf.roll(x, shift=left_context - 1, axis=1)
      x_shift_block = ConvertToBlocks(x_shift, block_size, padding_val)
      left_block = x_shift_block[:, :, :left_context - 1:, ...]
    concat_list = [left_block] + concat_list
  if right_context > 0:
    if block_size == right_context:
      # right context is exactly one whole block: shift blocks left by one
      right_block = tf.roll(block, shift=-1, axis=1)
    else:
      # shift the sequence left and re-block; the last right_context frames
      # of shifted block i are the first frames of original block i+1
      x_shift = tf.roll(x, shift=-right_context, axis=1)
      x_shift_block = ConvertToBlocks(x_shift, block_size, padding_val)
      right_block = x_shift_block[:, :, -right_context:, ...]
    concat_list += [right_block]
  return tf.concat(concat_list, axis=2)
def MakeCausalPadding(seq_len, block_size, left_context, right_context):
  """Makes the causal padding tensor for a full sequence.

  Args:
    seq_len: int or scalar int tensor. Sequence length.
    block_size: int. Number of time frames in a block.
    left_context: int. Left context size.
    right_context: int. Right context size.

  Returns:
    A tensor of [num_blocks, block_size, context_size] taking values in {0, 1},
    where context_size = block_size + (left_context - 1) + right_context.
    Element b, i, j is zero if in the b-th block, the i-th frame can access
    the j-th frame in the context.  (So 1.0 marks a disallowed or padded
    source/target pair.)
  """
  seq_len = py_utils.with_dependencies([
      py_utils.assert_greater_equal(
          seq_len, 1, message='seq_len must be at least 1')
  ], seq_len)
  num_blocks = (seq_len + block_size - 1) // block_size
  context_size = block_size + (left_context - 1) + right_context
  # [num_blocks, block_size]: source positions in the original sequence.
  src_positions = tf.reshape(
      tf.range(num_blocks * block_size), [num_blocks, block_size])
  # [num_blocks,]: source positions at the start of each block.
  block_start_positions = tf.range(0, num_blocks * block_size, block_size)
  # [context_size]: positions relative to the block start.
  relative_context_positions = tf.range(context_size) - (left_context - 1)
  # [num_blocks, context_size]: target positions in the original sequence.
  tgt_positions = (
      block_start_positions[:, tf.newaxis] +
      relative_context_positions[tf.newaxis, :])
  # [num_blocks, block_size, context_size]: position differences between source-
  # target pairs.
  position_diff = src_positions[:, :, tf.newaxis] - tgt_positions[:,
                                                                  tf.newaxis, :]
  # [num_blocks, block_size, context_size]: if attention is allowed between
  # source-target pairs (target within [src - left_context + 1,
  # src + right_context]).
  valid_atten = tf.math.logical_and(-right_context <= position_diff,
                                    position_diff < left_context)
  # [num_blocks, block_size]: if the source position is valid, not padded.
  valid_src = src_positions < seq_len
  # [num_blocks, context_size]: if the target position is valid, not padded.
  valid_tgt = tf.math.logical_and(0 <= tgt_positions, tgt_positions < seq_len)
  valid_atten &= tf.math.logical_and(valid_src[:, :, tf.newaxis],
                                     valid_tgt[:, tf.newaxis, :])
  # invert: 0.0 = attention allowed, 1.0 = masked
  padding = 1.0 - tf.cast(valid_atten, dtype=tf.float32)
  return padding
def RelShift(x):
  """Performs relative shift on 4D tensor (first 2 axis are batching dims).

  Given input of shape [?, ?, W, W], this does "relative shifting" for the
  last two dims, s.t. output[b, n, i, j] = 0 if i > j else input[b, n, i, j-i]

  Args:
    x: A Tensor of shape [?, ?, W, W]

  Returns:
    A Tensor of the same shape as input with its content shifted (as described
    above).
  """
  b, n, w, _ = py_utils.GetShape(x)
  x = py_utils.HasShape(x, [-1, -1, w, w])
  # Classic rel-shift trick: pad one zero column on the right, then
  # reinterpret each trailing [W, W+1] matrix as [W+1, W]; the flattening
  # realigns the data so that row i ends up shifted by i.  The surplus
  # row is dropped by the final slice.
  x = tf.pad(x, ((0, 0), (0, 0), (0, 0), (0, 1)))
  x = tf.reshape(x, [b, n, w + 1, w])
  x = x[:, :, :w, :]
  return x
def _RelPositionBias(query, abs_pos_emb):
  """Computes relative position bias for general cases.

  Args:
    query: [B, T, N, H] tensor.
    abs_pos_emb: [2T - 1, N, H] tensor; abs_pos_emb[i] is the embedding of
      relative distance i - (T-1).

  Returns:
    The [B, N, T, T] positional-bias term of the attention logits.
  """
  _, t, n, h = py_utils.GetShape(query)
  abs_pos_emb = py_utils.HasShape(abs_pos_emb, [2 * t - 1, n, h])
  # abs_pos_emb is [-(T-1), -(T-2), ... 0, 1, 2, ... T-1]
  # Change to [T-1, T-2, ... 0, -1, -2, ... -(T-2), -(T-1)]
  abs_pos_emb = tf.reverse(abs_pos_emb, [0])
  # [B, N, T, L=2T-1]
  term_bd = tf.einsum('BTNH,LNH->BNTL', query, abs_pos_emb)
  # Convert to [B, N, T, T]
  # part 1: the non-positive relative distances (upper triangle), realigned
  # by a reversed RelShift
  term_bd_left = term_bd[:, :, :, :t]
  term_bd_left = tf.reverse(term_bd_left, [2, 3])
  term_bd_left = RelShift(term_bd_left)
  # [B, N, T, T]
  term_bd_left = tf.reverse(term_bd_left, [2, 3])
  # part 2: the non-negative relative distances (lower triangle)
  term_bd_right = term_bd[:, :, :, t - 1:]
  # [B, N, T, T]
  term_bd_right = RelShift(term_bd_right)
  # [lower triangle]
  mask = tf.linalg.band_part(tf.ones_like(term_bd_right), -1, 0)
  # stitching the two halves together
  return tf.where(mask > 0, term_bd_left, term_bd_right)
def _RelPositionBiasCausal(query, abs_pos_emb):
  """Computes relative position bias for causal self attention.

  Only non-positive relative distances (attending to the past) are needed,
  so only half of the embedding table participates in the einsum.

  Args:
    query: [B, T, N, H] tensor.
    abs_pos_emb: [2T - 1, N, H] tensor; abs_pos_emb[i] is the embedding of
      relative distance i - (T-1).

  Returns:
    The [B, N, T, T] positional-bias term of the attention logits.
  """
  _, t, n, h = py_utils.GetShape(query)
  abs_pos_emb = py_utils.HasShape(abs_pos_emb, [2 * t - 1, n, h])
  # abs_pos_emb is [-(T-1), -(T-2), ... 0, 1, 2, ... T-1]
  # Retain only half and change order to [T-1, T-2, ... 0]
  # [T, N, H]
  abs_pos_emb = tf.reverse(abs_pos_emb, [0])[:t]
  # [B, N, T, L=T]
  term_bd = tf.einsum('BTNH,LNH->BNTL', query, abs_pos_emb)
  # Perform shifting: double reversal around RelShift realigns the
  # distances onto the [T, T] grid.
  term_bd = tf.reverse(term_bd, [2, 3])
  term_bd = RelShift(term_bd)
  return tf.reverse(term_bd, [2, 3])
def RelPositionBias(content, abs_pos_emb, is_causal):
  """Compute relative position bias.

  This is a subroutine used by variants of self-attentions with relative
  positional embedding.

  B: batch size
  T: sequence length
  N: num of attention heads.
  H: per-head attention dimension.

  output[b][n][i][j] = content[b][i][n] x abs_pos_emb[i-j+T-1][n]

  Notice padding is supposed to be masked by the caller of this function.

  Args:
    content: [B, T, N, H] tensor.
    abs_pos_emb: [2T - 1, N, H], the absolute positional embedding;
      abs_pos_emb[i] is the emb of relative distance i - (T-1).
    is_causal: A Python bool or a scalar bool Tensor. True for causal self
      attention.

  Returns:
    The attention logits tensor. [B, N, T, T]
  """
  if isinstance(is_causal, tf.Tensor):
    # The flag is only known at graph execution time; branch dynamically.
    return tf.cond(is_causal,
                   lambda: _RelPositionBiasCausal(content, abs_pos_emb),
                   lambda: _RelPositionBias(content, abs_pos_emb))
  if is_causal:
    return _RelPositionBiasCausal(content, abs_pos_emb)
  return _RelPositionBias(content, abs_pos_emb)
def _AttenLogits(query,
                 key,
                 abs_pos_emb,
                 content_bias=None,
                 positional_bias=None,
                 is_causal=False):
  """Attention logits with relative position embeddings.

  Shared implementation behind AttenLogitsTransformerXL and AttenLogitsRPE
  (Transformer-XL, https://arxiv.org/pdf/1901.02860.pdf, section 3.3).

  Notice padding is supposed to be masked by the caller of this function.

  B: batch size
  T: sequence length
  N: num of attention heads.
  H: per-head attention dimension.

  Args:
    tensors of the following shapes:
    query:           [B, T, N, H]
    key:             [B, T, N, H]
    abs_pos_emb:     [2T - 1, N, H]; abs_pos_emb[i] is the emb of relative
      distance i - (T-1).
    content_bias:    [N, H] or None
    positional_bias: [N, H] or None
    is_causal: A Python bool or a scalar bool Tensor. True for causal self
      attention.

  Returns:
    The attention logits tensor. [B, N, T, T]
  """
  b, t, n, h = py_utils.GetShape(query)
  key = py_utils.HasShape(key, [b, t, n, h])
  # Missing biases contribute nothing (broadcast 0).
  if content_bias is not None:
    content_bias = py_utils.HasShape(content_bias, [n, h])
  else:
    content_bias = 0
  if positional_bias is not None:
    positional_bias = py_utils.HasShape(positional_bias, [n, h])
  else:
    positional_bias = 0
  # [B, N, T, S=T]
  # Content term: (query + content_bias) . key
  term_ac = tf.einsum('BTNH,BSNH->BNTS', query + content_bias, key)
  # Positional term: (query + positional_bias) against the relative
  # position embeddings.
  term_bd = RelPositionBias(query + positional_bias, abs_pos_emb, is_causal)
  return term_ac + term_bd
def AttenLogitsTransformerXL(query,
                             key,
                             abs_pos_emb,
                             content_bias,
                             positional_bias,
                             is_causal=False):
  """Transformer-XL style attention logits.

  Self attention with relative position embedding as described in
  Transformer-XL (https://arxiv.org/pdf/1901.02860.pdf, section 3.3).
  Padding is expected to be masked by the caller.

  B: batch size
  T: sequence length
  N: num of attention heads.
  H: per-head attention dimension.

  Args:
    tensors of the following shapes:
    query:           [B, T, N, H]
    key:             [B, T, N, H]
    abs_pos_emb:     [2T - 1, N, H]. The sinusoid positional embedding from
      https://arxiv.org/abs/1706.03762. abs_pos_emb[i] is the emb of relative
      distance i - (T-1).
    content_bias:    [N, H]
    positional_bias: [N, H]
    is_causal: A Python bool or a scalar bool Tensor. True for causal self
      attention.

  Returns:
    The attention logits tensor. [B, N, T, T]
  """
  return _AttenLogits(query, key, abs_pos_emb,
                      content_bias=content_bias,
                      positional_bias=positional_bias,
                      is_causal=is_causal)
def AttenLogitsRPE(query, key, abs_pos_emb, is_causal):
  """Attention logits with trainable relative position embeddings.

  Follows https://arxiv.org/pdf/1803.02155.pdf.  Padding is expected to be
  masked by the caller.

  B: batch size
  T: sequence length
  N: num of attention heads.
  H: per-head attention dimension.

  Args:
    tensors of the following shapes:
    query:       [B, T, N, H]
    key:         [B, T, N, H]
    abs_pos_emb: [2T - 1, N, H]. The trainable embedding. abs_pos_emb[i] is
      the emb of relative distance i - (T-1).
    is_causal: A Python bool or a scalar bool Tensor. True for causal self
      attention.

  Returns:
    The attention logits tensor. [B, N, T, T]
  """
  # RPE has no per-head content/positional bias terms.
  return _AttenLogits(query, key, abs_pos_emb,
                      content_bias=None,
                      positional_bias=None,
                      is_causal=is_causal)
| mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/core/relative_atten_util.py | Python | apache-2.0 | 13,443 |
#!/usr/bin/env python
"""
Tests that stresses are calculated correctly by Asap
Name: testStress.py
Description: Part of the Asap test suite. Tests that stresses are
calculated correctly by calculating various elastic constants from
strain and stress and comparing them with the same constants
calculated using strains and energies. Can also be imported as a
module, and used to test advanced calculation methods (QC,
parallel).
Usage: python testStress.py
Expected result: Some elastic constants for Copper and Silver,
followed by the text 'ALL TESTS SUCCEEDED'.
The elastic constants are calculated by straining the crystal in
various modes, and fitting to the energies or the stresses.
Generally, the energies seems to be most sensitive to numerical noise,
and requires a rather large strain interval (1% or above), whereas the
stresses are much less sensitive to this. On the other hand,
unlinearities influence the stress fits for large strain intervals. A
strain interval of [-1%, 1%] is a good compromise, where both methods
work.
C11 and C12 calculated 'directly', i.e. in uniaxial strain, is
sensitive to the strain interval when using the energy to fit C11.
Fitting to the stresses work much better.
C11 and C12 can be calculated in an alternative way using a
volume-conserving deformation and fitting to the energies.
All of the above-mentioned calculations are performed.
"""
import numpy as np
import sys
from asap3 import *
from asap3.testtools import ReportTest
# When not imported as a module, the following are imported later:
#from Setup.Lattice.FCC111Ortho import *
#from Structures.ChemicalElements import Copper, Silver
#import Structures.ChemicalElements.AtomicWeight
#import Structures.ChemicalElements.CrystalStructure
#from Structures.IonDynamics import VelocityVerlet, Langevin
# Dimensionless strain values used for all fits: -1% .. +1% in steps of
# 0.25%.  This interval is a compromise where both the energy fits and
# the stress fits work (see the module docstring).
defaultstrains = 0.01 * np.array((-1, -0.75, -0.5, -0.25, 0.0,
                                  0.25, 0.5, 0.75, 1.0))
def polynomialLeastSquaresFit(parameters, data, max_iterations=None,
                              stopping_limit = 0.0001):
    """Least-square fit to a polynomial.

    Least-squares fit to a polynomial whose order is defined by
    the number of parameter values: len(parameters) coefficients
    describe a polynomial of degree len(parameters) - 1.

    data is an array whose first column holds the abscissas and whose
    second column holds the ordinates.  max_iterations and stopping_limit
    are accepted for signature compatibility and ignored.

    Returns (coefficients, None) with coefficients in ascending order
    (constant term first).

    This is a wrapper function replacing the similar function in
    Scientific.Functions.LeastSquares
    """
    # A polynomial with n coefficients has degree n - 1.  Passing n
    # straight to polyfit (as the original did) fitted one degree too
    # high and returned a spurious extra coefficient.
    degree = len(parameters) - 1
    return (np.polyfit(data[:,0], data[:,1], degree)[::-1], None)
def makefits(atoms, strains, indices, shear=0):
    """Do the deformations, and get fits to energies and stresses.

    atoms is a list of atoms.
    strains are the strains as floating point numbers
    indices is a list of indices for the strain components to be used.
    For bulk modulus, it will be ((0,0), (1,1), (2,2)) etc.

    When shear is false each index is ((i,j), factor): cell element (i,j)
    is scaled by 1 + factor*epsilon.  When shear is true each index is
    ((i,j), (k,l), factor): cell element (i,j) is offset by
    factor*epsilon*basis[k,l].

    Returns (energyfit, stressfits): a quadratic fit of energy density
    vs. strain, and six linear fits, one per stress component.
    """
    energies = []
    stresses = []
    # remember the undeformed cell so it can be restored afterwards
    basis = atoms.get_cell()
    vol = np.linalg.det(basis)
    for epsilon in strains:
        if shear:
            adjustment = np.zeros((3,3), np.float)
            for idx in indices:
                adjustment[idx[0]] += idx[2] * epsilon * basis[idx[1]]
            atoms.set_cell(adjustment + basis, scale_atoms=True)
        else:
            scaling = np.ones((3,3), np.float)
            for idx in indices:
                scaling[idx[0]] += idx[1]*epsilon
            atoms.set_cell(scaling * basis, scale_atoms=True)
        energy = atoms.get_potential_energy()
        stress = atoms.get_stress()
        #print ""
        #print epsilon, energy/len(atoms)
        #print stress
        # store energy per volume so the fits yield moduli directly
        energies.append((epsilon, energy/vol))
        stresses.append((epsilon,) + tuple(stress))
    # restore the undeformed cell
    atoms.set_cell(basis, scale_atoms=True)
    energies = np.array(energies)
    stresses = np.array(stresses)
    #print "Energies:", energies
    energyfit = polynomialLeastSquaresFit((0.0, 0.0, 0.0), energies)
    #print "EnergyFit:", energyfit
    stressfits = []
    for i in range(6):
        # linear fit of stress component i (Voigt order) vs. strain
        stressfits.append(polynomialLeastSquaresFit((0.0, 0.0),
                                                    np.take(stresses,
                                                            (0, i+1), 1)))
    return (energyfit, stressfits)
def findlatticeconst(atoms, latticeconstant):
    """Adjust the volume so the atoms have their lowest energy.

    Iteratively (five rounds) fits the pressure as a function of
    hydrostatic strain and dilates the cell toward zero pressure.
    latticeconstant is only used for the progress printout.
    """
    basis = atoms.get_cell()
    # a narrower strain interval than defaultstrains for the pressure fit
    strains = 0.01 * np.array((-0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1,
                               0.15, 0.2))
    for i in range(5):
        (energyfit, stressfit) = makefits(atoms, strains,
                                          (((0,0), 1), ((1,1), 1), ((2,2),1)))
        # pressure is minus one third of the stress trace; average the
        # linear fits of the three diagonal stress components
        pressurefit = -(np.array(stressfit[0][0]) +
                        np.array(stressfit[1][0]) +
                        np.array(stressfit[2][0])) / 3.0
        # root of the linear pressure fit gives the equilibrium dilation
        dilation = -pressurefit[0]/pressurefit[1]
        print "Optimizing lattice constant:", latticeconstant, "->", latticeconstant*(1+dilation/3)
        latticeconstant = latticeconstant*(1+dilation/3)
        basis = (1+dilation/3) * basis
        atoms.set_cell(basis, scale_atoms=True)
def elasticconstants(atoms, name, bookbulk, bookc11, bookc12, bookc44,
                     fitfact=1.0, fitfact2=1.0):
    """Check the elastic constants.

    atoms is the crystal to test, name labels it in the report.  bookbulk,
    bookc11, bookc12 and bookc44 are pairs in GPa whose first element is
    printed as the textbook value and whose second element is the target
    checked by ReportTest.  fitfact and fitfact2 scale test tolerances.
    """
    # Bulk modulus from hydrostatic strain: quadratic fit of the energy
    # density, linear fits of the stress components.
    (energyfit, stressfits) = makefits(atoms, defaultstrains,
                                       (((0,0), 1), ((1,1), 1), ((2,2),1)))
    bm = 2.0/9.0 * energyfit[0][2] / units.GPa
    bms = []
    for i in range(3):
        # NOTE(review): stressfits[0] is used for all three diagonal
        # components; stressfits[i] was probably intended.  As written the
        # s_xx/s_yy/s_zz ReportTests below compare identical numbers and
        # are trivially satisfied -- confirm and fix.
        bms.append(stressfits[0][0][1] / 3.0 / units.GPa)
    avgbms = np.sum(bms)/3.0
    print ""
    print "Calculation for", name
    print "  Bulk modulus from energies:", bm
    print "  Bulk modulus from stresses:", np.sum(bms)/3.0
    print "  Textbook value:", bookbulk[0]
    print ""
    ReportTest("Bulk modulus (%s, energies)" % name, bm, bookbulk[1],
               0.1*fitfact)
    ReportTest("Bulk modulus (%s, pressure)" % name, avgbms, bm, 0.1*fitfact)
    ReportTest("Bulk modulus (%s, s_xx)" % name, bms[0], avgbms,
               0.00001*avgbms)
    ReportTest("Bulk modulus (%s, s_yy)" % name, bms[1], avgbms,
               0.00001*avgbms)
    ReportTest("Bulk modulus (%s, s_zz)" % name, bms[2], avgbms,
               0.00001*avgbms)
    # C11 and C12 from uniaxial strain along x.
    (energyfit, stressfits) = makefits(atoms, defaultstrains, (((0,0),1),))
    c11 = 2.0 * energyfit[0][2] / units.GPa
    c11s = stressfits[0][0][1] / units.GPa
    c12s = stressfits[1][0][1] / units.GPa
    print ""
    print "Calculation for", name
    print "  C_11 from energies:", c11
    print "  C_11 from stress:", c11s
    print "  C_12 from stress:", c12s
    print "  Bulk modulus from C_11 and C_12:", (c11s + 2 * c12s) / 3.0
    print "  Textbook values: C_11 = %.1f; C_12 = %.1f" % (bookc11[0],
                                                           bookc12[0])
    # Alternative route: a volume-conserving deformation gives C11 - C12
    # from the energies; combined with B this yields C11 and C12.
    (energyfit, stressfits) = makefits(atoms, defaultstrains,
                                       (((0,0),1), ((1,1), -0.5),
                                        ((2,2), -0.5)))
    # C11 - C12
    c11mc12 = 4.0/3.0 * energyfit[0][2] / units.GPa
    # B = (C11 + 2 C12)/3
    altc11 = (3*bm + 2*c11mc12) / 3.0
    altc12 = (3*bm - c11mc12) / 3.0
    print "  C_11 from alternative energies:", altc11
    print "  C_12 from alternative energies:", altc12
    print "  Bulk modulus from C_11 and C_12:", (altc11 + 2 * altc12) / 3.0
    print ""
    ReportTest("C11 from stress", c11s, bookc11[1], 0.1*fitfact)
    ReportTest("C11 from energies", c11, c11s, 0.1*fitfact)
    ReportTest("C12 from stress", c12s, bookc12[1], 0.1*fitfact)
    ReportTest("B from C11 and C12", (c11s + 2 * c12s) / 3.0, bookbulk[1],
               0.5*fitfact)
    ReportTest("C11 from alt. energies", altc11, c11s, 0.5*fitfact)
    ReportTest("C12 from alt. energies", altc12, c12s, 0.5*fitfact)
    print ""
    # C44 from a yz shear deformation (Voigt component 3).
    (energyfit, stressfits) = makefits(atoms, defaultstrains,
                                       (((2,1), (2,2), 1),
                                        ((1,2), (1,1), 1)), shear=1)
    c44 = 0.5 * energyfit[0][2] / units.GPa
    c44s = 0.5 * stressfits[3][0][1] / units.GPa
    print ""
    print "Calculation for", name
    print "  C_44 from energies:", c44
    print "  C_44 from stresses:", c44s
    print "  Textbook value:", bookc44[0]
    print "  (please do not expect good agreement for the usual EMT potential)"
    print ""
    ReportTest("C44 from energies", c44, bookc44[1], 0.1*fitfact)
    ReportTest("C44 from stresses", c44s, c44, 0.2*fitfact)
    nulls = (("C14", 0), ("C24", 1), ("C34", 2), ("C45", 4), ("C46", 5))
    print "Testing that the non-existing elastic constants are indeed zero:"
    # NOTE(review): this loop clobbers the 'name' parameter; it is not
    # used again afterwards, but the shadowing is fragile.
    for name, idx in nulls:
        cnull = stressfits[idx][0][1] / units.GPa
        #print cnull
        ReportTest(name+" from stresses", cnull, 0.0, 0.00001)
    print ""
    print "Testing other shear modes:"
    # xz shear (Voigt component 4).
    (energyfit, stressfits) = makefits(atoms, defaultstrains,
                                       (((2,0), (2,2), 1),
                                        ((0,2), (0,0), 1)), shear=1)
    c44x = 0.5 * energyfit[0][2] / units.GPa
    c44sx = 0.5 * stressfits[4][0][1] / units.GPa
    ReportTest("C44(alt) from energies", c44x, c44, 0.2*fitfact2)
    ReportTest("C44(alt) from stresses", c44sx, c44s, 0.2*fitfact2)
    nulls = (("C14", 0), ("C24", 1), ("C34", 2), ("C45", 3), ("C46", 5))
    for name, idx in nulls:
        cnull = stressfits[idx][0][1] / units.GPa
        ReportTest(name+"(alt) from stresses", cnull, 0.0, 0.00001)
    # xy shear (Voigt component 5).
    (energyfit, stressfits) = makefits(atoms, defaultstrains,
                                       (((1,0), (1,1), 1),
                                        ((0,1), (0,0), 1)), shear=1)
    c44x = 0.5 * energyfit[0][2] / units.GPa
    c44sx = 0.5 * stressfits[5][0][1] / units.GPa
    ReportTest("C44(alt) from energies", c44x, c44, 0.1*fitfact)
    ReportTest("C44(alt) from stresses", c44sx, c44s, 0.1*fitfact)
    nulls = (("C14", 0), ("C24", 1), ("C34", 2), ("C45", 3), ("C46", 4))
    for name, idx in nulls:
        cnull = stressfits[idx][0][1] / units.GPa
        ReportTest(name+"(alt) from stresses", cnull, 0.0, 0.00001)
    print ""
# Reference elastic constants in GPa, keyed by element/parametrisation
# name.  Each entry maps a constant to a pair: the first value is printed
# as the textbook value, the second is the target checked by ReportTest.
book = {}
book['Cu'] = {"bookbulk": (134.3, 133.52),
              "bookc11": (168.3, 171.65),
              "bookc12": (122.1, 114.82),
              "bookc44": (75.7, 88.45)}
book['Copper_Rasmussen'] = {"bookbulk": (134.3, 140.17),
                            "bookc11": (168.3, 172.75),
                            "bookc12": (122.1, 124.32),
                            "bookc44": (75.7, 80.39)}
book['Ag'] = {"bookbulk": (103.8, 99.01),
              "bookc11": (124.0, 124.45),
              "bookc12": (93.7, 86.27),
              "bookc44": (46.12, 53.25)}
# This module only defines helpers; the real tests import it.
if __name__ == "__main__":
    print "This it not a test, but a module imported from a few tests."
| auag92/n2dm | Asap-3.8.4/Test/StressModule.py | Python | mit | 10,790 |
import os
import dill as pickle
# import pickle
from collections import namedtuple
# A snapshot of the solver state: initial condition u0, subspace data V/v,
# the lss solver object, and per-segment accumulators (G_lss, g_lss, J,
# G_dil, g_dil).  Exact field semantics are defined by the fds solver --
# confirm there before relying on them.
Checkpoint = namedtuple('Checkpoint', 'u0 V v lss G_lss g_lss J G_dil g_dil')
def save_checkpoint(checkpoint_path, cp):
    '''
    save a checkpoint file under the path checkpoint_path,
    naming convention is mXX_segmentYYY, where XX and YY are given by cp.lss
    '''
    name = 'm{0}_segment{1}'.format(cp.lss.m_modes(), cp.lss.K_segments())
    target = os.path.join(checkpoint_path, name)
    with open(target, 'wb') as f:
        pickle.dump(cp, f)
def load_checkpoint(checkpoint_file):
    '''
    Load and return the checkpoint stored in checkpoint_file.
    '''
    # use a context manager so the file handle is closed promptly; the
    # original left the handle open for the garbage collector
    with open(checkpoint_file, 'rb') as f:
        return pickle.load(f)
def verify_checkpoint(checkpoint):
    '''
    Check internal consistency of a checkpoint: every per-segment history
    must hold exactly one entry per completed lss segment.
    '''
    u0, V, v, lss, G_lss, g_lss, J_hist, G_dil, g_dil = checkpoint
    expected = lss.K_segments()
    histories = (G_lss, g_lss, J_hist, G_dil, g_dil)
    return all(len(h) == expected for h in histories)
def load_last_checkpoint(checkpoint_path, m):
    '''
    load checkpoint in path checkpoint_path, with file name mXX_segmentYYY,
    where XX matches the given m, and YY is the largest.

    Returns None when no matching checkpoint file exists.
    '''
    def m_modes(filename):
        # parse the XX part of 'mXX_segmentYYY'; None if it does not fit
        try:
            prefix, _ = filename.split('_segment')
            assert prefix.startswith('m')
            return int(prefix[1:])
        except:
            return None
    def segments(filename):
        # parse the YYY part of 'mXX_segmentYYY'; None if it does not fit
        try:
            _, suffix = filename.split('_segment')
            return int(suffix)
        except:
            return None
    candidates = [f for f in os.listdir(checkpoint_path)
                  if m_modes(f) == m and segments(f) is not None]
    candidates.sort(key=segments)
    if not candidates:
        return None
    # the highest segment count is the most recent checkpoint
    return load_checkpoint(os.path.join(checkpoint_path, candidates[-1]))
| qiqi/fds | fds/checkpoint.py | Python | gpl-3.0 | 1,755 |
#!/usr/bin/env python
# Imports
import unittest
import os
import sys
import tempfile
import shutil
import filecmp
PATH_TO_DATA = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
sys.path = [os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')] + sys.path
from enrichm.generate import GenerateModel
###############################################################################
class Tests(unittest.TestCase):
    """End-to-end check that GenerateModel produces the expected output files."""

    ml_data = 'ml_data'
    sample_matrix = 'matrix.tsv'
    sample_metadata = 'metadata.tsv'
    sample_generate = 'generate_example'
    sample_matrix_path = os.path.join(PATH_TO_DATA, ml_data, sample_matrix)
    sample_metadata_path = os.path.join(PATH_TO_DATA, ml_data, sample_metadata)
    sample_generate_path = os.path.join(PATH_TO_DATA, ml_data, sample_generate)

    def test_hello_generate(self):
        output_dir = tempfile.mkdtemp()
        model = GenerateModel()
        model.generate_pipeline(self.sample_matrix_path,
                                self.sample_metadata_path,
                                model.classifier,
                                0.2,    # default testing portion
                                False,  # no grid search for fine tuning
                                2,      # threads
                                output_dir)
        generated_files = sorted(os.listdir(output_dir))
        reference_files = sorted(os.listdir(self.sample_generate_path))
        # Only file *names* are compared, not contents: the trained models
        # are estimations and differ between runs. Tests of generate's
        # internal functions are the planned alternative.
        self.assertEqual(len(generated_files), len(reference_files))
        for generated_file, reference_file in zip(generated_files,
                                                  reference_files):
            self.assertEqual(generated_file, reference_file)
        shutil.rmtree(output_dir)
if __name__ == "__main__":
unittest.main()
| geronimp/enrichM | test/test_generate.py | Python | gpl-3.0 | 2,310 |
import asyncore
import logging
from dbgpproxy.dispatcher import RegistrationServer, DebugConnectionServer
__author__ = 'gkralik'
class Proxy:
    """Coordinates the registration and debugger-connection servers and keeps
    the registry of IDEs known by their IDEKEY."""

    def __init__(self, idehost=None, ideport=None, dbghost=None, dbgport=None):
        """
        Initialize the Proxy manager.

        Sets up the RegistrationServer and DebugConnectionServer instances.

        @param idehost: The host to listen on for IDE requests.
        @param ideport: The port to listen on for IDE requests.
        @param dbghost: The host to listen on for debugger engine requests.
        @param dbgport: The port to listen on for debugger engine requests.
        """
        self.logger = logging.getLogger('dbgpproxy')
        # Maps idekey -> [[host, port], multi] for each registered IDE.
        self._servers = {}
        self._registration_server = RegistrationServer(idehost, ideport, dbghost, dbgport, proxy_manager=self)
        self._debugger_connection_server = DebugConnectionServer(dbghost, dbgport, proxy_manager=self)

    @staticmethod
    def start():
        """
        Start the asyncore loop.
        """
        asyncore.loop()

    @staticmethod
    def stop():
        """
        Close all sockets handled by asyncore.
        """
        asyncore.close_all()

    def add_server(self, idekey, host, port, multi):
        """
        Add a server (IDE) to the list of known servers.

        @param idekey: The IDEKEY identifying the server.
        @param host: The host of the IDE process.
        @param port: The port of the IDE process.
        @param multi: Not used.
        @return: The IDEKEY or None if IDEKEY is already registered.
        """
        if idekey in self._servers:
            return None
        self.logger.debug('add_server: idekey = {}, host = {}, port = {}, multi = {}'.format(idekey, host, port, multi))
        self._servers[idekey] = [[host, port], multi]
        return idekey

    def remove_server(self, idekey):
        """
        Remove a server (IDE) from the list of known servers.

        @param idekey: The IDEKEY identifying the server.
        @return: The IDEKEY or None if the server is not registered.
        """
        if idekey in self._servers:
            self.logger.debug('remove_server: idekey = {}'.format(idekey))
            del self._servers[idekey]
            return idekey
        return None

    def get_server(self, idekey):
        """
        Get a server by its IDEKEY.

        @param idekey: The IDEKEY identifying the server.
        @return: The server entry [[host, port], multi], or None if the
                 server is not registered. (The original docstring wrongly
                 claimed the IDEKEY was returned.)
        """
        return self._servers.get(idekey)
| gkralik/dbgpproxy | dbgpproxy/proxy.py | Python | mit | 2,611 |
# -*- coding: utf-8 -*-
#
# Minimum amount of settings to run the googlytics test suite
#
# googlytics options are often overriden during tests
# Dummy analytics key; individual tests override googlytics options as needed.
GOOGLE_ANALYTICS_KEY = 'U-TEST-XXX'
DEBUG = True
# File-backed SQLite keeps the test database self-contained.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'googlytics_test.sqlite3'
    }
}
# auth/contenttypes are the minimal contrib apps googlytics depends on.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'googlytics',
)
# Expose the googlytics context processor so template tests can render it.
TEMPLATE_CONTEXT_PROCESSORS = (
    'googlytics.context_processors.googlytics',
)
| rikpg/django-googlytics | test_settings.py | Python | bsd-3-clause | 510 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Hate Crime Table 1."""
import os
import sys
import unittest
import tempfile
import json
import pandas as pd
from . import preprocess
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_SCRIPT_PATH, '..')) # for utils
import utils
_YEAR_INDEX = 0
_OUTPUT_COLUMNS = ['Year', 'StatVar', 'Quantity']
class HateCrimeTable1Test(unittest.TestCase):
    """Regression test: cleaning the 2019 XLS must reproduce the golden CSV."""

    def test_csv(self):
        source_config = {
            'type': 'xls',
            'path': 'testdata/2019.xls',
            'args': {
                'header': 3,
                'skipfooter': 3
            }
        }
        csv_files = []
        with tempfile.TemporaryDirectory() as tmp_dir:
            xls_path = os.path.join(_SCRIPT_PATH, source_config['path'])
            csv_path = os.path.join(tmp_dir, '2019.csv')
            frame = pd.read_excel(xls_path, **source_config['args'])
            frame = preprocess._clean_dataframe(frame)
            frame.insert(_YEAR_INDEX, 'Year', '2019')
            frame.to_csv(csv_path, index=None, header=True)
            csv_files.append(csv_path)
            config_path = os.path.join(_SCRIPT_PATH, 'config.json')
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
            cleaned_csv_path = os.path.join(tmp_dir, 'cleaned.csv')
            utils.create_csv_mcf(csv_files, cleaned_csv_path, config,
                                 _OUTPUT_COLUMNS, preprocess._write_output_csv)
            # Note: 'actual' is what this run produced, 'expected' the stored
            # golden file (the original swapped those names).
            with open(cleaned_csv_path, 'r', encoding='utf-8') as f_actual:
                actual_result = f_actual.read()
                expected_csv_path = os.path.join(_SCRIPT_PATH, 'testdata',
                                                 'expected.csv')
                with open(expected_csv_path, 'r',
                          encoding='utf-8') as f_expected:
                    expected_result = f_expected.read()
                self.assertEqual(actual_result, expected_result)
| datacommonsorg/data | scripts/fbi/hate_crime/table1/preprocess_test.py | Python | apache-2.0 | 2,606 |
from .blueprint import Kibble
from .auth import Authenticator, GAEAuthenticator
from .list import List
from .edit import Edit, Create
from .operation import Operation
from .delete import Delete
from .base import KibbleView
from .util.forms import BaseCSRFForm
from . import query_composers
from . import query_filters
from . import polymodel
| xlevus/flask-kibble | flask_kibble/__init__.py | Python | bsd-3-clause | 347 |
__author__ = 'hydezhang'
from oslo.config import cfg
from glanceclient import exc
from tests.flow.test_base import TestBase
from flow.imagetask import ImageMigrationTask
from utils.db_handlers import images
# testing inputs
owner_target_id = '2ddc4b6528f24b039cf4ec093ae8a214'
image_name = "public_image_on_source_cloud"
class ImageTaskTest(TestBase):
"""Unit test for Tenant migration"""
def __init__(self, *args, **kwargs):
super(ImageTaskTest, self).__init__(*args, **kwargs)
self.migration_task = ImageMigrationTask('image_migration_task')
self.s_cloud_name = cfg.CONF.SOURCE.os_cloud_name
self.t_cloud_name = cfg.CONF.TARGET.os_cloud_name
self.image_id = 'cce60962-c008-46ed-919d-b003f7c78ea2'
def create_images(self):
image_to_migrate = self.migration_task.gl_source.images.create(
name=image_name,
disk_format='qcow2',
container_format='bare',
is_public=True,
location='http://cloud-images.ubuntu.com/releases/12.04.2/release/'
'ubuntu-12.04-server-cloudimg-amd64-disk1.img')
self.image_id = image_to_migrate.id
return image_to_migrate
def test_check_image_migrated(self):
# migrate an image, manually check if it is correctly migrated
image_to_migrate = self.create_images()
image = self.migration_task.gl_source.images.get(self.image_id)
self.migration_task.migrate_one_image(image, owner_target_id)
# get the image data that has been migrated from src to dst
filters = {"src_image_name": image_name,
"src_uuid": image_to_migrate.id,
"src_cloud": self.s_cloud_name,
"dst_cloud": self.t_cloud_name}
image_migration_record = images.get_migrated_image(filters)
if not image_migration_record:
self.assertTrue(False,
"No migration detail recorded "
"for image '%s'" % image_name)
m_image = image_migration_record[0] \
if image_migration_record else None
dest_id = m_image['dst_uuid']
dest_image = self.migration_task.gl_target.images.get(dest_id)
self.assertEqual(image_to_migrate.name, dest_image.name)
self.assertEqual(image_to_migrate.disk_format,
dest_image.disk_format)
self.assertEqual(image_to_migrate.container_format,
dest_image.container_format)
self.assertEqual(image_to_migrate.is_public, dest_image.is_public)
# self.clean_up(image_to_migrate, dest_image)
# test check_image_migrated
result = self.migration_task.check_image_migrated(image)
print result
self.assertTrue(result)
self.clean_up(image_to_migrate, dest_image)
def test_get_image(self):
image_to_migrate = self.create_images()
print self.migration_task.get_image(self.image_id)
self.clean_up(image_to_migrate)
def test_upload_image(self):
image_to_migrate = self.create_images()
image_meta = self.migration_task.gl_source.images.get(self.image_id)
image_data = self.migration_task.get_image(image_meta.id)
dest_image = self.migration_task.upload_image(image_meta,
image_data, owner_target_id)
self.assertEqual(image_to_migrate.name, dest_image.name)
self.assertEqual(image_to_migrate.disk_format,
dest_image.disk_format)
self.assertEqual(image_to_migrate.container_format,
dest_image.container_format)
self.assertEqual(image_to_migrate.is_public, dest_image.is_public)
self.clean_up(image_to_migrate, dest_image)
def test_get_and_upload_img(self):
image_to_migrate = self.create_images()
image_meta = self.migration_task.gl_source.images.get(self.image_id)
self.migration_task.get_and_upload_img(image_meta, owner_target_id)
# get the image data that has been migrated from src to dst
filters = {"src_image_name": image_name,
"src_uuid": image_to_migrate.id,
"src_cloud": self.s_cloud_name,
"dst_cloud": self.t_cloud_name}
image_migration_record = images.get_migrated_image(filters)
if not image_migration_record:
self.assertTrue(False,
"No migration detail recorded "
"for image '%s'" % image_name)
m_image = image_migration_record[0] \
if image_migration_record else None
dest_id = m_image['dst_uuid']
dest_image = self.migration_task.gl_target.images.get(dest_id)
self.assertEqual(image_to_migrate.name, dest_image.name)
self.assertEqual(image_to_migrate.disk_format,
dest_image.disk_format)
self.assertEqual(image_to_migrate.container_format,
dest_image.container_format)
self.assertEqual(image_to_migrate.is_public, dest_image.is_public)
self.clean_up(image_to_migrate, dest_image)
def test_migrate_one_image(self):
image_to_migrate = self.create_images()
image = self.migration_task.gl_source.images.get(self.image_id)
self.migration_task.migrate_one_image(image, owner_target_id)
# get the image data that has been migrated from src to dst
filters = {"src_image_name": image_name,
"src_uuid": image_to_migrate.id,
"src_cloud": self.s_cloud_name,
"dst_cloud": self.t_cloud_name}
image_migration_record = images.get_migrated_image(filters)
if not image_migration_record:
self.assertTrue(False,
"No migration detail recorded "
"for image '%s'" % image_name)
m_image = image_migration_record[0] \
if image_migration_record else None
dest_id = m_image['dst_uuid']
dest_image = self.migration_task.gl_target.images.get(dest_id)
self.assertEqual(image_to_migrate.name, dest_image.name)
self.assertEqual(image_to_migrate.disk_format,
dest_image.disk_format)
self.assertEqual(image_to_migrate.container_format,
dest_image.container_format)
self.assertEqual(image_to_migrate.is_public, dest_image.is_public)
self.clean_up(image_to_migrate, dest_image)
def test_execute(self):
image_to_migrate = self.create_images()
dest_image = None
try:
self.migration_task.execute(
images_to_migrate=[image_to_migrate.id], tenant_to_process=None)
# get the image data that has been migrated from src to dst
filters = {"src_image_name": image_name,
"src_uuid": image_to_migrate.id,
"src_cloud": self.s_cloud_name,
"dst_cloud": self.t_cloud_name}
image_migration_record = images.get_migrated_image(filters)
if not image_migration_record:
self.assertTrue(False,
"No migration detail recorded "
"for image '%s'" % image_name)
m_image = image_migration_record[0] \
if image_migration_record else None
dest_id = m_image['dst_uuid']
dest_image = self.migration_task.gl_target.images.get(dest_id)
self.assertEqual(image_to_migrate.name, dest_image.name)
self.assertEqual(image_to_migrate.disk_format,
dest_image.disk_format)
self.assertEqual(image_to_migrate.container_format,
dest_image.container_format)
self.assertEqual(image_to_migrate.is_public,
dest_image.is_public)
image = self.migration_task.gl_source.images.get(self.image_id)
result = self.migration_task.check_image_migrated(image)
print result
self.assertTrue(result)
except exc.HTTPNotFound as e:
self.assertTrue(False, e.message)
except Exception as e:
self.assertTrue(False, e.message)
finally:
print 'finished'
self.clean_up(image_to_migrate, dest_image)
def clean_up(self, image_to_migrate, migrated_image=None):
self.migration_task.gl_source.images.delete(image_to_migrate)
# clean database
filter_values = [image_to_migrate.name,
image_to_migrate.id,
image_to_migrate.owner,
cfg.CONF.SOURCE.os_cloud_name,
cfg.CONF.TARGET.os_cloud_name]
images.delete_migration_record(filter_values)
if migrated_image:
self.migration_task.gl_target.images.delete(migrated_image)
| Phoenix1708/OpenAcademy_OpenStack_Flyway | flyway/tests/flow/test_imagetask.py | Python | apache-2.0 | 9,130 |
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
class AuthenticationError(LookupError):
    '''Raised when authenticating a User fails.'''

    INVALID_CREDENTIALS = 'The credentials you provided are invalid.'
    MISSING_FIELDS = 'One or more of the fields is blank.'
    INACTIVE_ACCOUNT = 'Your account is inactive.'

    def __init__(self, message):
        super(AuthenticationError, self).__init__(message)

    @classmethod
    def invalid_credentials(cls):
        '''Factory: error for a bad username/password pair.'''
        return cls(cls.INVALID_CREDENTIALS)

    @classmethod
    def missing_fields(cls):
        '''Factory: error for blank required fields.'''
        return cls(cls.MISSING_FIELDS)

    @classmethod
    def inactive_account(cls):
        '''Factory: error for a deactivated account.'''
        return cls(cls.INACTIVE_ACCOUNT)
class PasswordChangeFailed(Exception):
    '''Raised when a user's password-change request is rejected.'''

    MISSING_FIELDS = 'One or more fields is blank.'
    PASSWORD_TOO_SHORT = 'New password must be at least 6 characters long.'
    INVALID_CHARACTERS = 'New password can only consist of alphanumeric characters and symbols (above numbers).'
    INCORRECT_PASSWORD = 'Incorrect password.'

    def __init__(self, message):
        super(PasswordChangeFailed, self).__init__(message)

    @classmethod
    def missing_fields(cls):
        '''Factory: a required field was left blank.'''
        return cls(cls.MISSING_FIELDS)

    @classmethod
    def password_too_short(cls):
        '''Factory: the new password is under the 6-character minimum.'''
        return cls(cls.PASSWORD_TOO_SHORT)

    @classmethod
    def invalid_characters(cls):
        '''Factory: the new password contains disallowed characters.'''
        return cls(cls.INVALID_CHARACTERS)

    @classmethod
    def incorrect_password(cls):
        '''Factory: the current password did not match.'''
        return cls(cls.INCORRECT_PASSWORD)
| ctmunwebmaster/huxley | huxley/accounts/exceptions.py | Python | bsd-3-clause | 1,659 |
from _external import *
# Probe for libxml2 via the shared LibChecker helper.
# NOTE(review): the name 'xml' shadows the stdlib xml package inside this
# module — confirm nothing here imports stdlib xml afterwards.
xml = LibChecker('xml2')
| tuttleofx/sconsProject | autoconf/xml.py | Python | mit | 50 |
# -*- coding: utf-8 -*-
"""Resources that don't change over time for PyBEL."""
# Matches purely numeric species identifiers (one or more digits).
SPECIES_PATTERN = r"^\d+$"
# Pinned (versioned) URL of the confidence BEL annotation file.
CONFIDENCE_URL = "https://arty.scai.fraunhofer.de/artifactory/bel/annotation/confidence/confidence-1.0.0.belanno"
| pybel/pybel | src/pybel/resources/constants.py | Python | mit | 222 |
#!/usr/bin/env python3
from ietf.utility.environment import (get_db_session, get_editor, get_file,
get_pager)
from ietf.utility.query_doc import query_std
from ietf.utility.query_is_also import query_std_is_also
from subprocess import run
import sys
def get_docs(args):
    """Look up the requested STDs and display them; exit with status 0."""
    DbSession = get_db_session()
    requested = sort_preserve_order(args.number)  # drop duplicate arguments
    found = []
    missing = []
    if args.is_also:
        # Resolve each STD to the documents it is also published as.
        for std_number in requested:
            if query_std(DbSession, std_number) is None:
                missing.append("STD {} does not exist.".format(std_number))
            else:
                found.extend(query_std_is_also(DbSession, std_number))
    else:
        for std_number in requested:
            document = query_std(DbSession, std_number)
            if document is None:
                missing.append("STD {} does not exist.".format(std_number))
            else:
                found.append(document)
    # Show the documents that exist ...
    show_docs(sort_preserve_order(found), args.editor, args.pager)
    # ... then report the ones that don't.
    for message in missing:
        print(message)
    sys.exit(0)
def sort_preserve_order(sequence):
    """Return the unique elements of *sequence*, keeping first-seen order.

    credit: https://www.peterbe.com/plog/uniqifiers-benchmark
    """
    unique = []
    seen = set()
    for item in sequence:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def show_docs(docs, edit, page):
    """Display the passed documents.

    With *edit* (or *page*) set, open each document's plaintext file in
    $EDITOR (or $PAGER); otherwise print each document to stdout followed
    by a blank line.
    """
    # Resolve the external command up front instead of the original's
    # fragile "'cmd' in vars()" introspection.
    cmd = None
    if edit:
        cmd = get_editor()
    elif page:
        cmd = get_pager()
    if cmd is not None:
        # Append each doc's plaintext file; only launch the command if at
        # least one file actually exists.
        have_files = False
        for doc in docs:
            file_path = get_file(doc)
            if file_path:
                have_files = True
                cmd.append(file_path)
        if have_files:
            run(cmd)  # Block while running external process
    # Otherwise print to stdout
    else:
        for doc in docs:
            print(doc)
            print()  # newline between documents
def add_subparser(parent_parser):
    """Register the `std` subcommand on *parent_parser*."""
    std_parser = parent_parser.add_parser(
        'std',
        help='view information about STDs',
    )
    # $EDITOR and $PAGER are mutually exclusive ways to view the files.
    viewer_group = std_parser.add_mutually_exclusive_group()
    viewer_group.add_argument(
        '-e', '--editor',
        action='store_true',
        help='open files in $EDITOR',
    )
    viewer_group.add_argument(
        '-p', '--pager',
        action='store_true',
        help='open files in $PAGER',
    )
    # Optional alias lookup.
    std_parser.add_argument(
        '-i', '--is_also',
        action='store_true',
        help='lookup documents that are aliases for the specified STDs',
    )
    # One or more STD numbers are required.
    std_parser.add_argument(
        'number',
        type=int,
        nargs='+',
        help='STD ID number',
    )
    # Dispatch parsed arguments to get_docs().
    std_parser.set_defaults(func=get_docs)
| lafrenierejm/ietf-cli | ietf/cmd/std.py | Python | isc | 3,474 |
# N A large safe prime (N = 2q+1, where q is prime)
# All arithmetic is done modulo N.
# g A generator modulo N
# k Multiplier parameter (k = H(N, g) in SRP-6a, k = 3 for legacy SRP-6)
# s User's salt
# I Username
# p Cleartext Password
# H() One-way hash function
# ^ (Modular) Exponentiation
# u Random scrambling parameter
# a,b Secret ephemeral values
# A,B Public ephemeral values
# x Private key (derived from p and s)
# v Password verifier
import os
import sys
import hashlib
import random
import ctypes
import time
SHA1 = 0
SHA224 = 1
SHA256 = 2
SHA384 = 3
SHA512 = 4
NG_1024 = 0
NG_2048 = 1
NG_4096 = 2
NG_8192 = 3
NG_CUSTOM = 4
NG_1536 = 5
_hash_map = { SHA1 : hashlib.sha1,
SHA224 : hashlib.sha224,
SHA256 : hashlib.sha256,
SHA384 : hashlib.sha384,
SHA512 : hashlib.sha512 }
_ng_const = (
# 1024-bit
('''\
EEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496\
EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8E\
F4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA\
9AFD5138FE8376435B9FC61D2FC0EB06E3''',
"2"),
# 2048
('''\
AC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4\
A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF60\
95179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF\
747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B907\
8717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB37861\
60279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DB\
FBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73''',
"2"),
# 4096
('''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08\
8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B\
302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9\
A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6\
49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8\
FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D\
670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C\
180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718\
3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D\
B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226\
1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C\
BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC\
E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26\
99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB\
04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2\
233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127\
D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199\
FFFFFFFFFFFFFFFF''',
"5"),
# 8192
('''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08\
8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B\
302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9\
A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6\
49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8\
FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D\
670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C\
180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718\
3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D\
B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226\
1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C\
BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC\
E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26\
99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB\
04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2\
233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127\
D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492\
36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406\
AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918\
DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151\
2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03\
F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F\
BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA\
CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B\
B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632\
387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E\
6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA\
3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C\
5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9\
22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC886\
2F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A6\
6D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC5\
0846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268\
359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6\
FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E71\
60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
'13'),
# 1536-bit
('''\
9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA961\
4B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F843\
80B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0B\
E3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF5\
6EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734A\
F7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E\
8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB''',
"2"),
)
#N_HEX = "AC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73"
#G_HEX = "2"
#HNxorg = None
dlls = list()
if 'win' in sys.platform:
for d in ('libeay32.dll', 'libssl32.dll', 'ssleay32.dll'):
try:
dlls.append( ctypes.cdll.LoadLibrary(d) )
except:
pass
else:
dlls.append( ctypes.cdll.LoadLibrary('libssl.so') )
class BIGNUM_Struct (ctypes.Structure):
    # ctypes mirror of OpenSSL's bignum struct so BN values can be handled
    # by pointer. NOTE(review): assumes the pre-1.1 public layout of
    # struct bignum_st — confirm against the libssl actually loaded.
    _fields_ = [ ("d", ctypes.c_void_p),
                 ("top", ctypes.c_int),
                 ("dmax", ctypes.c_int),
                 ("neg", ctypes.c_int),
                 ("flags", ctypes.c_int) ]
class BN_CTX_Struct (ctypes.Structure):
    # Opaque placeholder: BN_CTX is only ever passed around by pointer,
    # so a single dummy byte field suffices.
    _fields_ = [ ("_", ctypes.c_byte) ]
BIGNUM = ctypes.POINTER( BIGNUM_Struct )
BN_CTX = ctypes.POINTER( BN_CTX_Struct )
def load_func( name, args, returns = ctypes.c_int):
    """Resolve OpenSSL function *name* from the loaded shared libraries.

    Sets argtypes/restype and publishes the resolved function as a
    module-level name so the rest of this module can call it directly.
    Raises ImportError if no loaded library exports the symbol.
    """
    d = sys.modules[ __name__ ].__dict__
    for dll in dlls:
        try:
            f = getattr(dll, name)
        except AttributeError:
            # This library does not export the symbol; try the next one.
            # (The original's bare except also hid unrelated errors.)
            continue
        f.argtypes = args
        f.restype = returns
        d[ name ] = f
        return
    raise ImportError('Unable to load required functions from SSL dlls')
load_func( 'BN_new', [], BIGNUM )
load_func( 'BN_free', [ BIGNUM ], None )
load_func( 'BN_init', [ BIGNUM ], None )
load_func( 'BN_clear', [ BIGNUM ], None )
load_func( 'BN_CTX_new', [] , BN_CTX )
load_func( 'BN_CTX_init', [ BN_CTX ], None )
load_func( 'BN_CTX_free', [ BN_CTX ], None )
load_func( 'BN_cmp', [ BIGNUM, BIGNUM ], ctypes.c_int )
load_func( 'BN_num_bits', [ BIGNUM ], ctypes.c_int )
load_func( 'BN_add', [ BIGNUM, BIGNUM, BIGNUM ] )
load_func( 'BN_sub', [ BIGNUM, BIGNUM, BIGNUM ] )
load_func( 'BN_mul', [ BIGNUM, BIGNUM, BIGNUM, BN_CTX ] )
load_func( 'BN_div', [ BIGNUM, BIGNUM, BIGNUM, BIGNUM, BN_CTX ] )
load_func( 'BN_mod_add', [ BIGNUM, BIGNUM, BIGNUM, BIGNUM, BN_CTX ] )
load_func( 'BN_mod_mul', [ BIGNUM, BIGNUM, BIGNUM, BIGNUM, BN_CTX ] )
load_func( 'BN_mod_exp', [ BIGNUM, BIGNUM, BIGNUM, BIGNUM, BN_CTX ] )
load_func( 'BN_rand', [ BIGNUM, ctypes.c_int, ctypes.c_int, ctypes.c_int ] )
load_func( 'BN_bn2bin', [ BIGNUM, ctypes.c_char_p ] )
load_func( 'BN_bin2bn', [ ctypes.c_char_p, ctypes.c_int, BIGNUM ], BIGNUM )
load_func( 'BN_hex2bn', [ ctypes.POINTER(BIGNUM), ctypes.c_char_p ] )
load_func( 'BN_bn2hex', [ BIGNUM ], ctypes.c_char_p )
load_func( 'CRYPTO_free', [ ctypes.c_char_p ] )
load_func( 'RAND_seed', [ ctypes.c_char_p, ctypes.c_int ] )
def BN_num_bytes(a):
    """Number of bytes needed for the big-endian encoding of BIGNUM *a*."""
    # Floor division keeps the result an int under Python 3 as well; true
    # division would yield a float and break ctypes.create_string_buffer.
    # Identical to the original on Python 2 ints.
    return (BN_num_bits(a) + 7) // 8
def BN_mod(rem,m,d,ctx):
    # OpenSSL exposes BN_mod only as a macro over BN_div, so replicate it:
    # rem = m % d, quotient discarded (dv=None).
    return BN_div(None, rem, m, d, ctx)
def BN_is_zero( n ):
    # A BIGNUM with zero significant words (top == 0) represents zero.
    return n[0].top == 0
def bn_to_bytes( n ):
    # Serialize BIGNUM n to its big-endian binary representation.
    b = ctypes.create_string_buffer( BN_num_bytes(n) )
    BN_bn2bin(n, b)
    return b.raw
def bytes_to_bn( dest_bn, bytes ):
    # Parse big-endian binary data into an existing BIGNUM, in place.
    # (The parameter name shadows the 'bytes' builtin; kept for API compat.)
    BN_bin2bn(bytes, len(bytes), dest_bn)
def H_str( hash_class, dest_bn, s ):
    """dest_bn = H(s): hash string *s* and store the digest as a BIGNUM.

    Bug fix: the original wrote to an undefined name 'dest' (NameError on
    any call) and allocated an unused string buffer; both removed.
    """
    d = hash_class(s).digest()
    BN_bin2bn(d, len(d), dest_bn)
def H_bn( hash_class, dest, n ):
    # dest = H(n): hash the big-endian bytes of BIGNUM n, store the digest
    # back into BIGNUM dest.
    bin = ctypes.create_string_buffer( BN_num_bytes(n) )
    BN_bn2bin(n, bin)
    d = hash_class( bin.raw ).digest()
    BN_bin2bn(d, len(d), dest)
def H_bn_bn_orig( hash_class, dest, n1, n2 ):
    # dest = H(n1 | n2): legacy (non-RFC-5054) form — the operands are
    # concatenated unpadded. The update order is protocol-critical.
    h = hash_class()
    bin1 = ctypes.create_string_buffer( BN_num_bytes(n1) )
    bin2 = ctypes.create_string_buffer( BN_num_bytes(n2) )
    BN_bn2bin(n1, bin1)
    BN_bn2bin(n2, bin2)
    h.update( bin1.raw )
    h.update( bin2.raw )
    d = h.digest()
    BN_bin2bn(d, len(d), dest)
def H_bn_bn_rfc5054( hash_class, dest, N, n1, n2 ):
    # dest = H(PAD(n1) | PAD(n2)): RFC 5054 form — each operand is
    # left-padded with zero bytes to the byte length of the modulus N
    # before hashing. Update order is protocol-critical.
    h = hash_class()
    len_N = BN_num_bytes(N)
    len_n1 = BN_num_bytes(n1)
    len_n2 = BN_num_bytes(n2)
    # Zero padding to len(N) for each operand (Python 2 byte strings).
    head = '\0'*(len_N-len_n1)
    middle = '\0'*(len_N-len_n2)
    bin1 = ctypes.create_string_buffer( len_n1 )
    bin2 = ctypes.create_string_buffer( len_n2 )
    BN_bn2bin(n1, bin1)
    BN_bn2bin(n2, bin2)
    h.update( head )
    h.update( bin1.raw )
    h.update( middle )
    h.update( bin2.raw )
    d = h.digest()
    BN_bin2bn(d, len(d), dest)
def H_bn_str( hash_class, dest, n, s ):
    # dest = H(n | s): hash the big-endian bytes of BIGNUM n followed by the
    # raw string s, storing the digest into BIGNUM dest.
    h = hash_class()
    bin = ctypes.create_string_buffer( BN_num_bytes(n) )
    BN_bn2bin(n, bin)
    h.update( bin.raw )
    h.update( s )
    d = h.digest()
    BN_bin2bn(d, len(d), dest)
def calculate_x( hash_class, dest, salt, username, password ):
    # SRP private key: x = H(salt | H(username ":" password)), written into
    # BIGNUM dest.
    up = hash_class('%s:%s' % (username, password )).digest()
    H_bn_str( hash_class, dest, salt, up )
def update_hash( ctx, n ):
    # Feed the big-endian bytes of BIGNUM n into the in-progress hash ctx.
    buff = ctypes.create_string_buffer( BN_num_bytes(n) )
    BN_bn2bin(n, buff)
    ctx.update( buff.raw )
def calculate_M( hash_class, N, g, I, s, A, B, K ):
    # Client proof: M = H( (H(N) xor H(g)) | H(I) | s | A | B | K ).
    # The update order is fixed by the SRP protocol; do not reorder.
    h = hash_class()
    h.update( HNxorg( hash_class, N, g ) )
    h.update( hash_class(I).digest() )
    update_hash( h, s )
    update_hash( h, A )
    update_hash( h, B )
    h.update( K )
    return h.digest()
def calculate_H_AMK( hash_class, A, M, K ):
    # Server proof: H(A | M | K), returned to the client after M verifies.
    h = hash_class()
    update_hash( h, A )
    h.update( M )
    h.update( K )
    return h.digest()
def HNxorg( hash_class, N, g ):
    # Return H(N) xor H(g) as a byte string (Python 2 str). Both digests
    # come from the same hash_class, so they have equal length.
    bN = ctypes.create_string_buffer( BN_num_bytes(N) )
    bg = ctypes.create_string_buffer( BN_num_bytes(g) )
    BN_bn2bin(N, bN)
    BN_bn2bin(g, bg)
    hN = hash_class( bN.raw ).digest()
    hg = hash_class( bg.raw ).digest()
    return ''.join( chr( ord(hN[i]) ^ ord(hg[i]) ) for i in range(0,len(hN)) )
def get_ngk( hash_class, ng_type, n_hex, g_hex, rfc5054=False ):
    """Return (N, g, k) as fresh BIGNUMs for the requested group.

    Predefined groups take their hex constants from _ng_const; NG_CUSTOM
    uses the caller-supplied n_hex/g_hex. k = H(N, g), zero-padded per
    RFC 5054 when *rfc5054* is set, legacy concatenation otherwise.
    Caller owns (and must BN_free) the returned BIGNUMs.
    """
    # Bug fix: the original tested 'ng_type < NG_CUSTOM', which wrongly
    # excluded NG_1536 (enum value 5 > NG_CUSTOM's 4) from the constant
    # table even though the inner index expression maps 5 -> entry 4.
    if ng_type != NG_CUSTOM:
        n_hex, g_hex = _ng_const[ ng_type if ng_type < NG_CUSTOM else ng_type - 1 ]
    N = BN_new()
    g = BN_new()
    k = BN_new()
    BN_hex2bn( N, n_hex )
    BN_hex2bn( g, g_hex )
    if rfc5054:
        H_bn_bn_rfc5054(hash_class, k, N, N, g)
    else:
        H_bn_bn_orig(hash_class, k, N, g)
    return N, g, k
def create_salted_verification_key( username, password, hash_alg=SHA1, ng_type=NG_2048, n_hex=None, g_hex=None ):
    # Registration step: generate a random 32-bit salt s and the password
    # verifier v = g^x mod N with x = H(s | H(username ":" password)).
    # Returns (salt, verifier) as raw byte strings for server-side storage.
    # All BIGNUM temporaries are freed before returning.
    if ng_type == NG_CUSTOM and (n_hex is None or g_hex is None):
        raise ValueError("Both n_hex and g_hex are required when ng_type = NG_CUSTOM")
    s = BN_new()
    v = BN_new()
    x = BN_new()
    ctx = BN_CTX_new()
    hash_class = _hash_map[ hash_alg ]
    # NOTE(review): rfc5054 flag is not forwarded here, so k is always
    # derived the legacy way; k is unused for computing v in any case.
    N,g,k = get_ngk( hash_class, ng_type, n_hex, g_hex )
    BN_rand(s, 32, -1, 0);
    calculate_x( hash_class, x, s, username, password )
    BN_mod_exp(v, g, x, N, ctx)
    salt = bn_to_bytes( s )
    verifier = bn_to_bytes( v )
    BN_free(s)
    BN_free(v)
    BN_free(x)
    BN_free(N)
    BN_free(g)
    BN_free(k)
    BN_CTX_free(ctx)
    return salt, verifier
class Verifier (object):
    """Server side of one SRP-6(a) authentication attempt.

    Built from the stored (salt, verifier) pair and the client's public
    ephemeral A; produces the challenge (s, B) and checks the client's
    session proof M.
    """
    def __init__(self, username, bytes_s, bytes_v, bytes_A, hash_alg=SHA1, ng_type=NG_2048, n_hex=None, g_hex=None,
                 rfc5054_compat=False):
        # bytes_s / bytes_v: salt and verifier captured at registration.
        # bytes_A: client's public ephemeral value, big-endian bytes.
        if ng_type == NG_CUSTOM and (n_hex is None or g_hex is None):
            raise ValueError("Both n_hex and g_hex are required when ng_type = NG_CUSTOM")
        self.A = BN_new()
        self.B = BN_new()
        self.K = None
        self.S = BN_new()
        self.u = BN_new()
        self.b = BN_new()
        self.s = BN_new()
        self.v = BN_new()
        self.tmp1 = BN_new()
        self.tmp2 = BN_new()
        self.ctx = BN_CTX_new()
        self.I = username
        self.M = None
        self.H_AMK = None
        self._authenticated = False
        self.rfc5054 = rfc5054_compat
        self.safety_failed = False
        hash_class = _hash_map[ hash_alg ]
        N,g,k = get_ngk( hash_class, ng_type, n_hex, g_hex, rfc5054_compat )
        self.hash_class = hash_class
        self.N = N
        self.g = g
        self.k = k
        bytes_to_bn( self.s, bytes_s )
        bytes_to_bn( self.v, bytes_v )
        bytes_to_bn( self.A, bytes_A )
        # SRP-6a safety check: reject A == 0 (mod N); such an A would make
        # the session key predictable to a malicious client.
        BN_mod(self.tmp1, self.A, N, self.ctx)
        if BN_is_zero(self.tmp1):
            self.safety_failed = True
        else:
            # Random 256-bit secret ephemeral b.
            BN_rand(self.b, 256, -1, 0)
            # B = kv + g^b
            if rfc5054_compat:
                # RFC 5054: compute B fully mod N and pad hash inputs to len(N).
                BN_mod_mul(self.tmp1, k, self.v, N, self.ctx)
                BN_mod_exp(self.tmp2, g, self.b, N, self.ctx)
                BN_mod_add(self.B, self.tmp1, self.tmp2, N, self.ctx)
                H_bn_bn_rfc5054(hash_class, self.u, self.N, self.A, self.B)
            else:
                # Legacy mode: B is not reduced mod N; u = H(A | B) unpadded.
                BN_mul(self.tmp1, k, self.v, self.ctx)
                BN_mod_exp(self.tmp2, g, self.b, N, self.ctx)
                BN_add(self.B, self.tmp1, self.tmp2)
                H_bn_bn_orig(hash_class, self.u, self.A, self.B)
            # S = (A *(v^u)) ^ b
            BN_mod_exp(self.tmp1, self.v, self.u, N, self.ctx)
            BN_mul(self.tmp2, self.A, self.tmp1, self.ctx)
            BN_mod_exp(self.S, self.tmp2, self.b, N, self.ctx)
            # Session key plus the client (M) and server (H_AMK) proofs.
            self.K = hash_class( bn_to_bytes(self.S) ).digest()
            self.M = calculate_M( hash_class, N, g, self.I, self.s, self.A, self.B, self.K )
            self.H_AMK = calculate_H_AMK( hash_class, self.A, self.M, self.K )
    def __del__(self):
        # Explicitly release the OpenSSL allocations; ctypes won't.
        if not hasattr(self, 'A'):
            return # __init__ threw exception. no clean up required
        BN_free(self.A)
        BN_free(self.B)
        BN_free(self.S)
        BN_free(self.u)
        BN_free(self.b)
        BN_free(self.s)
        BN_free(self.v)
        BN_free(self.N)
        BN_free(self.g)
        BN_free(self.k)
        BN_free(self.tmp1)
        BN_free(self.tmp2)
        BN_CTX_free(self.ctx)
    def authenticated(self):
        # True once verify_session() has accepted the client's proof.
        return self._authenticated
    def get_username(self):
        return self.I
    def get_session_key(self):
        # Shared session key, only released after successful authentication.
        return self.K if self._authenticated else None
    # returns (bytes_s, bytes_B) on success, (None,None) if SRP-6a safety check fails
    def get_challenge(self):
        if self.safety_failed:
            return None, None
        else:
            return (bn_to_bytes(self.s), bn_to_bytes(self.B))
    def verify_session(self, user_M):
        # Accept the client proof and return the server proof H_AMK on a
        # match; returns None (implicitly) on mismatch.
        if user_M == self.M:
            self._authenticated = True
            return self.H_AMK
class User (object):
    """Client side of an SRP-6a authentication exchange.

    Holds the username/password and the client's ephemeral key pair in
    OpenSSL BN objects; all BNs are released in __del__.
    """

    def __init__(self, username, password, hash_alg=SHA1, ng_type=NG_2048, n_hex=None, g_hex=None,
                 rfc5054_compat=False):
        if ng_type == NG_CUSTOM and (n_hex is None or g_hex is None):
            raise ValueError("Both n_hex and g_hex are required when ng_type = NG_CUSTOM")
        self.username = username
        self.password = password
        # OpenSSL bignum work areas; every BN_new() here is BN_free'd in __del__.
        self.a = BN_new()      # client's private ephemeral value
        self.A = BN_new()      # client's public ephemeral value
        self.B = BN_new()      # server's public ephemeral value
        self.s = BN_new()      # salt (from the server challenge)
        self.S = BN_new()      # raw shared secret
        self.u = BN_new()      # scrambling parameter
        self.x = BN_new()      # private key derived from salt+password
        self.v = BN_new()      # password verifier g^x
        self.tmp1 = BN_new()
        self.tmp2 = BN_new()
        self.tmp3 = BN_new()
        self.ctx = BN_CTX_new()
        self.M = None          # client proof sent to the server
        self.K = None          # session key (digest bytes) once computed
        self.H_AMK = None      # expected server proof
        self._authenticated = False
        self.rfc5054 = rfc5054_compat
        hash_class = _hash_map[ hash_alg ]
        N,g,k = get_ngk( hash_class, ng_type, n_hex, g_hex, rfc5054_compat )
        self.hash_class = hash_class
        self.N = N
        self.g = g
        self.k = k
        # a is a 256-bit random private value; A = g^a mod N.
        BN_rand(self.a, 256, -1, 0)
        BN_mod_exp(self.A, g, self.a, N, self.ctx)

    def __del__(self):
        if not hasattr(self, 'a'):
            return # __init__ threw exception. no clean up required
        BN_free(self.a)
        BN_free(self.A)
        BN_free(self.B)
        BN_free(self.s)
        BN_free(self.S)
        BN_free(self.u)
        BN_free(self.x)
        BN_free(self.v)
        BN_free(self.N)
        BN_free(self.g)
        BN_free(self.k)
        BN_free(self.tmp1)
        BN_free(self.tmp2)
        BN_free(self.tmp3)
        BN_CTX_free(self.ctx)

    def authenticated(self):
        # True once verify_session() has accepted the server's proof.
        return self._authenticated

    def get_username(self):
        return self.username

    def get_session_key(self):
        # The shared key is only released after mutual authentication.
        return self.K if self._authenticated else None

    def start_authentication(self):
        # First message to the server: identity plus public value A.
        return (self.username, bn_to_bytes(self.A))

    # Returns M or None if SRP-6a safety check is violated
    def process_challenge(self, bytes_s, bytes_B):
        hash_class = self.hash_class
        N = self.N
        g = self.g
        k = self.k
        bytes_to_bn( self.s, bytes_s )
        bytes_to_bn( self.B, bytes_B )
        # SRP-6a safety check: B must not be zero.
        if BN_is_zero(self.B):
            return None
        if self.rfc5054:
            # RFC 5054: u = H(PAD(A) | PAD(B))
            H_bn_bn_rfc5054(hash_class, self.u, self.N, self.A, self.B)
        else:
            H_bn_bn_orig(hash_class, self.u, self.A, self.B)
        # SRP-6a safety check: u must not be zero.
        if BN_is_zero(self.u):
            return None
        # x = H(s, I, P); v = g^x mod N
        calculate_x( hash_class, self.x, self.s, self.username, self.password )
        BN_mod_exp(self.v, g, self.x, N, self.ctx)
        # S = (B - k*(g^x)) ^ (a + ux)
        BN_mul(self.tmp1, self.u, self.x, self.ctx)
        BN_add(self.tmp2, self.a, self.tmp1) # tmp2 = (a + ux)
        BN_mod_exp(self.tmp1, g, self.x, N, self.ctx)
        BN_mul(self.tmp3, k, self.tmp1, self.ctx) # tmp3 = k*(g^x)
        BN_sub(self.tmp1, self.B, self.tmp3) # tmp1 = (B - K*(g^x))
        BN_mod_exp(self.S, self.tmp1, self.tmp2, N, self.ctx)
        self.K = hash_class( bn_to_bytes(self.S) ).digest()
        self.M = calculate_M( hash_class, N, g, self.username, self.s, self.A, self.B, self.K )
        self.H_AMK = calculate_H_AMK( hash_class, self.A, self.M, self.K )
        return self.M

    def verify_session(self, host_HAMK):
        # Accept the session only if the server's proof matches the expected one.
        if self.H_AMK == host_HAMK:
            self._authenticated = True
#---------------------------------------------------------
# Init
#
# Seed OpenSSL's PRNG with 32 bytes of OS entropy at import time so the
# BN_rand() calls above produce unpredictable ephemeral values.
RAND_seed( os.urandom(32), 32 )
| arpa2/pysrp-pkcs11 | srp/_ctsrp.py | Python | mit | 20,354 |
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang = Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api')
| spezifanta/Paste-It | api/v01/views.py | Python | mit | 749 |
from lampost.di.config import config_value
from lampmud.lpmud.archetype import PlayerRace
from lampmud.lpmud import attributes
def first_time_setup(args, db):
    """Bootstrap a fresh mud database.

    Creates the root area with its immortal start room, initializes the
    attribute system, installs the default player race, and creates the
    first immortal player at the 'supreme' level.

    :param args: parsed command-line arguments (uses args.imm_name)
    :param db: datastore facade used to create and save objects
    :return: the newly created player object
    """
    area_id = config_value('root_area_id')
    start_room = "{0}:0".format(area_id)
    db.create_object('area', {'dbo_id': area_id, 'name': area_id, 'next_room_id': 1})
    db.create_object('room', {'dbo_id': start_room, 'title': "Immortal Start Room", 'desc': "A brand new start room for immortals."})
    # Attribute definitions must exist before a race or player can be built.
    attributes.init()
    race_dto = PlayerRace.new_dto()
    race_dto.update(config_value('default_player_race'))
    race = db.create_object(PlayerRace, race_dto)
    top_level = config_value('imm_levels')['supreme']
    new_player = db.create_object('player', {
        'dbo_id': args.imm_name.lower(), 'room_id': start_room,
        'race': race.dbo_id, 'home_room': start_room,
        'imm_level': top_level})
    db.save_object(new_player)
    return new_player
| genzgd/Lampost-Mud | lampmud/lpmud/newsetup.py | Python | mit | 934 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: create the initial location tables.

    Auto-generated by South; edit with care — the `models` dict below is
    a frozen snapshot of the ORM at generation time, not live code.
    """

    def forwards(self, orm):
        # Adding model 'LocationSourceType'
        db.create_table('location_locationsourcetype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('icon', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal('location', ['LocationSourceType'])

        # Adding model 'LocationSource'
        db.create_table('location_locationsource', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['location.LocationSourceType'])),
            ('data', self.gf('jsonfield.fields.JSONField')()),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('location', ['LocationSource'])

        # Adding model 'LocationSnapshot' (geographic point per user/time)
        db.create_table('location_locationsnapshot', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(geography=True)),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='points', null=True, to=orm['location.LocationSource'])),
            ('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('location', ['LocationSnapshot'])

    def backwards(self, orm):
        # Reverse of forwards(): drop the three tables.
        # Deleting model 'LocationSourceType'
        db.delete_table('location_locationsourcetype')

        # Deleting model 'LocationSource'
        db.delete_table('location_locationsource')

        # Deleting model 'LocationSnapshot'
        db.delete_table('location_locationsnapshot')

    # Frozen ORM state used by South when running this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'location.locationsnapshot': {
            'Meta': {'object_name': 'LocationSnapshot'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.contrib.gis.db.models.fields.PointField', [], {'geography': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'points'", 'null': 'True', 'to': "orm['location.LocationSource']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'location.locationsource': {
            'Meta': {'object_name': 'LocationSource'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'data': ('jsonfield.fields.JSONField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['location.LocationSourceType']"}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'location.locationsourcetype': {
            'Meta': {'object_name': 'LocationSourceType'},
            'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
complete_apps = ['location'] | coddingtonbear/django-location | location/migrations/0001_initial.py | Python | mit | 7,723 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Jeff Weiss <trac@jeffweiss.org>
# Copyright (C) 2006 Andres Salomon <dilinger@athenacr.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os, re, types
from genshi.core import Markup
from trac.core import *
from trac.config import Option
from trac.db.api import IDatabaseConnector, _parse_db_str
from trac.db.util import ConnectionWrapper, IterableCursor
from trac.util import get_pkginfo
from trac.util.compat import close_fds
from trac.util.text import exception_to_unicode, to_unicode
from trac.util.translation import _
# Characters that must be escaped inside SQL LIKE patterns; '/' is the
# escape character declared in MySQLConnection.like().
_like_escape_re = re.compile(r'([/_%])')
try:
    import MySQLdb
    import MySQLdb.cursors
    has_mysqldb = True

    class MySQLUnicodeCursor(MySQLdb.cursors.Cursor):
        """Cursor that decodes raw UTF-8 byte strings into unicode values."""

        def _convert_row(self, row):
            # Decode str cells to unicode, pass every other type through.
            converted = []
            for value in row:
                if isinstance(value, str):
                    converted.append(value.decode('utf-8'))
                else:
                    converted.append(value)
            return tuple(converted)

        def fetchone(self):
            row = super(MySQLUnicodeCursor, self).fetchone()
            if row:
                return self._convert_row(row)
            return None

        def fetchmany(self, num):
            rows = super(MySQLUnicodeCursor, self).fetchmany(num)
            if rows is None:
                return []
            return [self._convert_row(row) for row in rows]

        def fetchall(self):
            rows = super(MySQLUnicodeCursor, self).fetchall()
            if rows is None:
                return []
            return [self._convert_row(row) for row in rows]
except ImportError:
    has_mysqldb = False
# Mapping from "abstract" SQL types to DB-specific types.
# Trac schema definitions use the generic 'int64'; MySQL spells it 'bigint'.
_type_map = {
    'int64': 'bigint',
}
class MySQLConnector(Component):
"""Database connector for MySQL version 4.1 and greater.
Database URLs should be of the form:
{{{
mysql://user[:password]@host[:port]/database
}}}
"""
implements(IDatabaseConnector)
mysqldump_path = Option('trac', 'mysqldump_path', 'mysqldump',
"""Location of mysqldump for MySQL database backups""")
def __init__(self):
self._version = None
self.error = None
def get_supported_schemes(self):
if not has_mysqldb:
self.error = _("Cannot load Python bindings for MySQL")
yield ('mysql', self.error and -1 or 1)
def get_connection(self, path, log=None, user=None, password=None,
host=None, port=None, params={}):
cnx = MySQLConnection(path, log, user, password, host, port, params)
if not self._version:
self._version = get_pkginfo(MySQLdb).get('version',
MySQLdb.__version__)
mysql_info = 'server: "%s", client: "%s", thread-safe: %s' % \
(cnx.cnx.get_server_info(),
MySQLdb.get_client_info(),
MySQLdb.thread_safe())
self.env.systeminfo.extend([('MySQL', mysql_info),
('MySQLdb', self._version)])
self.required = True
return cnx
def init_db(self, path, log=None, user=None, password=None, host=None,
port=None, params={}):
cnx = self.get_connection(path, log, user, password, host, port,
params)
cursor = cnx.cursor()
from trac.db_default import schema
for table in schema:
for stmt in self.to_sql(table):
self.env.log.debug(stmt)
cursor.execute(stmt)
cnx.commit()
def _collist(self, table, columns):
"""Take a list of columns and impose limits on each so that indexing
works properly.
Some Versions of MySQL limit each index prefix to 500 bytes total, with
a max of 255 bytes per column.
"""
cols = []
limit = 333 / len(columns)
if limit > 255:
limit = 255
for c in columns:
name = '`%s`' % c
table_col = filter((lambda x: x.name == c), table.columns)
if len(table_col) == 1 and table_col[0].type.lower() == 'text':
if table_col[0].key_size is not None:
name += '(%d)' % table_col[0].key_size
elif name == '`rev`':
name += '(20)'
elif name == '`path`':
name += '(255)'
elif name == '`change_type`':
name += '(2)'
else:
name += '(%s)' % limit
# For non-text columns, we simply throw away the extra bytes.
# That could certainly be optimized better, but for now let's KISS.
cols.append(name)
return ','.join(cols)
def to_sql(self, table):
sql = ['CREATE TABLE %s (' % table.name]
coldefs = []
for column in table.columns:
ctype = column.type
ctype = _type_map.get(ctype, ctype)
if column.auto_increment:
ctype = 'INT UNSIGNED NOT NULL AUTO_INCREMENT'
# Override the column type, as a text field cannot
# use auto_increment.
column.type = 'int'
coldefs.append(' `%s` %s' % (column.name, ctype))
if len(table.key) > 0:
coldefs.append(' PRIMARY KEY (%s)' %
self._collist(table, table.key))
sql.append(',\n'.join(coldefs) + '\n)')
yield '\n'.join(sql)
for index in table.indices:
unique = index.unique and 'UNIQUE' or ''
yield 'CREATE %s INDEX %s_%s_idx ON %s (%s);' % (unique, table.name,
'_'.join(index.columns), table.name,
self._collist(table, index.columns))
def alter_column_types(self, table, columns):
"""Yield SQL statements altering the type of one or more columns of
a table.
Type changes are specified as a `columns` dict mapping column names
to `(from, to)` SQL type tuples.
"""
alterations = []
for name, (from_, to) in sorted(columns.iteritems()):
to = _type_map.get(to, to)
if to != _type_map.get(from_, from_):
alterations.append((name, to))
if alterations:
yield "ALTER TABLE %s %s" % (table,
', '.join("MODIFY %s %s" % each
for each in alterations))
def backup(self, dest_file):
from subprocess import Popen, PIPE
db_url = self.env.config.get('trac', 'database')
scheme, db_prop = _parse_db_str(db_url)
db_name = os.path.basename(db_prop['path'])
args = [self.mysqldump_path]
if 'host' in db_prop:
args.extend(['-h', db_prop['host']])
if 'port' in db_prop:
args.extend(['-P', str(db_prop['port'])])
if 'user' in db_prop:
args.extend(['-u', db_prop['user']])
args.extend(['-r', dest_file, db_name])
environ = os.environ.copy()
if 'password' in db_prop:
environ['MYSQL_PWD'] = str(db_prop['password'])
try:
p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds)
except OSError, e:
raise TracError(_("Unable to run %(path)s: %(msg)s",
path=self.pg_dump_path,
msg=exception_to_unicode(e)))
errmsg = p.communicate()[1]
if p.returncode != 0:
raise TracError(_("mysqldump failed: %(msg)s",
msg=to_unicode(errmsg.strip())))
if not os.path.exists(dest_file):
raise TracError(_("No destination file created"))
return dest_file
class MySQLConnection(ConnectionWrapper):
    """Connection wrapper for MySQL."""

    # Connections may be reused by Trac's connection pool.
    poolable = True

    def __init__(self, path, log, user=None, password=None, host=None,
                 port=None, params={}):
        # The database name arrives as a URL path component ('/dbname').
        if path.startswith('/'):
            path = path[1:]
        if password == None:
            password = ''
        if port == None:
            port = 3306
        cnx = MySQLdb.connect(db=path, user=user, passwd=password,
                              host=host, port=port, charset='utf8')
        if hasattr(cnx, 'encoders'):
            # 'encoders' undocumented but present since 1.2.1 (r422)
            # Serialize Genshi Markup objects the same way as unicode strings.
            cnx.encoders[Markup] = cnx.encoders[types.UnicodeType]
        ConnectionWrapper.__init__(self, cnx, log)
        self._is_closed = False

    def cast(self, column, type):
        # MySQL's CAST() only understands SIGNED/CHAR, not INT/TEXT.
        if type == 'int'or type == 'int64':
            type = 'signed'
        elif type == 'text':
            type = 'char'
        return 'CAST(%s AS %s)' % (column, type)

    def concat(self, *args):
        return 'concat(%s)' % ', '.join(args)

    def like(self):
        """Return a case-insensitive LIKE clause."""
        return "LIKE %s COLLATE utf8_general_ci ESCAPE '/'"

    def like_escape(self, text):
        # Escape LIKE wildcards with '/', matching the ESCAPE clause above.
        return _like_escape_re.sub(r'/\1', text)

    def quote(self, identifier):
        """Return the quoted identifier."""
        return "`%s`" % identifier

    def get_last_id(self, cursor, table, column='id'):
        return cursor.lastrowid

    def update_sequence(self, cursor, table, column='id'):
        # MySQL handles sequence updates automagically
        pass

    def rollback(self):
        # ping() reconnects a stale connection before attempting rollback.
        self.cnx.ping()
        try:
            self.cnx.rollback()
        except MySQLdb.ProgrammingError:
            self._is_closed = True

    def close(self):
        if not self._is_closed:
            try:
                self.cnx.close()
            except MySQLdb.ProgrammingError:
                pass # this error would mean it's already closed.  So, ignore
            self._is_closed = True

    def cursor(self):
        return IterableCursor(MySQLUnicodeCursor(self.cnx), self.log)
| zjj/trac_hack | trac/db/mysql_backend.py | Python | bsd-3-clause | 10,379 |
# -*- coding: utf-8 -*-
import attr
from datetime import datetime
import logging
import shutil
import os
import pandas as pd
import numpy as np
from pandas.io.common import EmptyDataError
import re
from .const import BLAST_TABLE_COLS
from ..utils import exc_exists, run_command
@attr.s
class BlastRunner:
    """Manage a temporary BLAST workspace for one subject FASTA file.

    Copies the FASTA into a scratch directory, builds a nucleotide BLAST
    database with makeblastdb, runs blastn queries against it, and cleans
    the directory up afterwards.  Usable as a context manager.
    """
    # Path to the subject FASTA file (must exist; validated below).
    fasta_path = attr.ib()
    # Scratch directory root; a unique suffix is appended on collision.
    tmp_work_dir = attr.ib(default='/tmp', validator=attr.validators.instance_of(str))
    # Set to True once makeblastdb has produced the .nin index file.
    blast_db_created = attr.ib(default=False, validator=attr.validators.instance_of(bool))
    # Executable names; must be resolvable on the user's $PATH.
    makeblastdb_exc = attr.ib(default='makeblastdb', validator=attr.validators.instance_of(str))
    blastn_exc = attr.ib(default='blastn', validator=attr.validators.instance_of(str))

    @makeblastdb_exc.validator
    def _check_makeblastdb_exists(self, attribute, value):
        if not exc_exists(value):
            raise OSError('makeblastdb executable "{}" does not exist in the user $PATH'.format(value))

    @blastn_exc.validator
    def _check_blastn_exists(self, attribute, value):
        if not exc_exists(value):
            raise OSError('blast executable "{}" does not exist in the user $PATH'.format(value))

    @fasta_path.validator
    def _fasta_path_exists(self, attribute, value):
        if not os.path.exists(value):
            raise OSError('FASTA file does not exist at {}'.format(value))

    def _create_tmp_folder(self):
        """Create the scratch directory, appending _1, _2, ... until a
        fresh name succeeds; records and returns the final path."""
        count = 1
        tmp_dir = self.tmp_work_dir
        while True:
            try:
                logging.info('Trying to create analysis directory at: %s', tmp_dir)
                os.makedirs(tmp_dir)
                break
            except OSError as e:
                logging.warning('Error on creation of tmp analysis directory "{}"! {}'.format(
                    tmp_dir,
                    e
                ))
                tmp_dir = '{}_{}'.format(self.tmp_work_dir, count)
                count += 1
        self.tmp_work_dir = tmp_dir
        return self.tmp_work_dir

    def _copy_fasta_to_work_dir(self):
        """Copy the subject FASTA into the scratch dir, replacing
        non-word characters in the stem so BLAST tooling accepts it."""
        filename = os.path.basename(self.fasta_path)
        filename, ext = os.path.splitext(filename)
        filename_no_spaces = re.sub(r'\W', '_', filename)
        dest_path = os.path.join(self.tmp_work_dir, filename_no_spaces + ext)
        if self.fasta_path == dest_path:
            # Already in place; nothing to copy.
            self.tmp_fasta_path = dest_path
            return dest_path
        shutil.copyfile(self.fasta_path, dest_path)
        self.tmp_fasta_path = dest_path
        return dest_path

    def _run_makeblastdb(self):
        """Build the nucleotide BLAST DB for the copied FASTA; a pre-existing
        .nin index short-circuits the build.  Raises on failure."""
        work_dir = os.path.dirname(self.tmp_fasta_path)
        filename = os.path.basename(self.tmp_fasta_path)
        nin_filepath = os.path.join(work_dir, filename + '.nin')
        if os.path.exists(nin_filepath):
            self.blast_db_created = True
            return self.tmp_fasta_path
        cmdlist = [self.makeblastdb_exc,
                   '-in', '{}'.format(self.tmp_fasta_path),
                   '-dbtype', 'nucl']
        exit_code, stdout, stderr = run_command(cmdlist)
        if exit_code != 0:
            raise Exception('Error {}: makeblastdb could not create a BLAST DB for {}. stderr: {}'.format(exit_code,
                                                                                                          self.tmp_fasta_path,
                                                                                                          stderr))
        if stdout is not None and stdout != '':
            logging.debug('makeblastdb on {0} STDOUT: {1}'.format(self.tmp_fasta_path, stdout))
        if stderr is not None and stderr != '':
            logging.debug('makeblastdb on {0} STDERR: {1}'.format(self.tmp_fasta_path, stderr))
        if os.path.exists(nin_filepath):
            self.blast_db_created = True
            return self.tmp_fasta_path
        else:
            ex_msg = 'makeblastdb was not able to create a BLAST DB for {0}. STDERR: {1}'.format(filename, stderr)
            logging.error(ex_msg)
            raise Exception(ex_msg)

    def blast_against_query(self, query_fasta_path, blast_task='megablast', evalue=1e-4, min_pid=85, word_size=22):
        """Run blastn of `query_fasta_path` against the subject DB.

        Lazily prepares the workspace/DB if needed.  Returns the path of
        the tab-separated output file (columns per BLAST_TABLE_COLS);
        raises if blastn produced no output file.
        """
        if not self.blast_db_created:
            self.prep_blast()
        query_filename = os.path.basename(query_fasta_path)
        db_filename = os.path.basename(self.tmp_fasta_path)
        # Timestamp keeps repeated runs against the same DB from colliding.
        timestamp = '{:%Y%b%d_%H_%M_%S}'.format(datetime.now())
        outfile = os.path.join(self.tmp_work_dir, '{}-{}-{}.blast'.format(query_filename,
                                                                          db_filename,
                                                                          timestamp))
        cmd_list = [self.blastn_exc,
                    '-task', blast_task,
                    '-query', query_fasta_path,
                    '-db', '{}'.format(self.tmp_fasta_path),
                    '-word_size', '{}'.format(word_size),
                    '-evalue', '{}'.format(evalue),
                    '-dust', 'no',
                    '-perc_identity', '{}'.format(min_pid),
                    '-out', outfile,
                    '-outfmt', '6 {}'.format(' '.join(BLAST_TABLE_COLS))]
        logging.info('Running commandline "{}"'.format(' '.join(cmd_list)))
        exit_code, stdout, stderr = run_command(cmd_list)
        if os.path.exists(outfile):
            return outfile
        else:
            err_msg_fmt = 'error code {}: blastn on db {} and query {} did not produce expected output file at {}. stderr: {}'
            ex_msg = err_msg_fmt.format(exit_code, db_filename, query_filename, outfile, stderr)
            raise Exception(ex_msg)

    def cleanup(self):
        """Remove the scratch directory and mark the DB as gone."""
        self.blast_db_created = False
        shutil.rmtree(self.tmp_work_dir)

    def prep_blast(self):
        """Create workspace, stage the FASTA and build the BLAST DB."""
        self._create_tmp_folder()
        self._copy_fasta_to_work_dir()
        self._run_makeblastdb()

    def __enter__(self):
        if self.blast_db_created:
            return self
        self.prep_blast()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cleanup()
@attr.s
class BlastReader:
    """Parse and query a tabular (outfmt 6) blastn result file.

    Usable as a context manager; `df` holds the parsed results after
    parse() has run.
    """
    # Path to the blastn tabular output file.
    blast_outfile = attr.ib(validator=attr.validators.instance_of(str))
    # Parsed results; populated by parse(), None until then.
    df = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(pd.DataFrame)))

    def parse(self):
        """Parse tabular blastn output file into a pandas DataFrame

        Sort the DataFrame by BLAST bitscore, compute query coverage and if result is truncated by end of subject
        sequence/contig.

        Returns:
            pandas.DataFrame: dataframe of tabular BLAST results
            None: if no results could be parsed from BLAST output file

        Exceptions:
            EmptyDataError: No data could be parsed from the `blastn` output file
        """
        try:
            self.df = pd.read_table(self.blast_outfile, header=None)
            self.df.columns = BLAST_TABLE_COLS
            # calculate the coverage for when results need to be validated
            self.df.loc[:, 'coverage'] = self.df.length / self.df.qlen
            self.df.sort_values(by='bitscore', ascending=False, inplace=True)
            self.df.loc[:, 'is_trunc'] = BlastReader.trunc(qstart=self.df.qstart,
                                                           qend=self.df.qend,
                                                           qlen=self.df.qlen,
                                                           sstart=self.df.sstart,
                                                           send=self.df.send,
                                                           slen=self.df.slen)
            return self.df
        except EmptyDataError as exc:
            # blastn produced an empty file, i.e. zero hits.
            logging.warning('No BLASTN results to parse from file %s', self.blast_outfile)
            return None

    def to_dict(self):
        # Dict form of the parsed results, or None before/without parse().
        if self.df is not None:
            return self.df.to_dict()
        else:
            return None

    @staticmethod
    def trunc(qstart, qend, sstart, send, qlen, slen):
        """Check if a query sequence is truncated by the end of a subject sequence

        Args:
            qstart (int pandas.Series): Query sequence start index
            qend (int pandas.Series): Query sequence end index
            sstart (int pandas.Series): Subject sequence start index
            send (int pandas.Series): Subject sequence end index
            qlen (int pandas.Series): Query sequence length
            slen (int pandas.Series): Subject sequence length

        Returns:
            Boolean pandas.Series: Result truncated by subject sequence end?
        """
        # min/max of (sstart, send) computed arithmetically so the test is
        # orientation-independent (BLAST reports reverse hits with send < sstart).
        ssum2 = (send + sstart) / 2.0
        sabs2 = np.abs(send - sstart) / 2.0
        smax = ssum2 + sabs2
        smin = ssum2 - sabs2
        q_match_len = np.abs(qstart - qend) + 1
        # Partial query match that runs into either end of the subject.
        return (q_match_len < qlen) & ((smax >= slen) | (smin <= 1))

    def perfect_matches(self):
        """
        Return pandas DataFrame with perfect BLAST matches (100% identity and coverage)

        Returns:
            pandas.DataFrame or None: DataFrame of perfect BLAST matches or None if no perfect matches exist
        """
        if self.df is None:
            return None
        df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
        if df_perfect_matches.shape[0] == 0:
            return None
        return df_perfect_matches

    def __enter__(self):
        if self.blast_outfile is None:
            return self
        self.parse()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Drop parsed state so the reader cannot be reused accidentally.
        self.df = None
        self.blast_outfile = None
| peterk87/heidelberg_subtyping | heidelberg_subtyping/blast_wrapper/__init__.py | Python | gpl-3.0 | 9,579 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''cg_minst.py: conges multi-installer
outil d'installation multiple adapté au portage des applications
conges d'un env vers un autre
'''
import getpass, sys, os, subprocess, pprint, string, re, time
from traceback import print_exc
from cg_lib import exec_sql_file
import MySQLdb
# Valid values for the --cmd option.
lcmde = ['dryrun', 'realrun']
# Command name; filled in from argv[0] by mygetopt().
scmd = ""
version = "1_0"
def usage(cmd):
    """Print the command-line help for *cmd*, including the expected
    layout of the configuration file and the list of valid sub-commands.

    (The embedded example text is user-facing output and is left as-is,
    French comments included.)
    """
    fmt = ''' Usage: %s \
[--help] [--debug] --conf=<filename> --cmd=commande
 conf file should define
{
 'rootdb': {'id':'root', 'pw':'' },
 # pw saisie en interactif
 'cdbprefixe': 'r1_',
 'cdbcharset': 'latin1',
 # les base de donnees seront nomme RRRRR_sg
 'crefpath' : '/tmp/work/v2ns',
 # endroit ou est installer logiciel de reference
 'cdbconnectpath' : 'dbconnect.php',
 # path relatif au fichier dbconnect.php
 'csqlinstallpath' : 'install/sql/php_conges_v1.4.2ac2_01.sql',
 # path relatif au fichier sql de creation initiale de la base de donnees
 'cdestpath': '/tmp/work/srv/www/conges',
 # endroit ou seront installes les instances logicielles
 'clinstance': [ ['sg','c_sg_dba','******'] ,
                 ['dsacn','c_dsacn_dba','******'] ,
               ]
 # liste des instances : nom, usergestionnaire, passwd
 # si passwd est "", il sera demandé en interactif
}
 cmd among [ %s ]
'''
    print fmt % (cmd, string.join(lcmde," "))
def neatconflines(ofile):
    """Read a config file object and return its content as one string.

    Comment lines (starting with '#') are discarded; the remaining lines
    are stripped of their line endings and joined with single spaces, so
    the result can be eval()'d as one Python expression.

    Fixes: the original sliced off the last character of every line
    (`sali[:-1]`), which silently dropped a real character when the final
    line had no trailing newline; rstrip also copes with CRLF files.
    Also replaces `string.join` (removed in Python 3) with str.join.
    """
    lrealine = []
    while 1:
        sali = ofile.readline()
        if sali == "":
            break          # EOF
        elif sali[0] == "#":
            pass           # discard comment
        else:
            lrealine.append(sali.rstrip("\r\n"))
    return " ".join(lrealine)
def mygetopt(cmd,largs):
    ''' process argument and if success, return
    command name, debug mode , odbifile= database access, la commande ,
    un dictionnaire avec les options non traitees '''
    # Command name = last path component of argv[0].
    lpathitem = string.split(sys.argv[0],'/')
    sacmd = lpathitem[-1]
    scom = "dryrun"   # default
    debug = 0
    odbifile = None   # open file object for the --conf file
    dallopt, dropt = {}, {}
    idx = 0
    # --key=value form
    soptegex1 = re.compile('^--([^\=]+)\=(.*)')
    # bare --flag form
    soptegex2 = re.compile('^--(.*)')
    # First pass: split all arguments into the dallopt dict.
    while idx < len(largs) :
        s1m = soptegex1.match(largs[idx])
        if s1m :
            dallopt[s1m.group(1)] = s1m.group(2)
        else :
            s2m = soptegex2.match(largs[idx])
            if s2m :
                dallopt[s2m.group(1)] = 1
            else :
                print "arg %s is not well formatted " % largs[idx]
                usage(sacmd)
                sys.exit(1)
        idx += 1
    # Second pass: interpret the known options; everything else lands in dropt.
    for (skey,svalue) in dallopt.items() :
        if skey == "help" :
            usage(sacmd)
            sys.exit(1)
        elif skey == "debug":
            debug = 1
        elif skey == "conf" :
            try:
                odbifile = open(svalue)
            except:
                print "conf file %s cannot be opened" % svalue
                usage(sacmd)
                sys.exit(1)
        elif skey == "cmd":
            scom = svalue
            if scom not in lcmde :
                print " a commande among %s should be given" % string.join(lcmde," ")
                usage(sacmd)
                sys.exit(1)
        else:
            dropt[skey] = svalue
    # --conf is mandatory (error message predates the option's rename).
    if odbifile == None :
        print "--dbid option is mandatory"
        usage(sacmd)
        sys.exit(1)
    if debug:
        print "dallopt is ", dallopt
        print "dropt is ", dropt
    return sacmd, debug, odbifile, scom, dropt
def subcmd(lcommand, santemsg, saftermsg):
    """Run *lcommand* as a subprocess, framing it with progress messages.

    *santemsg* is written before the command starts and *saftermsg* once
    it has finished; the child's stdout is captured and discarded.
    """
    out = sys.stdout
    out.write(santemsg)
    out.flush()
    proc = subprocess.Popen(lcommand, stdout=subprocess.PIPE, stderr=None)
    proc.communicate()
    out.write(' ' + saftermsg + '\n')
    out.flush()
def patch_dbconnect(dirpath, filename,dpara):
''' patch dbconnect.php to substitute operational variable
$mysql_user="__CONGEDBA__" ;
$mysql_pass="__CONGEPW__";
$mysql_database= "__DBNAME__" ; '''
lpattern = [["__CONGEDBA__" , dpara['dbauser']],
["__CONGEPW__" , dpara['dbapw']] ,
["__DBNAME__" , dpara['dbname']],
["__CHARSET__" , dpara['charset']],
]
sfnname = "%s/%s" % (dirpath,filename)
ssavname = sfnname + '.sav'
try:
os.rename(sfnname,ssavname)
except:
print_exc()
print "%s file name cannot be renamed" % sfnname
sys.exit(1)
infile = open(ssavname, 'r')
outfile = open(sfnname, 'w')
# methode assez lourde mais bon ...
while True:
aline = infile.readline()
if aline == "" :
break
newline = aline
for apat in lpattern :
newline = string.replace(newline,apat[0],apat[1])
outfile.write(newline)
print "%s was patched" % sfnname
infile.close()
outfile.close()
if __name__ == '__main__':
    # Driver: read the db-id conf file, validate paths, then for each
    # configured app instance create its database, copy the reference
    # tree, patch dbconnect.php and run the SQL install file.
    # Nothing destructive happens unless the command is "realrun".
    #0 get parameter/arg
    scmd, ndebug, odbif, scommand, dopt = mygetopt(sys.argv[0],sys.argv[1:])
    #1.1 read conf file and check it brieffly
    try:
        # NOTE(review): eval of a conf file — only safe for trusted input.
        dcgid = eval(neatconflines(odbif))
    except:
        print_exc()
        print "database id file cannot be evaluated"
        sys.exit(1)
    if ndebug: print pprint.pformat(dcgid)
    #1.2 ask root access and check access to mysql/mariadb
    smysqlrpw = getpass.getpass("mysql root pw:")
    try:
        dcgid['rootdb']['pw'] = smysqlrpw
    except:
        print "conf file does not define ['rootdb']['pw'] key "
        sys.exit(1)
    odbconn_std, odbcursor_std = None, None
    try:
        odbconn_std = MySQLdb.connect(
            "localhost",dcgid['rootdb']['id'],dcgid['rootdb']['pw'],
            charset=dcgid['cdbcharset'], use_unicode=1)
        odbcursor_std = odbconn_std.cursor()
    except:
        # print_exc()
        print "root acces to mysql refused"
        sys.exit(1)
    if ndebug: print "root acces to mysql ok."
    #1.3 check validity of crefpath
    screfpath = None
    try:
        screfpath = dcgid['crefpath']
    except:
        print "ref path is not defined in conf file"
        sys.exit(1)
    # make sure screfpath does not end with / (strip any trailing slashes)
    nj = len(screfpath)
    while nj > 0 :
        if screfpath[nj - 1] == '/' :
            nj -= 1
        else :
            break
    screfpath = screfpath[0:nj]
    if not os.access(screfpath, os.F_OK & os.R_OK) :
        print "read acces to %s conges ref path refused" % dcgid['crefpath']
        sys.exit(1)
    screfpathdbconnect = None
    try:
        screfpathdbconnect = "%s/%s" % (dcgid['crefpath'],dcgid['cdbconnectpath'])
    except:
        print "ref path is not defined in conf file"
        sys.exit(1)
    if not os.access(screfpathdbconnect, os.F_OK & os.R_OK) :
        print "read acces to %s file refused" % screfpathdbconnect
        sys.exit(1)
    scsqlinstallpath = None
    try:
        scsqlinstallpath = "%s/%s" % (dcgid['crefpath'],dcgid['csqlinstallpath'])
    except:
        print "csqlinstallpath is not defined in conf file"
        sys.exit(1)
    if not os.access(scsqlinstallpath, os.F_OK & os.R_OK) :
        print "read acces to %s file refused" % scsqlinstallpath
        sys.exit(1)
    sdestpath = None
    try:
        sdestpath = dcgid['cdestpath']
    except:
        print "dest path is not defined in conf file"
        sys.exit(1)
    if not os.access(sdestpath, os.W_OK) :
        print "write acces to %s conges dest path refused" % dcgid['cdestpath']
        sys.exit(1)
    #1.4 check and complete linstance definition
    # (passwd captured interactively if required)
    try:
        leninstance = len(dcgid['clinstance'])
    except:
        print "'clinstance' is not defined in conf file"
        sys.exit(1)
    if leninstance == 0 :
        print "'clinstance' shoud defined one target app"
        sys.exit(1)
    # Re-build clinstance, prompting for any empty dba password.
    linstnew = []
    for sappname,sdba,sdbapass in dcgid['clinstance'] : # iterating over clinstance :
        while sdbapass == "" :
            sdbapass = getpass.getpass("mysql pw for %s:" % sdba)
        linstnew.append([sappname,sdba,sdbapass])
    dcgid['clinstance'] = linstnew
    #print "prematured end."
    #sys.exit(0)
    # SQL template run once per instance; %(...)s keys come from dpara.
    lsqlinst = [
        "DROP DATABASE IF EXISTS %(dbname)s ;",
        "CREATE DATABASE `%(dbname)s` DEFAULT CHARACTER SET %(charset)s DEFAULT COLLATE %(charset)s_general_ci; ",
        "GRANT ALL PRIVILEGES ON `%(dbname)s`.* TO '%(dbauser)s'@'localhost' ;",
        ]
    # 2 checking basically clinstance :
    for sappname,sdba,sdbapass in dcgid['clinstance'] : # iterating over clinstance :
        #2.1 issuing sql order
        try:
            dpara = {'dbname': dcgid['cdbprefixe']+sappname ,
                     'dbauser': sdba, 'dbapw':sdbapass,
                     'charset': dcgid['cdbcharset'] }
        except:
            print_exc()
            print "conf file should defined cdbprefixe creader pw cdba pw"
            sys.exit(1)
        for asql in lsqlinst :
            sqlorder = asql % dpara
            if scommand == "realrun" :
                try:
                    odbcursor_std.execute(sqlorder)
                except:
                    print "mysql error on %s" % sqlorder
                    print_exc()
                    sys.exit(1)
                print "%s ok." % sqlorder
            else :
                print "would do %s" % sqlorder
        #2.2 duplicating appfile rsync -av v2ref/ v2new
        # alternative a rsync : cp
        # mkdir /tmp/work/srv/www/conges/sg && (tar -C /tmp/work/v2ns -cvf - . | tar -C /tmp/work/srv/www/conges/sg/ -xf - )
        sdirpath = "%s/%s" % (dcgid['cdestpath'], sappname)
        # ldupcmd = ['ls', '%s/' % dcgid['crefpath'],
        # CARE : the terminating / is essential to not create a level of hierarchy
        ldupcmd = ["rsync", "-av", "%s/" % dcgid['crefpath'],sdirpath ]
        saction = string.join(ldupcmd,' ')
        if scommand == "realrun" :
            try:
                subcmd(ldupcmd,saction,"done.")
            except:
                print "sys error on %s" % saction
                print_exc()
                sys.exit(1)
        else:
            print "would do %s" % saction
        #2.3 patching v2ref/dbconnect.php with the right stuff
        sfn = "dbconnect.php"
        if scommand == "realrun" :
            try:
                patch_dbconnect(sdirpath,sfn,dpara)
            except:
                print "error on patching %s/%s" % (sdirpath,sfn)
                print_exc()
                sys.exit(1)
        else:
            print "would patch %s/%s" % (sdirpath,sfn)
        #2.4 executing sqlinstallation for the instance
        # Re-connect as the instance's dba user (not root) so the grant
        # above is actually exercised.
        odbcgconn_std, odbcgcursor_std = None, None
        try:
            odbcgconn_std = MySQLdb.connect(
                "localhost",dpara['dbauser'],dpara['dbapw'],
                dpara['dbname'],
                charset=dcgid['cdbcharset'], use_unicode=1)
            odbcgcursor_std = odbcgconn_std.cursor()
        except:
            # print_exc()
            print "%s acces to mysql (%s) refused" % ( dpara['dbauser'],'******')
            sys.exit(1)
        sqlorder = "use %s ;" % dpara['dbname']
        if scommand == "realrun" :
            try:
                odbcgcursor_std.execute(sqlorder)
            except:
                print "mysql error on %s" % sqlorder
                print_exc()
                sys.exit(1)
            print "%s ok." % sqlorder
        else :
            print "would do %s" % sqlorder
        # reading and executing sqlfile
        exec_sql_file(odbcgcursor_std,scsqlinstallpath,scommand)
        # doing a print to separate instance
        print
    #end iterating over clinstance :
    print "cdminst ok."
    sys.exit(0)
| coz787/conges4ac | actools/cg_minst.py | Python | gpl-2.0 | 12,134 |
#!/usr/bin/python3
"""Runs all.bash against each commit on a git development branch.
For a given development branch, do an all.bash test for each commit on the
branch, storing results in /tmp. Example from git log --oneline for hypothetical
development branch 'mybranch':
ca3b66ca8d (HEAD -> mybranch) final-thing
51df6b49da anotherthing
2b3ddf5180 firstthing
7fa195c1b9 (origin/master, origin/HEAD, master) unrelated
This script will produce the following dumps:
/tmp/item=1.branch=mybranch.commit=ca3b66ca8d.txt
/tmp/item=2.branch=mybranch.commit=51df6b49da.txt
/tmp/item=3.branch=mybranch.commit=2b3ddf5180.txt
/tmp/item=4.branch=mybranch.index.txt
where each 'commit' file contains the output from an all.bash run
on that commit.
"""
import getopt
import os
import re
import sys
import tempfile
import script_utils as u
# Echo command before executing
flag_echo = True

# Dry run mode (echo commands but perform no work)
flag_dryrun = False

# Tag to apply to output files.
flag_tag = None

# Script to run at each commit (None disables the script run)
flag_script_to_run = "all.bash"

# Package tests to run ('go test <pkg>' at each commit)
flag_pkgtests = []

# Files emitted (dump file paths, in commit order)
files_emitted = []

# Failures (count of failed test actions across all commits)
num_failures = 0
def docmd(cmd):
    """Execute *cmd*, honoring the global echo and dry-run flags."""
    if flag_echo:
        sys.stderr.write("executing: " + cmd + "\n")
    if not flag_dryrun:
        u.docmd(cmd)
def doscmd(cmd):
    """Execute *cmd* via u.doscmd, honoring the echo and dry-run flags."""
    if flag_echo:
        sys.stderr.write("executing: " + cmd + "\n")
    if not flag_dryrun:
        u.doscmd(cmd)
def docmdout(cmd, outfile):
    """Execute *cmd* with output redirected to *outfile*."""
    if flag_echo:
        sys.stderr.write("executing: " + cmd + "\n")
    if not flag_dryrun:
        u.docmdout(cmd, outfile)
def docmdinout(cmd, infile, outfile):
    """Execute *cmd*, reading from *infile* and writing to *outfile*."""
    if flag_echo:
        sys.stderr.write("executing: %s < %s > %s\n" % (cmd, infile, outfile))
    if not flag_dryrun:
        u.docmdinout(cmd, infile, outfile)
def process_commit(idx, branchname, githash, comment, summaryf):
    """Process a commit by hash.

    Checks out *githash*, dumps its log and diff plus the output of the
    configured test actions into a /tmp file named per the scheme in the
    module docstring, and records failures in *summaryf*.
    """
    tag = ""
    if flag_tag:
        tag = ".tag=%s" % flag_tag
    # Bug fix: the '=' separators after 'item' and 'branch' were missing,
    # producing names that did not match the documented scheme
    # (item=N.branch=B[.tag=T].commit=H.txt).
    fn = "/tmp/item=%d.branch=%s%s.commit=%s.txt" % (idx, branchname, tag, githash)
    if flag_dryrun:
        u.verbose(0, "<dryrun: run %s for %s to %s>" % (flag_script_to_run,
                                                        githash, fn))
        return
    files_emitted.append(fn)
    doscmd("git checkout %s" % githash)
    try:
        outf = open(fn, "w")
    except IOError as e:
        u.error("unable to open %s: %s" % (fn, e.strerror))
    outf.write("// comment: %s\n" % comment)
    outf.write("//\n")
    lines = u.docmdlines("git log --name-only -1 %s" % githash)
    if not lines:
        u.error("empty output from 'git log --name-only -1 %s'" % githash)
    for line in lines:
        outf.write(line)
        outf.write("\n")
    outf.write("--------------------------------------------------------------\n")
    lines = u.docmdlines("git diff %s^ %s" % (githash, githash))
    if not lines:
        u.error("empty output from 'git diff %s^ %s'" % (githash, githash))
    for line in lines:
        outf.write(line)
        outf.write("\n")
    u.verbose(1, "wrote %d diff lines to %s" % (len(lines), fn))
    if flag_script_to_run:
        dotestaction("bash %s" % flag_script_to_run, githash, outf, idx, summaryf)
    for pk in flag_pkgtests:
        dotestaction("go test %s" % pk, githash, outf, idx, summaryf)
    outf.close()
def dotestaction(action, githash, outf, idx, summaryf):
    """Run one test *action* for a commit, appending its output to *outf*.

    Failures are recorded in *summaryf* and tallied in num_failures.
    """
    global num_failures
    u.verbose(0, "starting %s run for %s" % (action, githash))
    outf.write("// --------------- test %s\n" % action)
    tmpf = tempfile.NamedTemporaryFile(mode="w", delete=True)
    rc = u.docmderrout(action, tmpf.name, True)
    if rc != 0:
        u.verbose(0, "warning: '%s' run failed for commit %s" % (action, githash))
        summaryf.write("%d: hash %s failed action: %s\n" % (idx, githash, action))
        num_failures += 1
    try:
        with open(tmpf.name, "r") as resultf:
            captured = resultf.readlines()
        for line in captured:
            outf.write(line)
        u.verbose(1, "wrote %d test output lines to %s" % (len(captured), outf.name))
    except IOError:
        u.error("open failed for %s temp output %s" % (action, tmpf.name))
def perform():
    """Main driver routine.

    Determines the current branch and how many commits it is ahead of its
    upstream, runs the configured test actions against each of those
    commits (oldest first), restores the branch, and writes an index file.
    """
    if flag_script_to_run and not os.path.exists(flag_script_to_run):
        u.error("no %s here, can't proceed" % flag_script_to_run)
    lines = u.docmdlines("git status -sb")
    if not lines:
        u.error("empty output from git status -sb")
    # Matches e.g. '## mybranch...origin/master [ahead 3]'
    brnreg = re.compile(r"^## (\S+)\.\.(\S+) \[ahead (\d+)\]\s*$")
    m = brnreg.match(lines[0])
    if not m:
        u.error("can't pattern match output of git status -sb: %s" % lines[0])
    # group(1) keeps one of the three separator dots; strip it off.
    branchname = m.group(1).strip(".")
    commits = int(m.group(3))
    u.verbose(1, "branch is: %s commits: %d" % (branchname, commits))
    # Grab info on commits
    lines = u.docmdlines("git log --oneline -%d" % commits)
    if not lines:
        u.error("empty output from 'git log --oneline'")
    # Open index file for output
    fn = "/tmp/branch=%s.index.txt" % branchname
    try:
        outf = open(fn, "w")
    except IOError as e:
        u.error("unable to open %s: %s" % (fn, e.strerror))
    # Process commits in reverse order (oldest first)
    firsthash = None
    lasthash = None
    creg = re.compile(r"^(\S+) (\S.+)$")
    lines.reverse()
    idx = 0
    for cl in lines:
        idx += 1
        m = creg.match(cl)
        if not m:
            u.error("can't pattern match git log output: %s" % cl)
        githash = m.group(1)
        lasthash = githash
        if not firsthash:
            firsthash = githash
        comment = m.group(2)
        u.verbose(0, "processing hash %s comment %s" % (githash, comment))
        process_commit(idx, branchname, githash, comment, outf)
    doscmd("git checkout %s" % branchname)
    # Emit index file.
    # (Removed dead assignment 'n = len(files_emitted) + 1' — it was
    # never used.)
    outf.write("\nFiles emitted:\n\n")
    outf.write("\n".join(files_emitted))
    outf.write("\n\nBranch log:\n\n")
    u.verbose(1, "index diff cmd hashes: %s %s" % (firsthash, lasthash))
    outf.write("\n")
    lines = u.docmdlines("git log --name-only -%d HEAD" % len(files_emitted))
    for line in lines:
        outf.write(line)
        outf.write("\n")
    outf.close()
    u.verbose(0, "... index file emitted to %s\n" % fn)
    u.verbose(0, "... total failures: %d\n" % num_failures)
def usage(msgarg):
    """Print usage (optionally preceded by an error message) and exit(1)."""
    me = os.path.basename(sys.argv[0])
    if msgarg:
        sys.stderr.write("error: %s\n" % msgarg)
    # NOTE(review): the indentation of the help text below follows the
    # flattened source as seen; verify against the original layout.
    print("""\
usage: %s [options]
options:
-t T tag output files with tag T
-e echo commands before executing
-d increase debug msg verbosity level
-m run make.bash instead of all.bash
-n don't run make.bash or all.bash
-S X run script X (e.g. "bash X") instead of all.bash
-p P run 'go test P' for package P at each commit
-D dryrun mode (echo commands but do not execute)
This program walks the stack of commits for a given git
development branch and runs all.bash for each commit
into /tmp.
Example usage:
%s
""" % (me, me))
    sys.exit(1)
def parse_args():
    """Command line argument parsing.

    Populates the module-level flag_* globals; calls usage() (which
    exits) on any malformed input.
    """
    global flag_echo, flag_dryrun, flag_tag, flag_script_to_run, flag_pkgtests

    try:
        optlist, args = getopt.getopt(sys.argv[1:], "dmneDp:t:S:")
    except getopt.GetoptError as err:
        # unrecognized option
        usage(str(err))
    if args:
        usage("unknown extra args: %s" % " ".join(args))

    for opt, arg in optlist:
        if opt == "-d":
            u.increment_verbosity()
        elif opt == "-m":
            flag_script_to_run = "make.bash"
        elif opt == "-n":
            flag_script_to_run = None
        elif opt == "-S":
            if not os.path.exists(arg):
                u.warning("can't access script %s, ignored for -S" % arg)
            else:
                flag_script_to_run = arg
        elif opt == "-D":
            u.verbose(0, "+++ dry run mode")
            flag_dryrun = True
            flag_echo = True
        elif opt == "-e":
            flag_echo = True
        elif opt == "-t":
            flag_tag = arg
        elif opt == "-p":
            if not os.path.exists(arg):
                u.warning("can't access package %s, ignored for -p" % arg)
            else:
                # Bug fix: the package used to be appended even after the
                # "ignored" warning; now it really is skipped, matching
                # the -S handling above.
                flag_pkgtests.append(arg)
#......................................................................
#
# Main portion of script
#
parse_args()
u.setdeflanglocale()
perform()
# Use sys.exit rather than the site-provided exit(): the latter may be
# absent when the script runs without the 'site' module.
sys.exit(0)
| thanm/devel-scripts | test-git-branch-stack.py | Python | apache-2.0 | 8,204 |
from collections import UserList
import pygame
from attr import Factory, attrib, attrs
from .movement import Direction
from .entity import Entity
from .system import SystemFlag
@attrs(slots=True)
class AiController:
    # The entity this AI fights against; its movement.position is read
    # every update.
    opponent = attrib(type=Entity)
    # Requested movement for the current frame; only .x and .y are used,
    # each holding -1, 0 or 1.
    direction = attrib(
        type=pygame.Rect,
        default=Factory(lambda: pygame.Rect(0, 0, 0, 0)),
    )
    # Whether the AI wants to attack this frame.
    fire = attrib(type=bool, default=False)
    # Horizontal distance thresholds: retreat when closer than
    # close_range, advance when farther than long_range.
    # (presumably in world/pixel units — TODO confirm)
    close_range = attrib(type=int, default=0)
    long_range = attrib(type=int, default=0)
    # Vertical dead zone: no vertical movement while |dy| <= y_range.
    y_range = attrib(type=int, default=0)
class AiControllerSystem(UserList):
    """Steers AI-controlled entities relative to their opponents.

    Each update turns the entity to face its opponent, closes or opens
    the horizontal gap based on close_range/long_range, and matches the
    opponent's height within a y_range dead zone.
    """

    flags = SystemFlag.CONTROLLER + SystemFlag.MOVEMENT

    def update(self):
        for entity in self.data:
            ctrl = entity.controller
            move = entity.movement
            foe = ctrl.opponent

            dx = move.position.x - foe.movement.position.x
            # Face the opponent; keep the current facing when aligned.
            if dx < 0:
                move.facing = Direction.RIGHT
            elif dx > 0:
                move.facing = Direction.LEFT

            dy = move.position.y - foe.movement.position.y
            if abs(dy) <= ctrl.y_range:
                ctrl.direction.y = 0
            elif dy <= 0:
                ctrl.direction.y = 1
            else:
                ctrl.direction.y = -1

            gap = abs(dx)
            if gap < ctrl.close_range:
                # Too close: back away from the opponent.
                ctrl.direction.x = -move.facing.value
            elif gap > ctrl.long_range:
                # Too far: advance toward it.
                ctrl.direction.x = move.facing.value
            else:
                ctrl.direction.x = 0
| joetsoi/moonstone | python/combat/ai_controller.py | Python | agpl-3.0 | 1,726 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-08 03:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow blank 'confirmation' values."""
    # Applied migrations are part of the recorded schema history and must
    # not be restructured after deployment.

    dependencies = [
        ('auctions', '0023_auto_20160408_0114'),
    ]

    operations = [
        # Allow offers and payouts to be saved without a confirmation
        # string (blank=True affects form validation only).
        migrations.AlterField(
            model_name='offer',
            name='confirmation',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='payout',
            name='confirmation',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| codesy/codesy | auctions/migrations/0024_auto_20160408_0307.py | Python | agpl-3.0 | 641 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
import logging
from openerp import models, api
from ..models.gp_connector import GPConnect
logger = logging.getLogger(__name__)
class MigrationR4(models.TransientModel):
    """ Perform migrations after upgrading the module
    """
    _name = 'migration.r4'

    @api.model
    def perform_migration(self):
        # Only execute migration for 8.0.1.4 -> 8.0.3.0
        child_sync_module = self.env['ir.module.module'].search([
            ('name', '=', 'child_sync_gp')
        ])
        if child_sync_module.latest_version == '8.0.1.4':
            self._perform_migration()
        # Always report success so the caller does not abort the upgrade.
        return True

    def _perform_migration(self):
        """
        Synchronize GP for all children
        """
        logger.info("MIGRATION 8.0.3 ----> Synchronize GP child codes.")
        # Synchronize GP: push every child's code/local_id pair.
        gp = GPConnect()
        for child in self.env['compassion.child'].search([]):
            gp.transfer(self.env.uid, child.code, child.local_id)
        logger.info("MIGRATION 8.0.3 ----> Synchronize GP project codes.")
        # Pad old 5-char project codes (XX###) to the new 6-char format
        # (XX0###) by inserting a '0' after the 2-char prefix.
        gp.query("""
            UPDATE Projet
            SET code_projet = CONCAT(LEFT(code_projet, 2), '0',
                                     RIGHT(code_projet, 3))
            WHERE CHAR_LENGTH(code_projet) = 5;
            """)
| MickSandoz/compassion-switzerland | child_sync_gp/wizards/migration_r4.py | Python | agpl-3.0 | 1,666 |
from __future__ import unicode_literals, division, absolute_import
import base64
import glob
import logging
import pkg_resources
import os
import re
import sys
import time
import warnings
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('deluge')
def add_deluge_windows_install_dir_to_sys_path():
    """Make a native Windows Deluge install importable.

    Deluge does not install into the system python on Windows; if an
    install whose bundled egg matches our python minor version is found,
    its directory (plus bundled eggs/zips) is appended to sys.path.
    """
    if not (sys.platform.startswith('win') or os.environ.get('ProgramFiles')):
        return
    install_dir = os.path.join(os.environ['ProgramFiles'], 'Deluge')
    log.debug('Looking for deluge install in %s' % install_dir)
    if not os.path.isdir(install_dir):
        return
    eggs = glob.glob(os.path.join(install_dir, 'deluge-*-py2.?.egg'))
    if not eggs:
        return
    egg_minor = int(re.search(r'py2\.(\d).egg', eggs[0]).group(1))
    if egg_minor != sys.version_info[1]:
        log.verbose('Cannot use deluge from install directory because its python version doesn\'t match.')
        return
    log.debug('Found deluge install in %s adding to sys.path' % install_dir)
    sys.path.append(install_dir)
    sys.path.extend(os.path.join(install_dir, name)
                    for name in os.listdir(install_dir)
                    if name.endswith(('.egg', '.zip')))
# Hook up any local Windows Deluge install before the twisted/deluge
# imports below are attempted.
add_deluge_windows_install_dir_to_sys_path()

# Some twisted import is throwing a warning see #2434
warnings.filterwarnings('ignore', message='Not importing directory .*')
# If twisted is available, install a reactor that can be paused so
# flexget can regain control between deluge operations.
try:
    from twisted.python import log as twisted_log
    from twisted.internet.main import installReactor
    from twisted.internet.selectreactor import SelectReactor

    class PausingReactor(SelectReactor):
        """A SelectReactor that can be paused and resumed."""

        def __init__(self):
            SelectReactor.__init__(self)
            self.paused = False
            self._return_value = None
            self._release_requested = False
            self._mainLoopGen = None

            # Older versions of twisted do not have the _started attribute, make it a synonym for running in that case
            if not hasattr(self, '_started'):
                PausingReactor._started = property(lambda self: self.running)

        def _mainLoopGenerator(self):
            """Generator that acts as mainLoop, but yields when requested."""
            while self._started:
                try:
                    while self._started:
                        if self._release_requested:
                            self._release_requested = False
                            self.paused = True
                            # Yield hands control (and the pause value)
                            # back to the run() caller.
                            yield self._return_value
                            self.paused = False
                        self.iterate()
                except KeyboardInterrupt:
                    # Keyboard interrupt pauses the reactor
                    self.pause()
                except GeneratorExit:
                    # GeneratorExit means stop the generator; Do it cleanly by stopping the whole reactor.
                    log.debug('Got GeneratorExit, stopping reactor.', exc_info=True)
                    self.paused = False
                    self.stop()
                except:
                    twisted_log.msg("Unexpected error in main loop.")
                    twisted_log.err()
            else:
                twisted_log.msg('Main loop terminated.')

        def run(self, installSignalHandlers=False):
            """Starts or resumes the reactor."""
            if not self._started:
                self.startRunning(installSignalHandlers)
                self._mainLoopGen = self._mainLoopGenerator()
            try:
                return self._mainLoopGen.next()
            except StopIteration:
                pass

        def pause(self, return_value=None):
            """Causes reactor to pause after this iteration.
            If :return_value: is specified, it will be returned by the reactor.run call."""
            self._return_value = return_value
            self._release_requested = True

        def stop(self):
            """Stops the reactor."""
            SelectReactor.stop(self)
            # If this was called while the reactor was paused we have to resume in order for it to complete
            if self.paused:
                self.run()

            # These need to be re-registered so that the PausingReactor can be safely restarted after a stop
            self.addSystemEventTrigger('during', 'shutdown', self.crash)
            self.addSystemEventTrigger('during', 'shutdown', self.disconnectAll)

    # Configure twisted to use the PausingReactor.
    installReactor(PausingReactor())
except ImportError:
    # If twisted is not found, errors will be shown later
    pass
# Define a base class with some methods that are used for all deluge versions
class DelugePlugin(object):
    """Base class for deluge plugins, contains settings and methods for connecting to a deluge daemon."""

    def validate_connection_info(self, dict_validator):
        """Register the daemon connection keys on *dict_validator*.

        'user' and 'pass' are the deprecated spellings of
        'username'/'password' and are still accepted.
        """
        for value_type, key in (('text', 'host'), ('integer', 'port'),
                                ('text', 'username'), ('text', 'password'),
                                ('text', 'user'), ('text', 'pass')):
            dict_validator.accept(value_type, key=key)

    def prepare_connection_info(self, config):
        """Fill in defaults for the daemon connection settings, migrating
        the deprecated 'user'/'pass' keys with a warning."""
        for key, default in (('host', 'localhost'), ('port', 58846)):
            config.setdefault(key, default)
        if 'user' in config or 'pass' in config:
            warnings.warn('deluge `user` and `pass` options have been renamed `username` and `password`',
                          DeprecationWarning)
            config.setdefault('username', config.get('user', ''))
            config.setdefault('password', config.get('pass', ''))
        for key in ('username', 'password'):
            config.setdefault(key, '')

    def on_task_start(self, task, config):
        """Raise a DependencyError if our dependencies aren't available"""
        # This is overridden by OutputDeluge to add deluge 1.1 support
        try:
            from deluge.ui.client import client
        except ImportError as e:
            log.debug('Error importing deluge: %s' % e)
            raise plugin.DependencyError('output_deluge', 'deluge',
                                         'Deluge module and it\'s dependencies required. ImportError: %s' % e, log)
        try:
            from twisted.internet import reactor
        except:
            raise plugin.DependencyError('output_deluge', 'twisted.internet', 'Twisted.internet package required', log)
        log.debug('Using deluge 1.2 api')

    def on_task_abort(self, task, config):
        pass
# Add some more methods to the base class if we are using deluge 1.2+
try:
    from twisted.internet import reactor
    from deluge.ui.client import client
    from deluge.ui.common import get_localhost_auth

    # Re-binds the DelugePlugin name to a subclass that knows how to
    # drive the twisted-based 1.2 daemon client.
    class DelugePlugin(DelugePlugin):
        def on_disconnect(self):
            """Pauses the reactor. Gets called when we disconnect from the daemon."""
            # pause the reactor, so flexget can continue
            reactor.callLater(0, reactor.pause)

        def on_connect_fail(self, result):
            """Pauses the reactor, returns PluginError. Gets called when connection to deluge daemon fails."""
            log.debug('Connect to deluge daemon failed, result: %s' % result)
            reactor.callLater(0, reactor.pause, plugin.PluginError('Could not connect to deluge daemon', log))

        def on_connect_success(self, result, task, config):
            """Gets called when successfully connected to the daemon. Should do the work then call client.disconnect"""
            raise NotImplementedError

        def connect(self, task, config):
            """Connects to the deluge daemon and runs on_connect_success """
            if config['host'] in ['localhost', '127.0.0.1'] and not config.get('username'):
                # If an username is not specified, we have to do a lookup for the localclient username/password
                auth = get_localhost_auth()
                if auth[0]:
                    config['username'], config['password'] = auth
                else:
                    raise plugin.PluginError('Unable to get local authentication info for Deluge. You may need to '
                                             'specify an username and password from your Deluge auth file.')

            client.set_disconnect_callback(self.on_disconnect)

            d = client.connect(
                host=config['host'],
                port=config['port'],
                username=config['username'],
                password=config['password'])

            d.addCallback(self.on_connect_success, task, config).addErrback(self.on_connect_fail)
            # reactor.run() returns when a callback pauses the reactor; a
            # pause value that is an Exception signals failure.
            result = reactor.run()
            if isinstance(result, Exception):
                raise result
            return result

    @event('manager.shutdown')
    def stop_reactor(manager):
        """Shut down the twisted reactor after all tasks have run."""
        if not reactor._stopped:
            log.debug('Stopping twisted reactor.')
            reactor.stop()

except (ImportError, pkg_resources.DistributionNotFound):
    pass
class InputDeluge(DelugePlugin):
    """Create entries for torrents in the deluge session."""
    #
    # Maps deluge torrent-status fields to flexget entry fields; a tuple
    # value is (entry_field, converter applied to the raw value).
    settings_map = {
        'name': 'title',
        'hash': 'torrent_info_hash',
        'num_peers': 'torrent_peers',
        'num_seeds': 'torrent_seeds',
        'progress': 'deluge_progress',
        'seeding_time': ('deluge_seed_time', lambda time: time / 3600),
        'private': 'deluge_private',
        'state': 'deluge_state',
        'eta': 'deluge_eta',
        'ratio': 'deluge_ratio',
        'move_on_completed_path': 'deluge_movedone',
        'save_path': 'deluge_path',
        'label': 'deluge_label',
        'total_size': ('content_size', lambda size: size / 1024 / 1024),
        'files': ('content_files', lambda file_dicts: [f['path'] for f in file_dicts])}

    def __init__(self):
        # Entries produced by the last on_task_input run.
        self.entries = []

    def validator(self):
        """Config schema: boolean, or a dict with connection info,
        config_path, and an optional label/state filter."""
        from flexget import validator
        root = validator.factory()
        root.accept('boolean')
        advanced = root.accept('dict')
        advanced.accept('path', key='config_path')
        self.validate_connection_info(advanced)
        filter = advanced.accept('dict', key='filter')
        filter.accept('text', key='label')
        filter.accept('choice', key='state').accept_choices(
            ['active', 'downloading', 'seeding', 'queued', 'paused'], ignore_case=True)
        return root

    def prepare_config(self, config):
        """Normalize config: expand a bare boolean, canonicalize the
        filter casing, and fill connection defaults."""
        if isinstance(config, bool):
            config = {}
        if 'filter' in config:
            filter = config['filter']
            if 'label' in filter:
                # deluge labels are stored lowercase
                filter['label'] = filter['label'].lower()
            if 'state' in filter:
                filter['state'] = filter['state'].capitalize()
        self.prepare_connection_info(config)
        return config

    def on_task_input(self, task, config):
        """Generates and returns a list of entries from the deluge daemon."""
        # Reset the entries list
        self.entries = []
        # Call connect, entries get generated if everything is successful
        self.connect(task, self.prepare_config(config))
        return self.entries

    def on_connect_success(self, result, task, config):
        """Creates a list of FlexGet entries from items loaded in deluge and stores them to self.entries"""
        from deluge.ui.client import client

        def on_get_torrents_status(torrents):
            config_path = os.path.expanduser(config.get('config_path', ''))
            for hash, torrent_dict in torrents.iteritems():
                # Make sure it has a url so no plugins crash
                entry = Entry(deluge_id=hash, url='')
                if config_path:
                    torrent_path = os.path.join(config_path, 'state', hash + '.torrent')
                    if os.path.isfile(torrent_path):
                        entry['location'] = torrent_path
                        if not torrent_path.startswith('/'):
                            torrent_path = '/' + torrent_path
                        entry['url'] = 'file://' + torrent_path
                    else:
                        log.warning('Did not find torrent file at %s' % torrent_path)
                for key, value in torrent_dict.iteritems():
                    flexget_key = self.settings_map[key]
                    if isinstance(flexget_key, tuple):
                        flexget_key, format_func = flexget_key
                        value = format_func(value)
                    entry[flexget_key] = value
                self.entries.append(entry)
            # Done with the daemon; disconnecting pauses the reactor so
            # on_task_input can return.
            client.disconnect()

        filter = config.get('filter', {})
        client.core.get_torrents_status(filter, self.settings_map.keys()).addCallback(on_get_torrents_status)
class OutputDeluge(DelugePlugin):
"""Add the torrents directly to deluge, supporting custom save paths."""
    def validator(self):
        """Config schema: boolean, or a dict of connection info plus
        per-torrent options (paths, label, speed/slot limits, ratio,
        pause/queue behaviour)."""
        from flexget import validator
        root = validator.factory()
        root.accept('boolean')
        deluge = root.accept('dict')
        self.validate_connection_info(deluge)
        deluge.accept('path', key='path', allow_replacement=True, allow_missing=True)
        deluge.accept('path', key='movedone', allow_replacement=True, allow_missing=True)
        deluge.accept('text', key='label')
        deluge.accept('boolean', key='queuetotop')
        deluge.accept('boolean', key='automanaged')
        deluge.accept('number', key='maxupspeed')
        deluge.accept('number', key='maxdownspeed')
        deluge.accept('integer', key='maxconnections')
        deluge.accept('integer', key='maxupslots')
        deluge.accept('number', key='ratio')
        deluge.accept('boolean', key='removeatratio')
        deluge.accept('boolean', key='addpaused')
        deluge.accept('boolean', key='compact')
        deluge.accept('text', key='content_filename')
        deluge.accept('boolean', key='main_file_only')
        deluge.accept('boolean', key='enabled')
        return root
def prepare_config(self, config):
if isinstance(config, bool):
config = {'enabled': config}
self.prepare_connection_info(config)
config.setdefault('enabled', True)
config.setdefault('path', '')
config.setdefault('movedone', '')
config.setdefault('label', '')
return config
    def __init__(self):
        # deluge12: True -> 1.2 API, False -> 1.1 API, None -> not yet detected
        self.deluge12 = None
        self.deluge_version = None
        # Maps flexget config option names to deluge torrent option names.
        self.options = {'maxupspeed': 'max_upload_speed', 'maxdownspeed': 'max_download_speed',
                        'maxconnections': 'max_connections', 'maxupslots': 'max_upload_slots',
                        'automanaged': 'auto_managed', 'ratio': 'stop_ratio', 'removeatratio': 'remove_at_ratio',
                        'addpaused': 'add_paused', 'compact': 'compact_allocation'}
    @plugin.priority(120)
    def on_task_start(self, task, config):
        """
        Detect what version of deluge is loaded.
        """
        # Detection runs once; its result is cached in self.deluge12.
        if self.deluge12 is None:
            logger = log.info if task.options.test else log.debug
            try:
                log.debug('Looking for deluge 1.1 API')
                # sclient only exists in the deluge 1.1 client library.
                from deluge.ui.client import sclient
                log.debug('1.1 API found')
            except ImportError:
                log.debug('Looking for deluge 1.2 API')
                # Falls back to the base-class dependency check (1.2/twisted).
                DelugePlugin.on_task_start(self, task, config)
                logger('Using deluge 1.2 api')
                self.deluge12 = True
            else:
                logger('Using deluge 1.1 api')
                self.deluge12 = False
    @plugin.priority(120)
    def on_task_download(self, task, config):
        """
        Call download plugin to generate the temp files we will load into deluge
        then verify they are valid torrents
        """
        import deluge.ui.common
        config = self.prepare_config(config)
        if not config['enabled']:
            return
        # If the download plugin is not enabled, we need to call it to get our temp .torrent files
        if not 'download' in task.config:
            download = plugin.get_plugin_by_name('download')
            for entry in task.accepted:
                # Entries that already exist in deluge need no torrent file.
                if not entry.get('deluge_id'):
                    download.instance.get_temp_file(task, entry, handle_magnets=True)

        # Check torrent files are valid
        for entry in task.accepted:
            if os.path.exists(entry.get('file', '')):
                # Check if downloaded file is a valid torrent file
                try:
                    deluge.ui.common.TorrentInfo(entry['file'])
                except Exception:
                    entry.fail('Invalid torrent file')
                    log.error('Torrent file appears invalid for: %s', entry['title'])
@plugin.priority(135)
def on_task_output(self, task, config):
"""Add torrents to deluge at exit."""
config = self.prepare_config(config)
# don't add when learning
if task.options.learn:
return
if not config['enabled'] or not (task.accepted or task.options.test):
return
add_to_deluge = self.connect if self.deluge12 else self.add_to_deluge11
add_to_deluge(task, config)
# Clean up temp file if download plugin is not configured for this task
if not 'download' in task.config:
for entry in task.accepted + task.failed:
if os.path.exists(entry.get('file', '')):
os.remove(entry['file'])
del(entry['file'])
    def add_to_deluge11(self, task, config):
        """Add torrents to deluge using deluge 1.1.x api."""
        try:
            from deluge.ui.client import sclient
        except:
            raise plugin.PluginError('Deluge module required', log)

        sclient.set_core_uri()
        for entry in task.accepted:
            try:
                # Session snapshot taken before adding, so the new
                # torrent id can be found by set-difference afterwards.
                before = sclient.get_session_state()
            except Exception as e:
                (errno, msg) = e.args
                raise plugin.PluginError('Could not communicate with deluge core. %s' % msg, log)
            if task.options.test:
                return
            opts = {}
            path = entry.get('path', config['path'])
            if path:
                try:
                    opts['download_location'] = os.path.expanduser(entry.render(path))
                except RenderError as e:
                    log.error('Could not set path for %s: %s' % (entry['title'], e))
            # Translate flexget option names to deluge option names.
            for fopt, dopt in self.options.iteritems():
                value = entry.get(fopt, config.get(fopt))
                if value is not None:
                    opts[dopt] = value
                    if fopt == 'ratio':
                        opts['stop_at_ratio'] = True

            # check that file is downloaded
            if not 'file' in entry:
                entry.fail('file missing?')
                continue

            # see that temp file is present
            if not os.path.exists(entry['file']):
                tmp_path = os.path.join(task.manager.config_base, 'temp')
                log.debug('entry: %s' % entry)
                log.debug('temp: %s' % ', '.join(os.listdir(tmp_path)))
                entry.fail('Downloaded temp file \'%s\' doesn\'t exist!?' % entry['file'])
                continue

            sclient.add_torrent_file([entry['file']], [opts])
            log.info('%s torrent added to deluge with options %s' % (entry['title'], opts))

            movedone = entry.get('movedone', config['movedone'])
            label = entry.get('label', config['label']).lower()
            queuetotop = entry.get('queuetotop', config.get('queuetotop'))

            # Sometimes deluge takes a moment to add the torrent, wait a second.
            time.sleep(2)
            after = sclient.get_session_state()
            for item in after:
                # find torrentid of just added torrent
                if not item in before:
                    try:
                        movedone = entry.render(movedone)
                    except RenderError as e:
                        log.error('Could not set movedone for %s: %s' % (entry['title'], e))
                        movedone = ''
                    if movedone:
                        movedone = os.path.expanduser(movedone)
                        if not os.path.isdir(movedone):
                            log.debug('movedone path %s doesn\'t exist, creating' % movedone)
                            os.makedirs(movedone)
                        log.debug('%s move on complete set to %s' % (entry['title'], movedone))
                        sclient.set_torrent_move_on_completed(item, True)
                        sclient.set_torrent_move_on_completed_path(item, movedone)
                    if label:
                        if not 'label' in sclient.get_enabled_plugins():
                            sclient.enable_plugin('label')
                        if not label in sclient.label_get_labels():
                            sclient.label_add(label)
                        log.debug('%s label set to \'%s\'' % (entry['title'], label))
                        sclient.label_set_torrent(item, label)
                    if queuetotop:
                        log.debug('%s moved to top of queue' % entry['title'])
                        sclient.queue_top([item])
                    break
            else:
                # for/else: no new torrent id appeared -> it was a duplicate.
                log.info('%s is already loaded in deluge. Cannot change label, movedone, or queuetotop' %
                         entry['title'])
    def on_connect_success(self, result, task, config):
        """Gets called when successfully connected to a daemon.

        Builds a chain of twisted deferreds that adds every accepted entry
        to the deluge session (or just updates options for torrents already
        loaded), then disconnects from the daemon when everything is done.
        """
        from deluge.ui.client import client
        from twisted.internet import reactor, defer

        if not result:
            log.debug('on_connect_success returned a failed result. BUG?')

        if task.options.test:
            log.debug('Test connection to deluge daemon successful.')
            client.disconnect()
            return

        def format_label(label):
            """Makes a string compliant with deluge label naming rules"""
            return re.sub('[^\w-]+', '_', label.lower())

        def set_torrent_options(torrent_id, entry, opts):
            """Gets called when a torrent was added to the daemon."""
            dlist = []
            if not torrent_id:
                log.error('There was an error adding %s to deluge.' % entry['title'])
                # TODO: Fail entry? How can this happen still now?
                return
            log.info('%s successfully added to deluge.' % entry['title'])
            entry['deluge_id'] = torrent_id

            def create_path(result, path):
                """Creates the specified path if deluge is older than 1.3"""
                from deluge.common import VersionSplit
                # Before 1.3, deluge would not create a non-existent move directory, so we need to.
                if VersionSplit('1.3.0') > VersionSplit(self.deluge_version):
                    if client.is_localhost():
                        if not os.path.isdir(path):
                            log.debug('path %s doesn\'t exist, creating' % path)
                            os.makedirs(path)
                    else:
                        log.warning('If path does not exist on the machine running the daemon, move will fail.')

            if opts.get('movedone'):
                # create_path must wait for the daemon version lookup (version_deferred)
                dlist.append(version_deferred.addCallback(create_path, opts['movedone']))
                dlist.append(client.core.set_torrent_move_completed(torrent_id, True))
                dlist.append(client.core.set_torrent_move_completed_path(torrent_id, opts['movedone']))
                log.debug('%s move on complete set to %s' % (entry['title'], opts['movedone']))
            if opts.get('label'):

                def apply_label(result, torrent_id, label):
                    """Gets called after labels and torrent were added to deluge."""
                    return client.label.set_torrent(torrent_id, label)

                dlist.append(label_deferred.addCallback(apply_label, torrent_id, opts['label']))
            if opts.get('queuetotop') is not None:
                if opts['queuetotop']:
                    dlist.append(client.core.queue_top([torrent_id]))
                    log.debug('%s moved to top of queue' % entry['title'])
                else:
                    dlist.append(client.core.queue_bottom([torrent_id]))
                    log.debug('%s moved to bottom of queue' % entry['title'])

            def on_get_torrent_status(status):
                """Gets called with torrent status, including file info.
                Sets the torrent options which require knowledge of the current status of the torrent."""
                main_file_dlist = []

                # Determine where the file should be
                move_now_path = None
                if opts.get('movedone'):
                    if status['progress'] == 100:
                        move_now_path = opts['movedone']
                    else:
                        # Deluge will unset the move completed option if we move the storage, forgo setting proper
                        # path, in favor of leaving proper final location.
                        log.debug('Not moving storage for %s, as this will prevent movedone.' % entry['title'])
                elif opts.get('path'):
                    move_now_path = opts['path']

                if move_now_path and os.path.normpath(move_now_path) != os.path.normpath(status['save_path']):
                    main_file_dlist.append(version_deferred.addCallback(create_path, move_now_path))
                    log.debug('Moving storage for %s to %s' % (entry['title'], move_now_path))
                    main_file_dlist.append(client.core.move_storage([torrent_id], move_now_path))

                if opts.get('content_filename') or opts.get('main_file_only'):

                    def file_exists():
                        # Checks the download path as well as the move completed path for existence of the file
                        if os.path.exists(os.path.join(status['save_path'], filename)):
                            return True
                        elif status.get('move_on_completed') and status.get('move_on_completed_path'):
                            if os.path.exists(os.path.join(status['move_on_completed_path'], filename)):
                                return True
                        else:
                            return False

                    for file in status['files']:
                        # Only rename file if it is > 90% of the content
                        if file['size'] > (status['total_size'] * 0.9):
                            if opts.get('content_filename'):
                                filename = opts['content_filename'] + os.path.splitext(file['path'])[1]
                                counter = 1
                                if client.is_localhost():
                                    while file_exists():
                                        # Try appending a (#) suffix till a unique filename is found
                                        filename = ''.join([opts['content_filename'], '(', str(counter), ')',
                                                            os.path.splitext(file['path'])[1]])
                                        counter += 1
                                else:
                                    log.debug('Cannot ensure content_filename is unique '
                                              'when adding to a remote deluge daemon.')
                                log.debug('File %s in %s renamed to %s' % (file['path'], entry['title'], filename))
                                main_file_dlist.append(
                                    client.core.rename_files(torrent_id, [(file['index'], filename)]))
                            if opts.get('main_file_only'):
                                file_priorities = [1 if f['index'] == file['index'] else 0 for f in status['files']]
                                main_file_dlist.append(
                                    client.core.set_torrent_file_priorities(torrent_id, file_priorities))
                            break
                    else:
                        log.warning('No files in %s are > 90%% of content size, no files renamed.' % entry['title'])
                return defer.DeferredList(main_file_dlist)

            status_keys = ['files', 'total_size', 'save_path', 'move_on_completed_path',
                           'move_on_completed', 'progress']
            dlist.append(client.core.get_torrent_status(torrent_id, status_keys).addCallback(on_get_torrent_status))
            return defer.DeferredList(dlist)

        def on_fail(result, task, entry):
            """Gets called when daemon reports a failure adding the torrent."""
            log.info('%s was not added to deluge! %s' % (entry['title'], result))
            entry.fail('Could not be added to deluge')

        # dlist is a list of deferreds that must complete before we exit
        dlist = []
        # loop through entries to get a list of labels to add
        labels = set([format_label(entry['label']) for entry in task.accepted if entry.get('label')])
        if config.get('label'):
            labels.add(format_label(config['label']))
        label_deferred = defer.succeed(True)
        if labels:
            # Make sure the label plugin is available and enabled, then add appropriate labels

            def on_get_enabled_plugins(plugins):
                """Gets called with the list of enabled deluge plugins."""

                def on_label_enabled(result):
                    """ This runs when we verify the label plugin is enabled. """

                    def on_get_labels(d_labels):
                        """Gets available labels from deluge, and adds any new labels we need."""
                        dlist = []
                        for label in labels:
                            if not label in d_labels:
                                log.debug('Adding the label %s to deluge' % label)
                                dlist.append(client.label.add(label))
                        return defer.DeferredList(dlist)

                    return client.label.get_labels().addCallback(on_get_labels)

                if 'Label' in plugins:
                    return on_label_enabled(True)
                else:
                    # Label plugin isn't enabled, so we check if it's available and enable it.

                    def on_get_available_plugins(plugins):
                        """Gets plugins available to deluge, enables Label plugin if available."""
                        if 'Label' in plugins:
                            log.debug('Enabling label plugin in deluge')
                            return client.core.enable_plugin('Label').addCallback(on_label_enabled)
                        else:
                            log.error('Label plugin is not installed in deluge')

                    return client.core.get_available_plugins().addCallback(on_get_available_plugins)

            label_deferred = client.core.get_enabled_plugins().addCallback(on_get_enabled_plugins)
            dlist.append(label_deferred)

        def on_get_daemon_info(ver):
            """Gets called with the daemon version info, stores it in self."""
            log.debug('deluge version %s' % ver)
            self.deluge_version = ver

        version_deferred = client.daemon.info().addCallback(on_get_daemon_info)
        dlist.append(version_deferred)

        def on_get_session_state(torrent_ids):
            """Gets called with a list of torrent_ids loaded in the deluge session.
            Adds new torrents and modifies the settings for ones already in the session."""
            dlist = []
            # add the torrents
            for entry in task.accepted:

                def add_entry(entry, opts):
                    """Adds an entry to the deluge session"""
                    magnet, filedump = None, None
                    if entry.get('url', '').startswith('magnet:'):
                        magnet = entry['url']
                    else:
                        if not os.path.exists(entry['file']):
                            entry.fail('Downloaded temp file \'%s\' doesn\'t exist!' % entry['file'])
                            del(entry['file'])
                            return
                        with open(entry['file'], 'rb') as f:
                            filedump = base64.encodestring(f.read())
                    log.verbose('Adding %s to deluge.' % entry['title'])
                    if magnet:
                        return client.core.add_torrent_magnet(magnet, opts)
                    else:
                        return client.core.add_torrent_file(entry['title'], filedump, opts)

                # Generate deluge options dict for torrent add
                add_opts = {}
                try:
                    path = entry.render(entry.get('path', config['path']))
                    if path:
                        add_opts['download_location'] = pathscrub(os.path.expanduser(path))
                except RenderError as e:
                    log.error('Could not set path for %s: %s' % (entry['title'], e))
                for fopt, dopt in self.options.iteritems():
                    value = entry.get(fopt, config.get(fopt))
                    if value is not None:
                        add_opts[dopt] = value
                        if fopt == 'ratio':
                            add_opts['stop_at_ratio'] = True
                # Make another set of options, that get set after the torrent has been added
                modify_opts = {'label': format_label(entry.get('label', config['label'])),
                               'queuetotop': entry.get('queuetotop', config.get('queuetotop')),
                               'main_file_only': entry.get('main_file_only', config.get('main_file_only', False))}
                try:
                    movedone = entry.render(entry.get('movedone', config['movedone']))
                    modify_opts['movedone'] = pathscrub(os.path.expanduser(movedone))
                except RenderError as e:
                    log.error('Error setting movedone for %s: %s' % (entry['title'], e))
                try:
                    content_filename = entry.get('content_filename', config.get('content_filename', ''))
                    modify_opts['content_filename'] = pathscrub(entry.render(content_filename))
                except RenderError as e:
                    log.error('Error setting content_filename for %s: %s' % (entry['title'], e))
                torrent_id = entry.get('deluge_id') or entry.get('torrent_info_hash')
                torrent_id = torrent_id and torrent_id.lower()
                if torrent_id in torrent_ids:
                    log.info('%s is already loaded in deluge, setting options' % entry['title'])
                    # Entry has a deluge id, verify the torrent is still in the deluge session and apply options
                    # Since this is already loaded in deluge, we may also need to change the path
                    modify_opts['path'] = add_opts.pop('download_location', None)
                    dlist.extend([set_torrent_options(torrent_id, entry, modify_opts),
                                  client.core.set_torrent_options([torrent_id], add_opts)])
                else:
                    dlist.append(add_entry(entry, add_opts).addCallbacks(
                        set_torrent_options, on_fail, callbackArgs=(entry, modify_opts), errbackArgs=(task, entry)))
            return defer.DeferredList(dlist)

        dlist.append(client.core.get_session_state().addCallback(on_get_session_state))

        def on_complete(result):
            """Gets called when all of our tasks for deluge daemon are complete."""
            client.disconnect()

        tasks = defer.DeferredList(dlist).addBoth(on_complete)

        def on_timeout(result):
            """Gets called if tasks have not completed in 30 seconds.
            Should only happen when something goes wrong."""
            log.error('Timed out while adding torrents to deluge.')
            log.debug('dlist: %s' % result.resultList)
            client.disconnect()

        # Schedule a disconnect to happen if FlexGet hangs while connected to Deluge
        # Leave the timeout long, to give time for possible lookups to occur
        reactor.callLater(600, lambda: tasks.called or on_timeout(tasks))
def on_task_exit(self, task, config):
"""Make sure all temp files are cleaned up when task exits"""
# If download plugin is enabled, it will handle cleanup.
if not 'download' in task.config:
download = plugin.get_plugin_by_name('download')
download.instance.cleanup_temp_files(task)
    def on_task_abort(self, task, config):
        """Make sure normal cleanup tasks still happen on abort."""
        # Run the base-class abort handling first, then perform the same
        # temp-file cleanup that a normal task exit would do.
        DelugePlugin.on_task_abort(self, task, config)
        self.on_task_exit(task, config)
@event('plugin.register')
def register_plugin():
    # Register both halves of the deluge integration with FlexGet's
    # plugin API (version 2): an input and an output plugin.
    plugin.register(InputDeluge, 'from_deluge', api_ver=2)
    plugin.register(OutputDeluge, 'deluge', api_ver=2)
plugin.register(OutputDeluge, 'deluge', api_ver=2)
| protomouse/Flexget | flexget/plugins/plugin_deluge.py | Python | mit | 37,776 |
import binascii

# Hex dump of the obfuscated challenge bytes.  The bytes are processed
# below in 8-byte little-endian words; the trailing zero bytes are padding.
data = '''
D9 51 44 5C 65 D5 3D 7D C8 67 BC 68 C8 68 6F 3F
C8 64 3F 30 48 41 72 3F 75 C8 67 F4 68 48 B9 6E
7C C8 7F 3C 74 5C 74 3C 74 3C 5C 3C 74 3C 5C 77
48 FE E8 67 C8 49 48 48 48 48 48 48 48 48 48 48
71 43 00 00 00 00 00 00
'''
# Strip all whitespace so unhexlify sees a pure hex-digit string.
data = data.replace(' ', '')
data = data.replace('\n', '')
# NOTE(review): Python 2 only -- unhexlify() returns a str whose characters
# can be passed to ord(); under Python 3 it yields ints and this map fails.
data = list(map(ord, binascii.unhexlify(data)))
def unbit(val, bitsize=8):
    """Undo a bitwise differential (delta) encoding of a `bitsize`-bit value.

    Bit k of the result, counting from the most significant bit, is the
    XOR of the top k+1 bits of `val` -- i.e. a prefix-XOR scan running
    from the MSB down to the LSB.
    """
    bits = [(val >> i) & 1 for i in range(bitsize)]
    result = 0
    running = 0
    # Walk from the most significant bit downwards, accumulating the XOR.
    for bit in reversed(bits):
        running ^= bit
        result = (result << 1) | running
    return result
length = len(data)
# Pass 1: treat each group of 8 bytes as one little-endian 64-bit word and
# undo the bitwise differential over the whole word.
# NOTE(review): the range stops at length-8, so the final 8-byte group (the
# zero padding above) is not touched by this pass -- presumably intentional.
for i in range(0, length-8, 8):
    acc = 0
    shift = 0
    # assemble the little-endian word
    for j in range(8):
        acc += data[i+j] << shift
        shift += 8
    acc = unbit(acc, 8 * 8)
    # scatter the decoded word back into individual bytes
    for j in range(8):
        data[i+j] = acc & 0xFF
        acc = acc >> 8
# Pass 2: undo the differential within every single byte.
for i in range(length):
    data[i] = unbit(data[i])
# Python 2 print statement: emit the decoded bytes as text.
print ''.join(map(chr, data))
| Qwaz/solved-hacking-problem | sciencewar/2018/ezbt/solver.py | Python | gpl-2.0 | 990 |
import os
import random
import string
import unittest
from fs.errors import ResourceNotFoundError
from fs.path import relpath
from fs.tempfs import TempFS
from fs.tests import FSTestCases
from fs.tests import ThreadingTestCases
from versioning_fs import VersioningFS
from versioning_fs.errors import VersionError
# Size constants in bytes.
KB = 1024
MB = 1024 ** 2
def generate_file(fs, path, size, generator=None):
    """Write roughly `size` bytes of repeated text to `path` on `fs`.

    The file is filled by repeating a chunk: either the default '12345678'
    or the first value yielded by `generator` (a callable returning an
    iterator).  The chunk is written size // len(chunk) times, so the file
    may be slightly smaller than `size` when `size` is not a multiple of
    the chunk length.
    """
    with fs.open(path, 'wb') as f:
        if generator is None:
            text = '12345678'
        else:
            # next() works on both Python 2 and 3; the original
            # generator().next() call was Python-2-only.
            text = next(generator())
        # Floor division: plain '/' yields a float under Python 3, which
        # range() rejects.  Behavior is unchanged under Python 2.
        for _ in range(size // len(text)):
            f.write(text)
def generate_user_files(fs, dir_path, count, size):
    """Create `count` randomly named files of roughly `size` bytes in `dir_path`."""
    targets = (os.path.join(dir_path, random_filename()) for _ in range(count))
    for target in targets:
        generate_file(fs, target, size)
def random_filename(size=20):
    """Return a random file name of `size` uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    picked = []
    for _ in range(size):
        picked.append(random.choice(alphabet))
    return ''.join(picked)
class BaseTest(unittest.TestCase):
    """Base test case: a fresh VersioningFS over temporary filesystems.

    NOTE(review): testing={'time': 1} appears to bypass rdiff-backup's
    time settings so tests run quickly (cf. BaseTimeSensitiveTest, which
    omits it) -- confirm against VersioningFS.
    """
    def setUp(self):
        # the backup fs lives inside the root fs's system path so snapshots
        # stay alongside the data they version
        rootfs = TempFS()
        backup = TempFS(temp_dir=rootfs.getsyspath('/'))
        self.fs = VersioningFS(rootfs, backup=backup, tmp=TempFS(),
                               testing={'time': 1})

    def tearDown(self):
        self.fs.close()
class BaseTimeSensitiveTest(unittest.TestCase):
    """The base class for tests that should not bypass the time settings for
    rdiff-backup.
    """
    def setUp(self):
        # same layout as BaseTest, but without the testing time override
        rootfs = TempFS()
        backup = TempFS(temp_dir=rootfs.getsyspath('/'))
        self.fs = VersioningFS(rootfs, backup=backup, tmp=TempFS())

    def tearDown(self):
        self.fs.close()
class TestVersioningFS(FSTestCases, ThreadingTestCases, BaseTimeSensitiveTest):
    """Run the stock pyfilesystem FS and threading test suites against VersioningFS."""
    # show full diffs on assertion failures
    maxDiff = None
class TestSnapshotAttributes(BaseTimeSensitiveTest):
    """Test meta data manipulation for the files involved in snapshots."""

    def test_snapshot_file_versions(self):
        """Writes should bump version numbers on a per-file basis."""
        # make sure no snapshot information exists yet
        self.assert_all_files_have_snapshot_info(should_exist=False)

        repeat_text = 'smartfile_versioning_rocks_\n'

        def file_contents():
            while True:
                yield repeat_text

        # generate file 1
        file_name = random_filename()
        generate_file(fs=self.fs, path=file_name, size=5*KB,
                      generator=file_contents)
        # make sure each user file is version 1
        self.assert_all_file_versions_equal(1)

        # generate file 2
        file_name = random_filename()
        generate_file(fs=self.fs, path=file_name, size=5*KB,
                      generator=file_contents)
        # make sure each user file is version 1
        self.assert_all_file_versions_equal(1)

        with self.fs.open(file_name, 'wb') as f:
            f.write('hello world')

        # check that the updated file is at version 2
        self.assertEqual(self.fs.version(file_name), 2)

        # not all of the files will be at the same version
        with self.assertRaises(AssertionError):
            self.assert_all_file_versions_equal(1)

        # check that only one file was updated to version 1
        self.fs.remove(file_name)
        self.assert_all_file_versions_equal(1)

        # make sure all files in the user folder have snapshot information
        self.assert_all_files_have_snapshot_info(should_exist=True)

    def test_file_version_timestamps(self):
        """Test version information for a specific path."""
        file_name = random_filename()
        with self.fs.open(file_name, 'wb') as f:
            f.write('hello world\n')
        self.assertEqual(len(self.fs.list_info(file_name).keys()), 1)
        with self.fs.open(file_name, 'wb') as f:
            f.write('hello world123\n')
        with self.fs.open(file_name, 'wb') as f:
            f.write('hello world123456\n')
        version_info = self.fs.list_info(file_name)
        dates = version_info.values()
        # timestamps must be non-decreasing across successive versions
        for z in range(len(dates) - 1):
            current_date = dates[z]
            next_date = dates[z+1]
            self.assertTrue(current_date <= next_date)

    def test_file_version_sizes(self):
        """Test version sizes for a specific path."""
        file_name = random_filename()
        for _ in range(3):
            with self.fs.open(file_name, 'wb') as f:
                f.write(random_filename())
                f.write('\n')
        self.assertEqual(len(self.fs.list_sizes(file_name).keys()), 3)

    def assert_all_file_versions_equal(self, version):
        # helper: every user file (ignoring temp/backup artifacts) must be
        # exactly at `version`
        for path in self.fs.walkfiles('/'):
            if not 'abcdefg' in path and 'tmp' not in path:
                path = relpath(path)
                file_version = self.fs.version(path)
                self.assertEqual(file_version, version)

    def assert_all_files_have_snapshot_info(self, should_exist=True):
        # helper: snapshot-info presence must match `should_exist` for every
        # user file (ignoring temp/backup artifacts)
        for path in self.fs.walkfiles('/'):
            if not 'abcdefg' in path and 'tmp' not in path:
                path = relpath(path)
                snapshot_info_exists = self.fs.has_snapshot(path)
                self.assertEqual(snapshot_info_exists, should_exist)
class TestFileVersions(BaseTest):
    """Test file versions."""

    def test_single_file_write(self):
        """Each 'wb' open/close cycle should create a new version."""
        file_name = random_filename()
        f = self.fs.open(file_name, 'wb')
        f.write('smartfile_versioning_rocks\n')
        f.close()

        # check that version 1 was created
        self.assertEqual(self.fs.version(file_name), 1)

        f = self.fs.open(file_name, 'rb')
        self.assertEqual(f.read(), 'smartfile_versioning_rocks\n')
        f.close()

        # make some changes to the file and check for version increment
        f = self.fs.open(file_name, 'wb')
        f.writelines("hello world!\nhello world!")
        f.close()
        self.assertEqual(self.fs.version(file_name), 2)

        # check the contents when we open the file
        f = self.fs.open(file_name, 'rb')
        self.assertEqual(f.readlines(), ["hello world!\n", "hello world!"])
        f.close()

        # make sure the version has not been updated since reading
        self.assertEqual(self.fs.version(file_name), 2)

    def test_single_file_append(self):
        """Appending should version the file just like writing does."""
        file_name = random_filename()
        f = self.fs.open(file_name, 'ab')
        f.write('smartfile_versioning_rocks\n')
        f.close()

        # check that version 1 was created
        self.assertEqual(self.fs.version(file_name), 1)

        f = self.fs.open(file_name, 'rb')
        self.assertEqual(f.read(), 'smartfile_versioning_rocks\n')
        f.close()

        # make some changes to the file and check for version increment
        f = self.fs.open(file_name, 'ab')
        f.writelines("hello world!\nhello world!")
        f.close()
        self.assertEqual(self.fs.version(file_name), 2)

        # check the contents when we open the file
        f = self.fs.open(file_name, 'rb')
        self.assertEqual(f.readlines(), ['smartfile_versioning_rocks\n',
                                         "hello world!\n", "hello world!"])
        f.close()

        # make sure the version has not been updated since reading
        self.assertEqual(self.fs.version(file_name), 2)

    def test_open_old_version(self):
        """Opening with version=N should return that version's contents."""
        file_name = random_filename()
        f = self.fs.open(file_name, 'wb')
        f.write("smartfile")
        f.close()

        f = self.fs.open(file_name, 'wb')
        f.write("smartfile versioning")
        f.close()

        f = self.fs.open(file_name, 'wb')
        f.write("smartfile versioning rocks")
        f.close()

        # now try opening previous versions of the file and check content
        f = self.fs.open(file_name, 'rb', version=1)
        self.assertEqual(f.read(), "smartfile")
        f.close()

        f = self.fs.open(file_name, 'rb', version=2)
        self.assertEqual(f.read(), "smartfile versioning")
        f.close()

        f = self.fs.open(file_name, 'rb', version=3)
        self.assertEqual(f.read(), "smartfile versioning rocks")
        f.close()

        # the file version has not changed since we only read the version
        self.assertEqual(self.fs.version(file_name), 3)

    def test_bad_version(self):
        """Requesting nonexistent versions should raise ResourceNotFoundError."""
        repeat_text = 'smartfile_versioning_rocks_\n'

        def file_contents():
            while True:
                yield repeat_text

        # generate file 1
        file_name = random_filename()
        generate_file(fs=self.fs, path=file_name, size=5*KB,
                      generator=file_contents)

        # version 0 should never exist
        with self.assertRaises(ResourceNotFoundError):
            self.fs.open(file_name, 'rb', version=0)

        # version 2 has not been created yet
        with self.assertRaises(ResourceNotFoundError):
            self.fs.open(file_name, 'rb', version=2)

    def test_skip_version_snapshot(self):
        """
        Test opening a file but setting 'take_snapshot' to False.
        A version should not be created.
        """
        file_name = random_filename()
        f = self.fs.open(file_name, 'wb', take_snapshot=False)
        f.write('smartfile_versioning_rocks\n')
        f.close()
        # check that version 1 was not created
        self.assertEqual(self.fs.version(file_name), 0)
class TestVersionDeletion(BaseTimeSensitiveTest):
    """Test the deletion of older versions."""

    def test_delete_older_versions(self):
        """remove_versions_before accepts version numbers, digit strings
        and timestamp strings, and rejects out-of-range/invalid values."""
        file_name = random_filename()
        iterations = 5

        # generate some files
        for _ in range(iterations):
            with self.fs.open(file_name, 'wb') as f:
                f.write(random_filename())

        # try a bad version: remove versions before 1
        with self.assertRaises(VersionError):
            self.fs.remove_versions_before(file_name, version=1)

        # try a bad version: remove versions after the current+1
        with self.assertRaises(VersionError):
            invalid_version = iterations + 1
            self.fs.remove_versions_before(file_name, version=invalid_version)

        # try a bad version: use an invalid time format
        with self.assertRaises(VersionError):
            invalid_version = "3/4/1998T13:00"
            self.fs.remove_versions_before(file_name, version=invalid_version)

        # look at the time of version 2 and delete anything older than it
        self.fs.remove_versions_before(path=file_name, version=2)

        # we deleted versions older than 2 which deleted version 1
        total_versions = self.fs.version(file_name)
        self.assertEqual(total_versions, 4)

        # try deleting with a timestamp string rather than version number
        delete_date = self.fs.list_info(file_name)[2]
        self.fs.remove_versions_before(path=file_name, version=delete_date)

        # we deleted versions before the date of the second version
        total_versions = self.fs.version(file_name)
        self.assertEqual(total_versions, 3)

        # try deleting a version with a string that is also a digit
        self.fs.remove_versions_before(path=file_name, version=u'2')

        # we deleted versions older than 2 which deleted version 1
        total_versions = self.fs.version(file_name)
        self.assertEqual(total_versions, 2)
class TestRdiffBackupSleep(BaseTimeSensitiveTest):
    """Rdiff backup cannot make two snapshots within 1 second.
    This test checks that the filewrapper sleeps for 1 second before
    trying to make a snapshot.
    """

    def test_quick_file_changes(self):
        # test two file edits within 1 second
        file_name = random_filename()
        iterations = 3
        for _ in range(iterations):
            with self.fs.open(file_name, 'wb') as f:
                f.write(random_filename())
        # every rapid edit must still have produced its own version
        self.assertEqual(self.fs.version(file_name), iterations)
class TestFileOperations(BaseTest):
    """Test fs.move, fs.movedir, fs.remove, and fs.removedir"""

    def test_move_single_file(self):
        """Move a single file, which should also move its backups."""
        # have 2 versions of a file we create
        file_name = random_filename()
        contents = ["smartfile", "smartfile versioning"]
        for content in contents:
            with self.fs.open(file_name, 'wb') as f:
                f.write(content)

        # move the file somewhere else
        new_filename = random_filename()
        self.fs.move(file_name, new_filename)

        # check if versioning is still available
        for version, content in enumerate(contents):
            with self.fs.open(new_filename, 'rb', version=version+1) as f:
                self.assertEqual(f.read(), contents[version])

    def test_move_file_into_directory(self):
        """Move a file into a directory and check that backups were moved."""
        file_name = random_filename()
        dir_name = random_filename()
        file_path = os.path.join(dir_name, file_name)
        contents = ["smartfile", "smartfile versioning",
                    "smartfile versioning rocks"]
        for content in contents:
            with self.fs.open(file_name, 'wb') as f:
                f.write(content)

        # create a directory for the file to be moved into
        self.fs.makedir(dir_name)
        # move the file into the directory
        self.fs.move(file_name, file_path)

        # check if versioning is still available
        self.assertTrue(self.fs.has_snapshot(file_path))
        for version, content in enumerate(contents):
            f = self.fs.open(file_path, 'rb', version=version+1)
            self.assertEqual(f.read(), contents[version])
            f.close()

    def test_move_directory(self):
        """Move a directory and check that backups were moved."""
        file1_name = random_filename()
        dir1_name = random_filename()
        dir2_name = random_filename()
        file1_full_path = os.path.join(dir1_name, file1_name)
        file1_new_full_path = os.path.join(dir2_name, file1_name)

        # create a directory for the file we are going to create
        self.fs.makedir(dir1_name)
        contents = ["smartfile", "smartfile versioning"]
        for content in contents:
            with self.fs.open(file1_full_path, 'wb') as f:
                f.write(content)

        # move the directory
        self.fs.movedir(dir1_name, dir2_name)

        # check if versioning is still available
        self.assertTrue(self.fs.has_snapshot(file1_new_full_path))
        for version, content in enumerate(contents):
            f = self.fs.open(file1_new_full_path, 'rb', version=version+1)
            self.assertEqual(f.read(), contents[version])
            f.close()

    def test_rename_file(self):
        """Rename a file and check that backups were moved."""
        file_name = random_filename()
        file2_name = random_filename()
        contents = ["smartfile", "smartfile versioning",
                    "smartfile versioning rocks"]
        for content in contents:
            with self.fs.open(file_name, 'wb') as f:
                f.write(content)

        # Rename the file
        self.fs.rename(file_name, file2_name)

        # check if versioning is still available
        self.assertTrue(self.fs.has_snapshot(file2_name))
        for version, content in enumerate(contents):
            f = self.fs.open(file2_name, 'rb', version=version+1)
            self.assertEqual(f.read(), contents[version])
            f.close()

    def test_rename_directory(self):
        """Rename a directory and check that backups were moved."""
        file1_name = random_filename()
        dir1_name = random_filename()
        dir2_name = random_filename()
        file1_full_path = os.path.join(dir1_name, file1_name)
        file1_new_full_path = os.path.join(dir2_name, file1_name)

        # create a directory for the file we are going to create
        self.fs.makedir(dir1_name)
        contents = ["smartfile", "smartfile versioning"]
        for content in contents:
            with self.fs.open(file1_full_path, 'wb') as f:
                f.write(content)

        # move the directory
        self.fs.rename(dir1_name, dir2_name)

        # check if versioning is still available
        self.assertTrue(self.fs.has_snapshot(file1_new_full_path))
        for version, content in enumerate(contents):
            f = self.fs.open(file1_new_full_path, 'rb', version=version+1)
            self.assertEqual(f.read(), contents[version])
            f.close()

    def test_remove_single_file(self):
        """Remove a single file along with its backups."""
        file_name = random_filename()
        with self.fs.open(file_name, 'wb') as f:
            f.write("smartfile")
        self.fs.remove(file_name)
        self.assertFalse(self.fs.has_snapshot(file_name))

    def test_remove_single_dir(self):
        """Remove a single dir along with its backups."""
        dir_name = random_filename()
        self.fs.makedir(dir_name)
        files = [random_filename() for x in range(4)]
        paths = [os.path.join(dir_name, path) for path in files]
        # two versions of every file inside the directory
        for path in paths:
            for _ in range(2):
                with self.fs.open(path, 'wb') as f:
                    f.write('hello world')
        self.fs.removedir(dir_name, force=True)
        for path in paths:
            self.assertTrue(not self.fs.has_snapshot(path))
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| smartfile/file-versioning | tests.py | Python | mit | 17,361 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from setuptools import setup
import unittest
def test_suite():
    """Discover this project's unit tests for setup.py's test runner."""
    loader = unittest.TestLoader()
    return loader.discover('tests', pattern='test_*.py')
# Read the long description from the README.  Specify the encoding
# explicitly so a non-ASCII README cannot break installation under a
# C/POSIX locale (the package requires Python >= 3.6, so the keyword
# argument is always available).
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setup(
    name='spdx-tools',
    version='0.7.0a3',
    description='SPDX parser and tools.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=[
        'spdx',
        'spdx.parsers',
        'spdx.parsers.lexers',
        'spdx.writers',
        'examples',
    ],
    include_package_data=True,
    zip_safe=False,
    test_suite='setup.test_suite',
    install_requires=[
        'ply',
        'rdflib',
        'click',
        'pyyaml',
        'xmltodict',
    ],
    python_requires='>=3.6',
    entry_points={
        'console_scripts': [
            'convertor = spdx.cli_tools.convertor:main',
            'parser = spdx.cli_tools.parser:main',
        ],
    },
    author='Ahmed H. Ismail',
    author_email='ahm3d.hisham@gmail.com',
    maintainer='Philippe Ombredanne, SPDX group at the Linux Foundation and others',
    maintainer_email='pombredanne@gmail.com',
    url='https://github.com/spdx/tools-python',
    license='Apache-2.0',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ]
)
| spdx/tools-python | setup.py | Python | apache-2.0 | 1,592 |
#
# Spongioblast - A desktop media metadata service.
# Copyright (C) 2015 Michael Gratton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gi
import os
import signal
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, GLib, Gtk
from gi_composites import GtkTemplate
class Spongioblast(Gtk.Application):
    """Main GUI application class.
    """

    __gtype_name__ = 'Spongioblast'

    # NOTE(review): `_` is assumed to be installed globally (e.g. by
    # gettext) before this module is imported -- it is not imported here.
    APPLICATION_NAME = _('Spongioblast')
    APPLICATION_ID = "net.vee.Spongioblast"
    PROGRAM_NAME = 'spongioblast'
    LOGO_ICON_NAME = 'media-tape-symbolic'
    VERSION = '0.1'
    COPYRIGHT = 'Copyright © 2015 Michael Gratton'
    WEBSITE = 'http://vee.net/projects/spongioblast'

    def __init__(self):
        Gtk.Application.__init__(
            self,
            application_id=self.APPLICATION_ID,
            flags=Gio.ApplicationFlags.FLAGS_NONE
        )
        GLib.set_application_name(self.APPLICATION_NAME)
        GLib.set_prgname(self.PROGRAM_NAME)
        self.settings = None
        self.connect('startup', self._startup_cb)
        self.connect('activate', self._activate_cb)
        # quit on appropriate signals
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
            GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, sig, self.quit)

    def _startup_cb(self, user_data):
        """GApplication 'startup' handler: registers app actions and the
        user data directory."""
        #self.settings = Gio.Settings.new(self.APPLICATION_ID)
        #cssProviderFile = Gio.File.new_for_uri(
        #    'resource:///net/vee/Spongioblast/application.css'
        #)
        #cssProvider = Gtk.CssProvider()
        #cssProvider.load_from_file(cssProviderFile)
        #styleContext = Gtk.StyleContext()
        #styleContext.add_provider_for_screen(screen, cssProvider,
        #    Gtk.STYLE_PROVIDER_PRIORITY_USER)

        action = Gio.SimpleAction(name='quit', parameter_type=None)
        action.connect('activate', lambda action, param: self.quit())
        self.add_action(action)

        action = Gio.SimpleAction(name='about', parameter_type=None)
        action.connect('activate', lambda action, param: self.show_about())
        self.add_action(action)

        # user data directory
        self.user_data_dir = os.path.join(GLib.get_user_data_dir(), self.PROGRAM_NAME)
        self.to_user_data_dir()  # ensure it exists

    def _activate_cb(self, user_data):
        """GApplication 'activate' handler: shows the main window."""
        window = MainWindow(self)
        window.show_all()

    def to_user_data_dir(self, suffix=None):
        """
        Returns the user data directory as a Gio.GFile

        This method ensures the directory is created if it does not
        already exist.

        @param suffix: optional directory suffix to be added to the end of
        the user directory
        @return the path to an existing user directory
        """
        path = self.user_data_dir
        if suffix:
            path = os.path.join(path, suffix)
        if not os.path.isdir(path):
            os.makedirs(path)
        return Gio.File.new_for_path(path)

    def to_app_data_dir(self, suffix=None):
        """Returns the application's data directory with optional suffix.

        This method does not ensure the directory is created if it
        does not already exist.

        @param suffix: optional directory suffix to be added to the end of
        the app data directory
        @return the path to the app data directory
        """
        path = 'res'
        if suffix:
            path = os.path.join(path, suffix)
        return path

    def show_about(self, parent=None):
        """Runs the modal About dialog.

        NOTE(review): `parent` is currently unused -- the dialog is always
        parented to the active window.
        """
        about = Gtk.AboutDialog(
            parent=self.get_active_window(),
            name=self.APPLICATION_NAME,
            logo_icon_name=self.LOGO_ICON_NAME,
            version=self.VERSION,
            copyright=self.COPYRIGHT,
            license_type=Gtk.License.GPL_3_0,
            website=self.WEBSITE
        )
        about.run()
        about.hide()
        about.destroy()
@GtkTemplate(ui='/net/vee/Spongioblast/main-window.ui')
class MainWindow(Gtk.ApplicationWindow):
    """Window displayed to the user on normal application startup.
    """

    # child widget bound from the template UI file
    stack = GtkTemplate.Child()

    def __init__(self, application):
        # Name the subclass in super(): the original
        # super(Gtk.ApplicationWindow, self) started the MRO lookup
        # *after* Gtk.ApplicationWindow, skipping its __init__.
        super(MainWindow, self).__init__(application=application)
        self.init_template()
        self.stack.add_titled(ListBox(), 'services-box', _('Services'))
        self.stack.add_titled(ListBox(), 'applications-box', _('Applications'))
@GtkTemplate(ui='/net/vee/Spongioblast/list-box.ui')
class ListBox(Gtk.Box):
    """A box widget containing a ListBox and Toolbar for manipulating it.
    """
    def __init__(self):
        # Fix: super() must be called with ListBox, not the base class.
        # super(Gtk.Box, self) resolves __init__ *after* Gtk.Box in the MRO,
        # skipping Gtk.Box's own initializer.
        super(ListBox, self).__init__()
        self.init_template()
| mjog/spongioblast | spongioblast/application.py | Python | gpl-3.0 | 5,285 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import subprocess, os
def is_git_available():
    """Return True if a `git` executable can be found on the PATH."""
    # Fixes two defects of the original one-liner:
    #  * `... is 0` compared object identity, not value; it only worked due
    #    to CPython's small-int caching (and is a SyntaxWarning on 3.8+).
    #  * open(os.devnull) leaked the file handle; use a context manager.
    with open(os.devnull, 'wb') as devnull:
        return subprocess.call(['which', 'git'], stdout=devnull) == 0
# -----------------------------------------------------------------------------
def git_show(format, ref='HEAD'):
    """ git_show
    returns a string containing the output from git show
    """
    # --pretty wraps the user format in literal double quotes, so the first
    # output line looks like '"<formatted>"'; the [1:-1] below strips them.
    # NOTE(review): assumes Python 2 (check_output returning str); under
    # Python 3 this would split bytes with a str separator — verify.
    r = subprocess.check_output(['git', '--no-pager', 'show', ref, '--quiet', '--pretty=format:"' + format + '"'])
    # only use first line, because quiet does not seem to have the expected
    # results for older git versions
    return r.split('\n', 1)[0][1:-1]
def git_config(key):
    """ git_config
    returns a string containing the output from git config
    returns an empty string if the command fails
    """
    try:
        # Take only the first line; git terminates the value with a newline.
        return subprocess.check_output(['git', 'config', key]).split('\n', 1)[0]
    except subprocess.CalledProcessError:
        # Key not set (git config exits non-zero) -> treat as empty.
        return ""
def git_info_header(env):
    """Generate `xpcc_git_info.hpp` with defines describing the git checkout.

    Collects last-commit metadata, the user's git config, and a summary of
    the working-tree status, then emits them via env.DefineHeader().
    """
    defines = {}
    try:
        # Last Commit Values
        defines['XPCC_GIT_SHA'] = env.CStringLiteral(git_show('%H'))
        defines['XPCC_GIT_SHA_ABBR'] = env.CStringLiteral(git_show('%h'))
        defines['XPCC_GIT_AUTHOR'] = env.CStringLiteral(git_show('%an'))
        defines['XPCC_GIT_AUTHOR_EMAIL'] = env.CStringLiteral(git_show('%ae'))
        defines['XPCC_GIT_AUTHOR_DATE'] = env.CStringLiteral(git_show('%ad'))
        defines['XPCC_GIT_AUTHOR_DATE_TIMESTAMP'] = git_show('%at')
        defines['XPCC_GIT_COMMITTER'] = env.CStringLiteral(git_show('%cn'))
        defines['XPCC_GIT_COMMITTER_EMAIL'] = env.CStringLiteral(git_show('%ce'))
        defines['XPCC_GIT_COMMITTER_DATE'] = env.CStringLiteral(git_show('%cd'))
        defines['XPCC_GIT_COMMITTER_DATE_TIMESTAMP'] = git_show('%ct')
        defines['XPCC_GIT_SUBJECT'] = env.CStringLiteral(git_show('%s'))
        # Git Config
        defines['XPCC_GIT_CONFIG_USER_NAME'] = env.CStringLiteral(git_config('user.name'))
        defines['XPCC_GIT_CONFIG_USER_EMAIL'] = env.CStringLiteral(git_config('user.email'))
        # Status
        # Count working-tree entries per porcelain status letter (first
        # column): Modified/Added/Deleted/Renamed/Copied/Unmerged/untracked.
        # NOTE(review): a status letter outside this set would raise KeyError.
        s = subprocess.check_output(['git', '--no-pager', 'status', '--porcelain']).split('\n')
        f = { 'M': 0, 'A': 0, 'D': 0, 'R': 0, 'C': 0, 'U': 0, '?': 0}
        for line in s:
            if len(line.strip()) > 0:
                c = line.strip()[0]
                f[c] = f[c] + 1
        defines['XPCC_GIT_MODIFIED'] = f['M']
        defines['XPCC_GIT_ADDED'] = f['A']
        defines['XPCC_GIT_DELETED'] = f['D']
        defines['XPCC_GIT_RENAMED'] = f['R']
        defines['XPCC_GIT_COPIED'] = f['C']
        defines['XPCC_GIT_UPDATED_NOT_MERGED'] = f['U']
        defines['XPCC_GIT_UNTRACKED'] = f['?']
    except subprocess.CalledProcessError as e:
        env.Error('failed to run git command: %s' % e)
    # Header comment; whatever was collected before a failure still gets
    # written out.
    c = "Its content is created by a call to env.GitInfoHeader() in your SConstruct file."
    env.DefineHeader(defines=defines, header="xpcc_git_info.hpp", comment=c)
def generate(env, **kw):
    """SCons tool hook: attach git_info_header() to the env as GitInfoHeader()."""
    env.AddMethod(git_info_header, 'GitInfoHeader')
def exists(env):
    """SCons tool hook: the tool is usable only when git is on the PATH."""
    return is_git_available()
| chrism333/xpcc | scons/site_tools/git.py | Python | bsd-3-clause | 3,191 |
import json
import socket
import collections
def parse_knx_address(address):
    """Parse physical/individual KNX address.
    Address structure (A=Area, L=Line, B=Bus device):
    --------------------
    |AAAA|LLLL|BBBBBBBB|
    --------------------
    4 Bit|4 Bit| 8 Bit
    parse_knx_address(99999)
    '8.6.159'
    """
    assert isinstance(address, int), 'Address should be an integer, got %s instead' % type(address)
    area = (address >> 12) & 0xf
    line = (address >> 8) & 0xf
    device = address & 0xff
    return '{}.{}.{}'.format(area, line, device)
def pack_knx_address(address):
    """Pack physical/individual KNX address.
    pack_knx_address('15.15.255')
    65535
    """
    assert isinstance(address, str), 'Address should be a string, got %s instead' % type(address)
    area, line, device = address.split('.')
    return (int(area) << 12) + (int(line) << 8) + int(device)
def parse_knx_group_address(address):
    """Parse KNX group address.
    parse_knx_group_address(12345)
    '6/0/57'
    """
    assert isinstance(address, int), 'Address should be an integer, got %s instead' % type(address)
    main_group = (address >> 11) & 0x1f
    middle_group = (address >> 8) & 0x7
    sub_group = address & 0xff
    return '{}/{}/{}'.format(main_group, middle_group, sub_group)
def pack_knx_group_address(address):
    """Pack KNX group address.
    pack_knx_group_address('6/0/57')
    12345
    """
    assert isinstance(address, str), 'Address should be a string, got %s instead' % type(address)
    main_group, middle_group, sub_group = address.split('/')
    return (int(main_group) << 11) + (int(middle_group) << 8) + int(sub_group)
def parse_knx_device_serial(address):
    """Parse a KNX device serial to human readable format.
    parse_knx_device_serial(b'\\x00\\x00\\x00\\x00\\x12\\x23')
    '000000001223'
    """
    # Formats exactly the first six bytes as uppercase hex; extra bytes are
    # ignored, fewer than six raise IndexError from str.format.
    assert isinstance(address, bytes), 'Address should be bytes, got %s instead' % type(address)
    return '{0:02X}{1:02X}{2:02X}{3:02X}{4:02X}{5:02X}'.format(*address)
def parse_mac_address(address):
    """Parse a MAC address to human readable format.
    parse_mac_address(b'\\x12\\x34\\x56\\x78\\x90\\x12')
    '12:34:56:78:90:12'
    """
    # Formats the first six bytes as colon-separated uppercase hex pairs.
    assert isinstance(address, bytes), 'Address should be bytes, got %s instead' % type(address)
    return '{0:02X}:{1:02X}:{2:02X}:{3:02X}:{4:02X}:{5:02X}'.format(*address)
def parse_device_descriptor(desc):
    """Parse device descriptors to three separate integers.
    parse_device_descriptor(1793)
    (0, 112, 1)
    """
    assert isinstance(desc, int), 'Device descriptor is not an integer, got %s instead' % type(desc)
    # Zero-padded 4-digit hex: first nibble is the medium, last nibble the
    # version, everything in between the device type.
    hex_desc = format(desc, '04x')
    return int(hex_desc[0]), int(hex_desc[1:-1], 16), int(hex_desc[-1])
def unpack_ip_address(address):
    """Convert a dotted-quad IPv4 string to its 4-byte packed form."""
    return socket.inet_aton(address)
def get_manufacturer_by_id(mid):
    """Look up a manufacturer name by its KNX manufacturer id.

    Returns None if the id is unknown. Note: the data file path is relative
    to the current working directory.
    """
    assert isinstance(mid, int)
    with open('knxmap/data/manufacturers.json', 'rb') as f:
        manufacturers = json.load(f)
    for entry in manufacturers.get('manufacturers'):
        if int(entry.get('knx_manufacturer_id')) == mid:
            return entry.get('name')
def make_runstate_printable(runstate):
    """Map raw runstate flags to human-readable labels.

    Accepts either the raw bytes (decoded via unpack_cemi_runstate) or an
    already-unpacked dict; returns an OrderedDict preserving input order.
    """
    labels = {
        'PROG_MODE': 'Programming Mode',
        'LINK_LAYER': 'Link Layer',
        'TRANSPORT_LAYER': 'Transport Layer',
        'APP_LAYER': 'Application Layer',
        'SERIAL_INTERFACE': 'Serial Interface',
        'USER_APP': 'User Application',
    }
    if isinstance(runstate, bytes):
        runstate = unpack_cemi_runstate(runstate)
    pretty = collections.OrderedDict()
    for key, value in runstate.items():
        if key == 'BC_DM':
            # Passed through unchanged rather than rendered as on/off.
            pretty['BC DM'] = value
        elif key in labels:
            pretty[labels[key]] = 'ENABLED' if value else 'disabled'
    return pretty
def unpack_cemi_runstate(data):
    """Parse runstate field to a dict.

    Accepts the raw bytes or an already-converted int; bit 7 is a parity
    bit and is deliberately ignored.
    """
    if isinstance(data, bytes):
        data = int.from_bytes(data, 'big')
    flag_names = ('PROG_MODE', 'LINK_LAYER', 'TRANSPORT_LAYER', 'APP_LAYER',
                  'SERIAL_INTERFACE', 'USER_APP', 'BC_DM')
    state = collections.OrderedDict()
    for bit, name in enumerate(flag_names):
        state[name] = (data >> bit) & 1
    return state
| ernw/knxmap | knxmap/utils.py | Python | gpl-3.0 | 4,490 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enable Layer-wise Adaptive Rate Scaling optimizer in ResNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tf2_common.utils.mlp_log import mlp_log
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
FLAGS = flags.FLAGS
def define_lars_flags():
  """Defines flags needed by LARS optimizer."""
  # (name, default, help) triples; registered in the original order.
  lars_flags = (
      ('end_learning_rate', None, 'Polynomial decay end learning rate.'),
      ('lars_epsilon', 0.0, 'Override autoselected LARS epsilon.'),
      ('warmup_epochs', None,
       'Override autoselected polynomial decay warmup epochs.'),
      ('momentum', 0.9,
       'Momentum parameter used in the MomentumOptimizer.'),
  )
  for name, default, help_text in lars_flags:
    flags.DEFINE_float(name, default=default, help=help_text)
class PolynomialDecayWithWarmup(
    tf.keras.optimizers.schedules.LearningRateSchedule):
  """A LearningRateSchedule that uses a polynomial decay with warmup."""
  def __init__(self,
               batch_size,
               steps_per_epoch,
               train_steps,
               initial_learning_rate=None,
               end_learning_rate=None,
               warmup_epochs=None,
               compute_lr_on_cpu=False,
               name=None):
    """Applies a polynomial decay to the learning rate with warmup."""
    super(PolynomialDecayWithWarmup, self).__init__()
    self.batch_size = batch_size
    self.steps_per_epoch = steps_per_epoch
    self.train_steps = train_steps
    self.name = name
    # Per-graph cache of the LR computation ops (see __call__).
    self.learning_rate_ops_cache = {}
    self.compute_lr_on_cpu = compute_lr_on_cpu
    # LR/warmup presets selected by global batch size.
    # NOTE(review): presumably tuned for the MLPerf ResNet runs — confirm
    # before reusing for other models.
    if batch_size < 16384:
      self.initial_learning_rate = 10.0
      warmup_epochs_ = 5
    elif batch_size < 32768:
      self.initial_learning_rate = 25.0
      warmup_epochs_ = 5
    else:
      self.initial_learning_rate = 31.2
      warmup_epochs_ = 25
    # Override default poly learning rate and warmup epochs
    # NOTE(review): truthiness test — an explicit 0 would be ignored.
    if initial_learning_rate:
      self.initial_learning_rate = initial_learning_rate
    if end_learning_rate:
      self.end_learning_rate = end_learning_rate
    else:
      self.end_learning_rate = 0.0001
    if warmup_epochs is not None:
      warmup_epochs_ = warmup_epochs
    self.warmup_epochs = warmup_epochs_
    # Emit the MLPerf compliance log lines for the chosen hyperparameters.
    opt_name = FLAGS.optimizer.lower()
    mlp_log.mlperf_print('opt_name', opt_name)
    if opt_name == 'lars':
      mlp_log.mlperf_print('{}_epsilon'.format(opt_name), FLAGS.lars_epsilon)
    mlp_log.mlperf_print('{}_opt_weight_decay'.format(opt_name),
                         FLAGS.weight_decay)
    mlp_log.mlperf_print('{}_opt_base_learning_rate'.format(opt_name),
                         self.initial_learning_rate)
    mlp_log.mlperf_print('{}_opt_learning_rate_warmup_epochs'.format(opt_name),
                         warmup_epochs_)
    mlp_log.mlperf_print('{}_opt_end_learning_rate'.format(opt_name),
                         self.end_learning_rate)
    warmup_steps = warmup_epochs_ * steps_per_epoch
    self.warmup_steps = tf.cast(warmup_steps, tf.float32)
    self.decay_steps = train_steps - warmup_steps + 1
    mlp_log.mlperf_print('{}_opt_learning_rate_decay_steps'.format(opt_name),
                         int(self.decay_steps))
    mlp_log.mlperf_print(
        '{}_opt_learning_rate_decay_poly_power'.format(opt_name), 2.0)
    mlp_log.mlperf_print('{}_opt_momentum'.format(opt_name), FLAGS.momentum)
    # The post-warmup part of the schedule is delegated to Keras.
    self.poly_rate_scheduler = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        end_learning_rate=self.end_learning_rate,
        power=2.0)
  def __call__(self, step):
    # Returns the learning rate for the given global step.
    if tf.executing_eagerly():
      return self._get_learning_rate(step)
    # In an eager function or graph, the current implementation of optimizer
    # repeatedly call and thus create ops for the learning rate schedule. To
    # avoid this, we cache the ops if not executing eagerly.
    graph = tf.compat.v1.get_default_graph()
    if graph not in self.learning_rate_ops_cache:
      if self.compute_lr_on_cpu:
        with tf.device('/device:CPU:0'):
          self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
      else:
        self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
    return self.learning_rate_ops_cache[graph]
  def _get_learning_rate(self, step):
    # Linear warmup for `warmup_steps`, then polynomial decay.
    with ops.name_scope_v2(self.name or 'PolynomialDecayWithWarmup') as name:
      initial_learning_rate = ops.convert_to_tensor_v2(
          self.initial_learning_rate, name='initial_learning_rate')
      warmup_steps = ops.convert_to_tensor_v2(
          self.warmup_steps, name='warmup_steps')
      warmup_rate = (
          initial_learning_rate * step / warmup_steps)
      # The decay schedule starts counting from the end of warmup.
      poly_steps = math_ops.subtract(step, warmup_steps)
      poly_rate = self.poly_rate_scheduler(poly_steps)
      decay_rate = tf.where(step <= warmup_steps,
                            warmup_rate, poly_rate, name=name)
      return decay_rate
  def get_config(self):
    # Constructor kwargs needed to re-create this schedule (Keras API).
    return {
        'batch_size': self.batch_size,
        'steps_per_epoch': self.steps_per_epoch,
        'train_steps': self.train_steps,
        'initial_learning_rate': self.initial_learning_rate,
        'end_learning_rate': self.end_learning_rate,
        'warmup_epochs': self.warmup_epochs,
        'name': self.name,
    }
| mlperf/training_results_v0.7 | Google/benchmarks/resnet/implementations/resnet-cloud-TF2.0-tpu-v3-32/lars_util.py | Python | apache-2.0 | 6,225 |
import json
from collections import OrderedDict
from dc_base_scrapers.common import (
format_json,
get_data_from_url,
save,
sync_file_to_github
)
class CkanScraper:
    """Fetches a dataset's resource metadata from a CKAN portal, records each
    resource, mirrors the raw JSON to GitHub, and returns the URL of the
    resource matching the requested format."""
    def __init__(self, base_url, council_id, dataset, return_format, extra_fields, encoding):
        self.url = None
        self.base_url = base_url
        self.council_id = council_id
        self.dataset = dataset
        self.return_format = return_format
        self.extra_fields = extra_fields
        self.encoding = encoding
    def get_data(self):  # pragma: no cover
        """Download the dataset metadata and return (raw bytes, parsed dict)."""
        raw = get_data_from_url(self.url)
        return (raw, json.loads(raw.decode(self.encoding)))
    def scrape(self):
        """Record every resource of the dataset; return the URL of the last
        resource whose format matches return_format (or None)."""
        self.url = "%s%s" % (self.base_url, self.dataset)
        data_str, data = self.get_data()
        resources = data['result']['resources']
        print(
            "found %i %s resources" %
            (len(resources), self.dataset)
        )
        wanted = self.return_format.lower()
        return_url = None
        for resource in resources:
            record = {
                'format': resource['format'],
                'revision_id': resource['revision_id'],
                'created': resource['created'],
                'url': resource['url'],
                'dataset': self.dataset,
            }
            record.update((field, resource[field]) for field in self.extra_fields)
            # persist to db
            save(['dataset', 'revision_id', 'format'], record, 'resources')
            if resource['format'].lower() == wanted:
                return_url = resource['url']
        sync_file_to_github(
            self.council_id,
            self.dataset,
            format_json(data_str.decode(self.encoding))
        )
        return return_url
| wdiv-scrapers/dc-base-scrapers | dc_base_scrapers/ckan_scraper.py | Python | mit | 1,840 |
# -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
from cfme.rest.gen_data import a_provider as _a_provider
from cfme.rest.gen_data import vm as _vm
from cfme.utils import error
from cfme.utils.wait import wait_for
pytestmark = [test_requirements.provision]
@pytest.fixture(scope="function")
def a_provider(request):
    """Per-test fixture: set up and return a managed infrastructure provider."""
    return _a_provider(request)
@pytest.fixture(scope="function")
def vm_name(request, a_provider, appliance):
    """Per-test fixture: create a VM via REST on the provider, return its name."""
    return _vm(request, a_provider, appliance.rest_api)
@pytest.mark.tier(3)
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_vm_from_detail(vm_name, appliance, method):
    """Delete a VM through its REST detail endpoint and verify it is gone.

    Parametrized over both HTTP methods: DELETE returns 204 No Content,
    the POST-based delete action returns 200 OK.
    """
    status = 204 if method == 'delete' else 200
    vm = appliance.rest_api.collections.vms.get(name=vm_name)
    vm.action.delete(force_method=method)
    assert appliance.rest_api.response.status_code == status
    wait_for(
        lambda: not appliance.rest_api.collections.vms.find_by(name=vm_name), num_sec=300, delay=10)
    # Deleting again must fail with 404 since the record no longer exists.
    with error.expected('ActiveRecord::RecordNotFound'):
        vm.action.delete(force_method=method)
    assert appliance.rest_api.response.status_code == 404
@pytest.mark.tier(3)
def test_delete_vm_from_collection(vm_name, appliance):
    """Delete a VM through the vms collection endpoint and verify it is gone."""
    vm = appliance.rest_api.collections.vms.get(name=vm_name)
    collection = appliance.rest_api.collections.vms
    collection.action.delete(vm)
    assert appliance.rest_api.response.status_code == 200
    wait_for(lambda: not collection.find_by(name=vm_name), num_sec=300, delay=10)
    # Deleting again must fail with 404 since the record no longer exists.
    with error.expected('ActiveRecord::RecordNotFound'):
        collection.action.delete(vm)
    assert appliance.rest_api.response.status_code == 404
| okolisny/integration_tests | cfme/tests/infrastructure/test_vm_rest.py | Python | gpl-2.0 | 1,682 |
import cherrypy
# 這是 C2G12 類別的定義
class C2G12(object):
    """CherryPy handler class for group c2g12: serves Brython drawing demos."""
    # index: landing page that links to every drawing demo below.
    # NOTE(review): the "triangle" link has no matching handler in this class
    # and will 404 — confirm whether the handler was lost or the link is stale.
    @cherrypy.expose
    def index(self, *args, **kwargs):
        outstring = '''
        這是 2014C2 協同專案下的 c2g12 分組程式開發網頁, 以下為 W12 的任務執行內容.<br />
        <!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
        <a href="fillpoly">c2g12 fillpoly 繪圖</a><br />
        <a href="drawline">c2g12 drawline 繪圖</a><br />
        <a href="drawsquare">c2g12 drawsquare 繪圖</a><br />
        <a href="drawstar">c2g12 drawstar 繪圖</a><br />
        <a href="triangle">c2g12 triangle 繪圖</a><br />
        <a href="triangle2">c2g12 triangle2 繪圖</a><br />
        <a href="japan">c2g12 japan 繪圖</a><br />
        <a href="usa">c2g12 usa 繪圖</a><br />
        '''
        return outstring
    # The CherryPy methods below draw on an HTML canvas through Brython.
    # If this module is mounted as:
    #     import programs.c2g12 as c2g12
    #     root.c2g12 = c2g12.C2G12()
    # then /c2g12/fillpoly etc. invoke the matching method.
    @cherrypy.expose
    def fillpoly(self, *args, **kwargs):
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入數學模組的所有方法
from math import *
# 導入時間模組
import time
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 定義座標轉換(0, 0) 到 (75, 20)
def change_ref_system(x, y):
    return (20 + x * 8, 420 - y * 20)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
def fill():
    ctx.beginPath()
    ctx.moveTo(75,50)
    ctx.lineTo(100,75)
    ctx.lineTo(100,25)
    ctx.fill()
def star():
    ctx.beginPath()
    ctx.moveTo(0,50)
    ctx.lineTo(11,16)
    ctx.lineTo(48,16)
    ctx.fill()
ctx.fillStyle = "blue"
fill()
star()
x1, y1 = change_ref_system(0, 0)
for 索引 in range(0, 70, 4):
    x2, y2 = change_ref_system(索引, 20)
    draw_line(x1, y1, x2, y2, linethick=3, color="blue")
x1, y1 = change_ref_system(70, 0)
for 索引 in range(0, 70, 4):
    x2, y2 = change_ref_system(索引, 20)
    draw_line(x1, y1, x2, y2, linethick=3, color="red")
</script>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    def drawline(self, *args, **kwargs):
        """Draw a single diagonal line on the canvas."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
draw_line(0, 0, 100, 100)
</script>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    def drawsquare(self, *args, **kwargs):
        """Draw a square using four line segments (y axis flipped)."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
draw_line(300,300, 500,300)
draw_line(500, 300, 500, 500)
draw_line(500, 500, 300, 500)
draw_line(300, 500, 300, 300)
</script>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    def drawstar(self, *args, **kwargs):
        """Draw a five-pointed star outline from precomputed vertices."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
draw_line(300, 300, 358.78, 342.71)
draw_line(358.78, 342.71, 417.56, 300)
draw_line(417.56, 300, 395.11, 369.1)
draw_line(395.11, 369.1, 453.88, 411.8)
draw_line(453.88, 411.8, 381.23, 411.8)
draw_line(381.23, 411.8, 358.78, 480.9)
draw_line(358.78, 480.9, 336.33, 411.8)
draw_line(336.33, 411.8, 263.67, 411.8)
draw_line(263.67, 411.8, 322.45, 369.1)
draw_line(322.45, 369.1, 300, 300)
</script>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    def triangle2(self, *args, **kwargs):
        """Draw a filled red triangle with a blue outline."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "blue"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
def fill():
    ctx.beginPath()
    ctx.moveTo(100,100)
    ctx.lineTo(150,250)
    ctx.lineTo(400,400)
    ctx.fill()
ctx.fillStyle = "red"
fill()
draw_line(100, 100, 150 , 250)
draw_line(150, 250 ,400 , 400)
draw_line(400, 400, 100 , 100)
</script>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    def japan(self, *args, **kwargs):
        '''
        Original source: http://blog.roodo.com/esabear/archives/19215194.html
        adapted to a Brython program (draws the Japanese flag).
        Fixes applied: `true` -> `True` (Brython is Python, `true` raised a
        NameError) and `rbg(...)` -> `rgb(...)` (invalid CSS color).
        '''
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="300" height="200"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import math
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 canvas.height 單位光點
# ctx.setTransform(1, 0, 0, -1, 0, canvas.height)
# 以下採用 canvas 原始座標繪圖
flag_w = canvas.width
flag_h = canvas.height
square_x = flag_w/2
square_y = flag_h/2
circle_x = flag_w/2
circle_y = flag_h/2
#黑色邊框
ctx.fillStyle='rgb(0, 0, 0)'
ctx.fillRect(0,0,flag_w,flag_h)
#國旗白底
ctx.fillStyle='#fff'
ctx.fillRect(flag_w/20, flag_h/20, flag_w/1.111, flag_h/1.111)
ctx.beginPath()
ctx.arc(circle_x, circle_y, flag_w/6, 0, math.pi*2, True)
ctx.closePath()
# 填色設為紅色
ctx.fillStyle = 'rgb(255, 0, 0)'
ctx.fill()
</script>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    def usa(self, *args, **kwargs):
        '''
        Original source: http://blog.roodo.com/esabear/archives/19215194.html
        adapted to a Brython program.
        NOTE(review): this is a verbatim copy of japan() and still draws the
        Japanese flag; the US flag drawing was never implemented.
        Fixes applied: `true` -> `True` and `rbg(...)` -> `rgb(...)`.
        '''
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="300" height="200"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import math
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 canvas.height 單位光點
# ctx.setTransform(1, 0, 0, -1, 0, canvas.height)
# 以下採用 canvas 原始座標繪圖
flag_w = canvas.width
flag_h = canvas.height
square_x = flag_w/2
square_y = flag_h/2
circle_x = flag_w/2
circle_y = flag_h/2
#黑色邊框
ctx.fillStyle='rgb(0, 0, 0)'
ctx.fillRect(0,0,flag_w,flag_h)
#國旗白底
ctx.fillStyle='#fff'
ctx.fillRect(flag_w/20, flag_h/20, flag_w/1.111, flag_h/1.111)
ctx.beginPath()
ctx.arc(circle_x, circle_y, flag_w/6, 0, math.pi*2, True)
ctx.closePath()
# 填色設為紅色
ctx.fillStyle = 'rgb(255, 0, 0)'
ctx.fill()
</script>
</body>
</html>
'''
        return outstring
| 2014c2g12/c2g12 | exts/wsgi/programs/c2g12/__init__.py | Python | gpl-2.0 | 11,211 |
import json
from django.http import HttpResponse
from django.shortcuts import render
from recognize_albums.models import Album, Image
def all_albums(request):
    """Return every album with its image URLs as an indented JSON response."""
    payload = [
        {
            "title": album.title,
            "images": [entry.image.url
                       for entry in Image.objects.filter(album__title=album.title)],
        }
        for album in Album.objects.all()
    ]
    return HttpResponse(json.dumps({"albums": payload}, indent=4),
                        content_type="application/json")
| NCSUWebClass/fall14-recognize4 | app/recognize_albums/views.py | Python | mit | 579 |
try:
from boxe_clock.apps.android_app import BoxingApp
except ImportError:
from boxe_clock.apps.generic_app import BoxingApp
def main():
    """Entry point: run whichever BoxingApp variant the import above selected."""
    BoxingApp().run()
if __name__ == "__main__":
    main()
| zedr/boxing-clock | src/main.py | Python | bsd-3-clause | 209 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: functions are currently experimental and subject to change!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
class Defun(object):
  """Decorator used to define TensorFlow functions.
  Use this decorator to make a Python function usable directly as a TensorFlow
  function.
  The decorated function must add ops to the default graph and return zero or
  more `Tensor` objects.  Call the decorator with named arguments, one for each
  argument of the function to decorate, with the expected type of the argument
  as value.
  For example if the function to decorate accepts two `tf.float32` arguments
  named `x` and `y`, call the decorator with:
  @Defun(tf.float32, tf.float32)
  def foo(x, y):
    ...
  When you call the decorated function it will add `call` ops to the
  default graph and adds the definition of the function into the
  default graph. Because the addition of the function into the graph
  is deferred, the decorator can be used anywhere in the program.
  Any variables created inside of the function are hoisted into the outer graph.
  Note that the variables are created in the variable scope that was active
  during the first call to the function. Subsequent function calls will refer to
  the same set of variables.
  Definitions of functions in a graph are frozen as soon as the graph is used to
  create a session. However, new functions and new calls to existing functions
  may be added to the graph, with the new functions themselves becoming
  immediately frozen.
  Example, but also see the [How To on functions](link_needed).
  ```python
  # Defining the function.
  @tf.Defun(tf.float32, tf.float32)
  def MyFunc(x, y):
    return x + y, x - y
  # Building the graph.
  a = tf.constant([1.0])
  b = tf.constant([2.0])
  c, d = MyFunc(a, b, name='mycall')
  ```
  """
  def __init__(self, *input_types, **kwargs):
    """Create a `Defun` decorator.
    Args:
      *input_types: A list of `tf.DType`
      **kwargs: Optional keyword arguments, including
         func_name - (optional).  A python string, the name to use to
           declare this `Function` in the graph.
         grad_func - (optional).  A function implementing the gradient
           of the function-to-register.  This is must be a
           `_DefinedFunction` object. The gradient
           function must satisfy the criterion defined in
           function.proto:GradientDef.
         python_grad_func - (optional).  A function implementing the
           gradient of the function python-side. This function must
           take the current op and the gradients w.r.t. its outputs,
           and return the gradients w.r.t. the inputs. That is it must
           implement the interface expected by `tf.RegisterGradient`).
           This will be called by tf.gradients to add the gradient ops
           to the graph. At most one of grad_func and python_grad_func
           can be specified.
         out_names = (optional). A list of strings, one per output
           tensor.
         shape_func - (optional). A function taking the op and returning a list
           of static shapes to set for the function's outputs.
    """
    # Keyword options are popped here; whatever remains is forwarded to
    # every call site of the defined function (see _extra_kwargs).
    self._input_types = input_types
    self._func_name = kwargs.pop("func_name", None)
    self._grad_func = kwargs.pop("grad_func", None)
    self._python_grad_func = kwargs.pop("python_grad_func", None)
    self._out_names = kwargs.pop("out_names", None)
    self._extra_kwargs = kwargs
  def __call__(self, func):
    # Decorator entry point: validates `func` and wraps it in either a
    # _DefinedFunction (types known) or an _OverloadedFunction (deferred).
    # Various sanity checks on the callable func.
    if not callable(func):
      raise ValueError("function %s must be callable" % func)
    # Func should not use kwargs and defaults.
    argspec = tf_inspect.getargspec(func)
    if argspec.keywords or argspec.defaults:
      raise ValueError(
          "function with argument defaults or keywords arguments are not"
          " supported. {} has defaults {} and keywords {}.".format(
              func, argspec.defaults, argspec.keywords))
    # Computes how many arguments 'func' has.
    min_args = len(argspec.args)
    max_args = min_args
    if argspec.varargs:
      # A *args catch-all removes the upper bound; 1000000 acts as
      # "effectively unlimited".
      max_args = 1000000
    argnames = argspec.args
    if tf_inspect.ismethod(func):
      # 1st argument is the "class" type.
      min_args -= 1
      argnames = argnames[1:]
    if self._input_types:
      # If Defun is given a list of types for the inputs, the number
      # of input types should be compatible with 'func'.
      num = len(self._input_types)
      # NOTE(review): this also fires when *more* types than arguments are
      # given, though the message only mentions "fewer".
      if num < min_args or num > max_args:
        raise ValueError(
            "The function has fewer arguments than the number of specified "
            "input types.")
      return _DefinedFunction(
          func,
          argnames,
          self._input_types,
          self._func_name,
          self._grad_func,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
    # 'func' expects no arguments and input types is an empty list.
    if min_args == 0 and max_args == 0:
      return _DefinedFunction(
          func, [], [],
          self._func_name,
          self._grad_func,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
    # Input types are unknown. It's an overloaded function and hence
    # its definition needs to be deferred until it's called.
    return _OverloadedFunction(
        func,
        argnames,
        self._func_name,
        self._grad_func,
        self._python_grad_func,
        out_names=self._out_names,
        **self._extra_kwargs)
class _DefinedFunction(object):
  """_DefinedFunction encapsulates a function definition and its properties.

  Attributes:
    name: The function name.
    definition: The definition of this function. A FunctionDef proto.
    grad_func_name: If not None, the name of this function's gradient function.
    python_grad_func: A python callable implementing the gradient of
      the function python-side.
  """

  def __init__(self,
               func,
               argnames,
               input_types,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               shape_func=None,
               capture_by_value=False,
               whitelisted_stateful_ops=None,
               capture_resource_var_by_value=True,
               **kwargs):
    """Creates _DefinedFunction.

    Args:
      func: A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      input_types: The function's argument types. Can be a tuple, list of
        tf data types.
      func_name: The function name. Defaults to None, in which case the name
        is derived from 'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: An optional list of strings for the function return value
        names.
      shape_func: An optional function mapping an op to a list of static
        output shapes.
      capture_by_value: Boolean (defaults to False). If True, captured values
        will be copied into the function body.
      whitelisted_stateful_ops: A set of ops that if stateful we ignore and
        copy into the function body, when `capture_by_value` is True.
      capture_resource_var_by_value: Boolean (defaults to True). If False,
        captured resource variable returns the handle instead of value.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.

    Raises:
      ValueError: The function definition is invalid.
    """
    self._func = func
    self._input_types = input_types
    self._func_name = func_name
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._shape_func = shape_func
    self._capture_by_value = capture_by_value
    self._whitelisted_stateful_ops = whitelisted_stateful_ops
    if self._whitelisted_stateful_ops is None:
      self._whitelisted_stateful_ops = set()
    self._capture_resource_var_by_value = capture_resource_var_by_value
    self._extra_kwargs = kwargs
    # Constructed only when C API is disabled, lazily
    self._definition = None
    # Constructed only when C API is enabled, lazily
    self._c_func = None
    self._sub_functions = dict()  # Constructed with _definition or _c_func
    # pylint: disable=protected-access
    device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
    # pylint: enable=protected-access
    # Get the innermost device if possible.
    self._caller_device = device_funcs[-1] if device_funcs else None
    # Cached OpDef for this function. When C API is enabled, this is
    # the only part of FunctionDef that we cache in Python. When C API
    # is disabled the whole _definition is available and this is simply
    # another reference to _definition.signature
    self._op_def = None
    # Positions in input_types line up with argnames; missing names are
    # synthesized as "arg<i>".
    assert isinstance(input_types, (list, tuple))
    self._arg_types = input_types
    self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
                       for i in range(len(input_types))]

  @property
  def name(self):
    """Function name."""
    # Creating the definition may append a hash suffix to the name, so the
    # name is only final after the definition exists.
    self._create_definition_if_needed()
    return self._func_name

  @property
  def definition(self):
    """Function definition proto."""
    self._create_definition_if_needed()
    if self._c_func:
      # C API path: serialize the C-side function into a fresh FunctionDef
      # proto on every access.
      with c_api_util.tf_buffer() as buf:
        c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
        fdef = function_pb2.FunctionDef()
        proto_data = c_api.TF_GetBuffer(buf)
        fdef.ParseFromString(compat.as_bytes(proto_data))
        return fdef
    return self._definition

  @property
  def _signature(self):
    self._create_definition_if_needed()
    return self._op_def

  def set_grad_func(self, grad_func):
    """Specifies the gradient function of this function."""
    assert not self._grad_func
    assert isinstance(grad_func, _DefinedFunction)
    self._grad_func = grad_func

  @property
  def grad_func_name(self):
    """Its gradient function's name."""
    return self._grad_func.name if self._grad_func else None

  @property
  def python_grad_func(self):
    """Python gradient function callable."""
    return self._python_grad_func

  @property
  def declared_input_types(self):
    """Returns the list of data types of explicit declared inputs."""
    return self._input_types

  @property
  def captured_inputs(self):
    """Returns the list of implicitly captured inputs."""
    self._create_definition_if_needed()
    return self._extra_inputs

  @property
  def stateful_ops(self):
    """Returns the list of stateful ops in function definition.

    Returns:
      A list of (op.name, op.type) pairs.
    """
    self._create_definition_if_needed()
    return self._stateful_ops

  def _create_definition_if_needed(self):
    """Creates the function definition if it's not created yet."""
    # Definitions are always built in graph mode, even when the caller is
    # executing eagerly.
    with context.graph_mode():
      self._create_definition_if_needed_impl()

  def _create_definition_if_needed_impl(self):
    """This is not what you want, see _create_definition_if_needed."""
    if self._definition is not None or self._c_func is not None:
      return

    # Trace the Python callable into a temporary function graph.
    temp_graph = func_graph_from_py_func(
        self._func,
        self._arg_names,
        self._arg_types,
        self._func_name,
        self._capture_by_value,
        self._caller_device,
        whitelisted_stateful_ops=self._whitelisted_stateful_ops,
        capture_resource_var_by_value=self._capture_resource_var_by_value)

    self._extra_inputs = temp_graph.extra_inputs
    # pylint: disable=protected-access
    self._sub_functions = temp_graph._functions
    # pylint: enable=protected-access

    # Extra kwargs are treated as attrs on the function def.
    if self._func_name:
      base_func_name = self._func_name
    else:
      base_func_name = function_utils.get_func_name(self._func)
      if self._grad_func:
        base_func_name += ("_%s" % self._grad_func.name)
    kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)

    if not temp_graph._c_graph:  # pylint: disable=protected-access
      # Python path (C API disabled): build the FunctionDef proto in Python.
      self._definition = graph_to_function_def.graph_to_function_def(
          temp_graph,
          temp_graph.get_operations(),
          temp_graph.inputs,
          temp_graph.outputs,
          out_names=self._out_names)

      for k in kwargs_attr:
        self._definition.attr[k].CopyFrom(kwargs_attr[k])

      # Hash the definition and its dependencies.
      self._hash_str = self._create_hash_str(
          self._definition.signature.input_arg,
          self._definition.signature.output_arg, self._definition.node_def)

      # Finally, we decide the function name to use.  If not specified,
      # make up something which is almost certainly unique (but deterministic).
      if not self._func_name:
        self._func_name = "_".join([base_func_name, self._hash_str])
      self._definition.signature.name = self._func_name
      if self._func.__doc__:
        self._definition.signature.description = self._func.__doc__

      self._op_def = self._definition.signature
    else:  # C API is enabled
      output_names = ([compat.as_bytes(x) for x in self._out_names]
                      if self._out_names else [])
      description = self._func.__doc__ or None
      # pylint: disable=protected-access
      c_func = c_api.TF_GraphToFunction_wrapper(
          temp_graph._c_graph,
          base_func_name,
          self._func_name is None,  # append_hash_to_fn_name
          None,  # opers
          [t._as_tf_output() for t in temp_graph.inputs],
          [t._as_tf_output() for t in temp_graph.outputs],
          output_names,
          [],  # control_outputs
          [],  # control_output_names
          None,  # opts
          description)
      self._c_func = c_api_util.ScopedTFFunction(c_func)
      # pylint: enable=protected-access
      self._set_c_attrs(kwargs_attr)

      # Set cached fields: _op_def and _func_name (if not already set)
      self._op_def = self.definition.signature
      if self._func_name:
        assert self._func_name == self._op_def.name
      else:
        self._func_name = compat.as_str(self._op_def.name)

    self._stateful_ops = [(op.name, op.type)
                          for op in temp_graph.get_operations()
                          if op.op_def.is_stateful]

  def _set_c_attrs(self, attrs):
    """Sets `attrs` as attributes of self._c_func.

    Requires that self._c_func is not None.

    Args:
      attrs: a dictionary from attribute name to attribute proto value
    """
    for name, attr_value in attrs.items():
      serialized = attr_value.SerializeToString()
      # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
      # It might be worth creating a convenient way to re-use the same status.
      c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
                                         serialized)

  def _create_hash_str(self, input_arg, output_arg, node_def):
    """Creates an 8-character string unique to this input.

    Args:
      input_arg: the input_arg field of an OpDef
        (e.g. self._definition.signature.input_arg)
      output_arg: the output_arg field of an OpDef
        (e.g. self._definition.signature.output_arg)
      node_def: the node_def field of a FunctionDef
        (e.g. self._definition.node_def)

    Returns:
      The unique string for this input
    """
    hasher = hashlib.sha1()

    def update_num(n):
      hasher.update(compat.as_bytes("%x" % n))

    def update_str(s):
      # Length-prefix each string so concatenations cannot collide.
      update_num(len(s))
      hasher.update(compat.as_bytes(s))

    def update_strs(slist):
      update_num(len(slist))
      for s in slist:
        update_str(s)

    for adef in input_arg:
      update_str(adef.SerializeToString())

    for adef in output_arg:
      update_str(adef.SerializeToString())

    # Nodes and attrs are hashed in sorted order so the hash is independent
    # of insertion order.
    for n in sorted(node_def, key=lambda n: n.name):
      update_str(n.name)
      update_str(n.op)
      update_strs(n.input)
      update_num(len(n.attr))
      # NOTE: protobuf map serialization does not guarantee ordering.
      for k in sorted(n.attr):
        update_str(k)
        update_str(n.attr[k].SerializeToString())

    return hasher.hexdigest()[:8]

  def add_to_graph(self, g):
    """Adds this function into the graph g."""
    self._create_definition_if_needed()

    # Adds this function into 'g'.
    # pylint: disable=protected-access
    if context.executing_eagerly():
      context.context().add_function_def(self.definition)
    else:
      g._add_function(self)
    # pylint: enable=protected-access

    # Ensures related sub-routines are defined in 'g', too.
    for f in self._sub_functions.values():
      f.add_to_graph(g)

    # Adds its gradient function, too.
    if self._grad_func:
      self._grad_func.add_to_graph(g)

  def __call__(self, *args, **kwargs):
    """Calls this function with `args` in the default graph."""
    self.add_to_graph(ops.get_default_graph())
    # Captured tensors are appended after the explicit arguments.
    args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
    ret, op = _call(self._signature, *args, **kwargs)

    # Set a hidden attr in 'op' so that gradients_impl can refer back
    # to this _DefinedFunction instance to access python_grad_func.
    assert isinstance(op, ops.Operation)
    setattr(op, "__defun", self)

    if self._shape_func is not None:
      shapes = self._shape_func(op)
      if len(shapes) != len(op.outputs):
        raise ValueError("shape_func produced %d shapes for %d outputs" %
                         (len(shapes), len(op.outputs)))
      for (t, shape) in zip(op.outputs, shapes):
        t.set_shape(shape)
    return ret
class _OverloadedFunction(object):
  """_OverloadedFunction encapsulates an overloaded function.

  _OverloadedFunction maintains a mapping from input types to
  instantiated _DefinedFunction in self._overload.
  """

  def __init__(self,
               func,
               argnames,
               func_name=None,
               grad_func=None,
               python_grad_func=None,
               out_names=None,
               **kwargs):
    """Creates an _OverloadedFunction.

    Args:
      func: A python callable which constructs a tf function body.
      argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which case the name
        is derived from 'func'.
      grad_func: This function's gradient function, if not None. Defaults
        to None.
      python_grad_func: A python callable implementing the gradient of
        the function python-side.
      out_names: A list of strings for the function return value names.
      **kwargs: The keyword arguments. **kwargs is passed to every call
        site of this function.

    Raises:
      ValueError: The function definition is invalid.
    """
    self._func = func
    self._argnames = argnames
    self._func_name = func_name
    # The gradient of an overloaded function must itself be overloaded, so
    # it can be instantiated per input-type signature.
    assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
    self._grad_func = grad_func
    self._python_grad_func = python_grad_func
    self._out_names = out_names
    self._extra_kwargs = kwargs
    # Maps a stringified type signature to its _DefinedFunction.
    self._overload = {}

  def instantiate(self, input_types):
    """Instantiate this function given input argument types.

    Args:
      input_types: A list of data types for the inputs.

    Returns:
      _DefinedFunction for the given input types.
    """
    # Stringify the type list.
    key = _type_list_to_str(input_types)
    defined = self._overload.get(key)
    if not defined:
      # If not defined yet, define the function given the input types.
      name = self._func_name
      if name is not None:
        name = "_".join([name, key])
      defined = _DefinedFunction(
          self._func,
          self._argnames,
          input_types,
          name,
          None,
          self._python_grad_func,
          out_names=self._out_names,
          **self._extra_kwargs)
      _ = defined.name  # Fully instantiate the function definition.
      if self._grad_func:
        # If _grad_func is given, it is another
        # _OverloadedFunction. We need to instantiate it with the
        # right input types.
        output_types = [
            dtypes.DType(_.type) for _ in defined._signature.output_arg  # pylint: disable=protected-access
        ]
        # pylint: disable=protected-access
        defined._grad_func = self._grad_func.instantiate(input_types +
                                                         output_types)
        # pylint: enable=protected-access
      self._overload[key] = defined
    return defined

  def __call__(self, *args, **kwargs):
    # Derive the concrete type signature from the actual arguments, then
    # dispatch to (possibly creating) the matching _DefinedFunction.
    input_types = []
    args = list(args)
    for (i, x) in enumerate(args):
      x = ops.convert_to_tensor(x)
      # NOTE(review): convert_to_tensor normally raises on failure, so this
      # check looks purely defensive — confirm before removing.
      if not isinstance(x, ops.Tensor):
        raise ValueError("Expect a Tensor but get ", x)
      input_types.append(x.dtype)
      args[i] = x
    return self.instantiate(input_types)(*args, **kwargs)
class _FuncGraph(ops.Graph):
  """A helper for constructing a function.

  _FuncGraph overrides ops.Graph's create_op() so that we can keep
  track of all inputs into every op created inside the function. If
  any input is from other graphs, we keep track of it in self.capture
  and substitute the input with a place holder.

  Each captured input's corresponding place holder is converted into a
  function argument and the caller passes in the captured tensor.
  """

  def __init__(self, name, capture_by_value, whitelisted_stateful_ops,
               capture_resource_var_by_value, *args, **kwargs):
    super(_FuncGraph, self).__init__(*args, **kwargs)
    self._capture_by_value = capture_by_value
    self._whitelisted_stateful_ops = whitelisted_stateful_ops
    self._capture_resource_var_by_value = capture_resource_var_by_value
    self._building_function = True
    # The graph that was the default when this _FuncGraph was created;
    # captured variables are hoisted into it (see getvar).
    self._outer_graph = ops.get_default_graph()
    self._vscope = vs.get_variable_scope()
    self._old_custom_getter = self._vscope.custom_getter

    # The name of the function.
    self.name = name

    # Placeholder tensors representing the inputs to this function. The tensors
    # are in this _FuncGraph.
    self.inputs = []

    # Tensors that will be returned by this function. The tensors are in this
    # _FuncGraph.
    self.outputs = []

    # Maps external tensor -> internal tensor (e.g. input placeholder).
    self._captured = {}

    # The external tensors that have been captured as inputs and must be passed
    # to this function (empty if capturing by value, otherwise these are the
    # keys of _captured).
    self.extra_inputs = []

    # Input placeholders that have been added for captured values (empty if
    # capturing by value).
    self.extra_args = []

    # Captured variables.
    # TODO(skyewm): is this needed?
    self.extra_vars = []

  # pylint: disable=g-doc-return-or-yield

  @tf_contextlib.contextmanager
  def container(self, container_name):
    """Returns a context manager that specifies the resource container to use.

    Overridden from `tf.Graph` to update both the init_scope container
    and the present inner container. This is necessary to make sure setting
    containers applies correctly both to created variables and to stateful
    ops.

    Args:
      container_name: container name string.

    Returns:
      A context manager for defining resource containers for stateful ops,
        yields the container name.
    """
    original_container = self._container
    # pylint: disable=protected-access
    with ops.init_scope():
      original_init_container = ops.get_default_graph()._container
    try:
      self._container = container_name
      with ops.init_scope():
        ops.get_default_graph()._container = container_name
      yield self._container
    finally:
      # Restore both containers even if the body raised.
      self._container = original_container
      with ops.init_scope():
        ops.get_default_graph()._container = original_init_container
    # pylint: enable=protected-access

  # pylint: enable=g-doc-return-or-yield

  def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter."""
    # NOTE(review): `getter` and `**kwargs` are accepted but unused here;
    # the variable is created via the outer scope's variable store instead.
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outer most graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if (isinstance(var, resource_variable_ops.ResourceVariable) and
          self._capture_resource_var_by_value):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var

  def create_op(self, op_type, inputs, data_types, **kwargs):
    # Any input coming from another graph (or from eager) is replaced with
    # a captured tensor before the op is created.
    for i, x in enumerate(inputs):
      if isinstance(x, ops.EagerTensor) or x.graph is not self:
        inputs[i] = self.capture(x)
    return super(_FuncGraph, self).create_op(op_type, inputs, data_types,
                                             **kwargs)

  def capture(self, tensor, name=None):
    """Adds the given tensor to this graph and returns the captured tensor."""
    if tensor in self._captured:
      # Captured already.
      return self._captured[tensor]
    elif self._capture_by_value:
      return self._add_tensor_and_parents(tensor)
    else:
      return self._capture_tensor_as_extra_input(tensor, name)

  def _capture_tensor_as_extra_input(self, tensor, name=None):
    # Substitute with a placeholder.
    self.extra_inputs.append(tensor)
    # Hoist the new input placeholder out of any control flow context
    # we're currently in.
    with ops.control_dependencies(None):
      ph = array_ops.placeholder(
          tensor.dtype, shape=tensor.get_shape(), name=name)
    # pylint: disable=protected-access
    if isinstance(tensor, ops.EagerTensor):
      handle_data = tensor._handle_data
      if handle_data:
        handle_data = handle_data.SerializeToString()
    else:
      handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
                                                tensor._as_tf_output())
    if handle_data:
      # Propagate resource/variant handle metadata onto the placeholder.
      c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
                                  compat.as_bytes(handle_data))
    # pylint: enable=protected-access
    self.inputs.append(ph)
    self._captured[tensor] = ph
    self.extra_args.append(ph)
    if _is_guaranteed_const(tensor):
      with ops.control_dependencies(None):
        return array_ops.guarantee_const(ph)
    else:
      return ph

  def _add_tensor_and_parents(self, tensor):
    op = self._add_op_and_parents(tensor.op)
    return op.outputs[tensor.value_index]

  def _add_op_and_parents(self, op):
    # Recursively copies `op` and its ancestors into this graph
    # (capture-by-value mode).
    # pylint: disable=protected-access
    op_def = graph_to_function_def._get_op_def(op)
    # pylint: enable=protected-access
    if op_def.is_stateful and op not in self._whitelisted_stateful_ops:
      raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
                       "by value." % (op.name, op.type))
    elif op.type in ("Placeholder", "PlaceholderV2"):
      raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
                       "by value." % (op.name, op.type))

    captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]

    captured_op = self.create_op(
        op.type,
        captured_inputs, [o.dtype for o in op.outputs],
        name=op.name,
        attrs=op.node_def.attr,
        op_def=op_def)

    for t, captured_t in zip(op.outputs, captured_op.outputs):
      self._captured[t] = captured_t

    return captured_op
def func_graph_from_py_func(func,
                            arg_names,
                            arg_types,
                            name=None,
                            capture_by_value=False,
                            device=None,
                            colocation_stack=None,
                            container=None,
                            collections_ref=None,
                            arg_shapes=None,
                            whitelisted_stateful_ops=None,
                            capture_resource_var_by_value=True):
  """Returns a _FuncGraph generated from `func`.

  Args:
    func: A Python callable which constructs a TF function body. The arguments
      must correspond to `arg_types`. Returns a value or list/tuple of values.
      No returned value can be None.
    arg_names: A sequence of strings for the function argument names.
    arg_types: A sequence of the function's argument types.
    name: The function name. If None, the name is derived from `func`.
    capture_by_value: boolean. If True, captured values will be copied into the
      function body.
    device: device name or function.
    colocation_stack: A colocation stack (list) the _FuncGraph should use.
    container: A container name the _FuncGraph should start with.
    collections_ref: A reference to a collections dict the _FuncGraph should
      use internally.
    arg_shapes: A sequence of the function's argument shapes.
    whitelisted_stateful_ops: A set of ops that if stateful we ignore and
      re-create.
    capture_resource_var_by_value: Boolean (defaults to True). If False,
      captured resource variable returns the handle instead of value.

  Returns:
    A _FuncGraph.

  Raises:
    ValueError: if func returns None.
  """
  if not name:
    name = function_utils.get_func_name(func)
  func_graph = _FuncGraph(name, capture_by_value, whitelisted_stateful_ops,
                          capture_resource_var_by_value)

  with func_graph.as_default(), ops.device(device):
    # pylint: disable=protected-access
    if collections_ref is not None:
      func_graph._collections = collections_ref
    if container is not None:
      func_graph._container = container
    if colocation_stack is not None:
      func_graph._colocation_stack = colocation_stack
    # pylint: enable=protected-access

    if arg_shapes is None:
      arg_shapes = [None] * len(arg_types)

    # Create placeholders for the function arguments.
    for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
      argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
      func_graph.inputs.append(argholder)
    # Call func and gather the output tensors.  Variables requested inside
    # `func` go through func_graph.getvar so they are hoisted to the outer
    # graph.
    with vs.variable_scope("", custom_getter=func_graph.getvar):
      outputs = func(*func_graph.inputs)

    # There is no way of distinguishing between a function not returning
    # anything and a function returning None in Python.
    # We need to allow the former and ideally want to forbid the latter as
    # it is most likely user error.
    # TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
    # allow users to explicitly mark the function as not returning anything.
    # For now, we allow a single None return and interpret it as a function
    # with no output.
    if outputs is None:
      outputs = []
    else:
      # If func only returned one value, make it a tuple.
      if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)
      if any(_ is None for _ in outputs):
        raise ValueError("Function %s can not return None." % name)
    # Ensures each output is a Tensor in the function graph.
    outputs = [ops.convert_to_tensor(t) for t in outputs]
    outputs = [func_graph.capture(t) if t.graph is not func_graph else t
               for t in outputs]
    func_graph.outputs = outputs
  return func_graph
def _is_guaranteed_const(tensor):
  """Determines whether `tensor` is guaranteed to be a constant.

  A tensor is guaranteed to be a constant if either it was produced by
  a `GuaranteeConst` op or if all of its children are guaranteed to be
  constants.

  Args:
    tensor: The tensor for which to determine const-ness.

  Returns:
    True if `tensor` is guaranteed to be a constant, False otherwise.
  """
  if isinstance(tensor, ops.EagerTensor):
    return False

  const_ops = set()
  seen = set()
  # Iterative post-order DFS over the op graph. Each stack entry is
  # (op, leaving): `leaving` is False on the first visit (inputs not yet
  # examined) and True on the post-order revisit.
  pending = [(tensor.op, False)]
  while pending:
    node, leaving = pending.pop()
    if leaving:
      # Post-order visit: an op is const iff it has at least one input and
      # every input op was determined to be const.
      if node.inputs and all(inp.op in const_ops for inp in node.inputs):
        const_ops.add(node)
      continue
    seen.add(node)
    if node.node_def.op == "GuaranteeConst":
      const_ops.add(node)
      continue
    # Revisit this op after all of its inputs have been checked.
    pending.append((node, True))
    for inp in node.inputs:
      if inp.op not in seen:
        pending.append((inp.op, False))
  return tensor.op in const_ops
def _call(sig, *inputs, **kwargs):
  """Adds a node calling a function.

  This adds a `call` op to the default graph that calls the function
  of signature `sig`, passing the tensors in `inputs` as arguments.
  It returns the outputs of the call, which are one or more tensors.

  `sig` is the OpDef signature of a `_DefinedFunction` object.

  You can pass an optional keyword parameter `name=string` to name the
  added operation.

  You can pass an optional keyword parameter `noinline=True|False` to
  instruct the runtime not to inline the function body into the call
  site.

  Args:
    sig: OpDefArg. The signature of the function.
    *inputs: arguments to the function.
    **kwargs: Optional keyword arguments.  Can only contain 'name' or
        'noinline'.

  Returns:
    A 2-element tuple. First element: a Tensor if the function returns a single
    value; a list of Tensors if the function returns multiple value; the
    Operation if the function returns no values. Second element: the Operation.

  Raises:
    ValueError: if the arguments are invalid.
  """
  if len(inputs) != len(sig.input_arg):
    raise ValueError("Expected number of arguments: %d, received: %d" % (len(
        sig.input_arg), len(inputs)))
  name = kwargs.pop("name", None)
  g = ops.get_default_graph()
  func_name = sig.name
  if name is None:
    name = func_name
  # Remaining kwargs (e.g. 'noinline') become attrs on the call op.
  attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
  output_types = [dtypes.DType(x.type) for x in sig.output_arg]
  op = g.create_op(
      func_name,
      list(inputs),
      output_types,
      name=name,
      attrs=attrs,
      op_def=sig,
      compute_shapes=False)
  if op.outputs:
    if len(op.outputs) == 1:
      ret = op.outputs[0]
    else:
      ret = tuple(op.outputs)
  else:
    ret = op
  return ret, op
def _from_definition(fdef, grad_func=None):
  """Creates a _DefinedFunction initialized from a FunctionDef proto.

  Args:
    fdef: a FunctionDef
    grad_func: a _DefinedFunction or None

  Returns:
    A _DefinedFunction representing fdef
  """
  # TODO(iga): This method does major surgery on _DefinedFunction.
  # Make it a named constructor using @classmethod of _DefinedFunction.

  # The Python callable is only needed to create a FunctionDef. Since we have
  # the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
  # have access to such a callable here).
  func = None
  argnames = [arg.name for arg in fdef.signature.input_arg]
  input_types = tuple(
      dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
  func_name = fdef.signature.name
  # Note: FunctionDefs do not include python gradient functions, so if the
  # original _DefinedFunction included one it will not be reflected here.
  python_grad_func = None
  out_names = [arg.name for arg in fdef.signature.output_arg]
  result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
                            python_grad_func, out_names)
  # pylint: disable=protected-access
  # Import the serialized proto straight into the C API so the definition is
  # considered already created (no Python callable is available to trace).
  serialized = fdef.SerializeToString()
  c_func = c_api.TF_FunctionImportFunctionDef(serialized)
  result._c_func = c_api_util.ScopedTFFunction(c_func)
  result._extra_inputs = []
  result._op_def = fdef.signature
  # pylint: enable=protected-access

  return result
def from_library(lib):
  """Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.

  This method handles assigning the correct gradient functions to each
  function.

  Args:
    lib: a FunctionDefLibrary

  Returns:
    A list of _DefinedFunctions

  Raises:
    ValueError: `lib` is invalid
  """
  if not lib.function and not lib.gradient:
    return []

  # function name -> FunctionDef proto
  funcs = {fdef.signature.name: fdef for fdef in lib.function}

  # Validate that all referenced function names have function defs
  for g in lib.gradient:
    if g.function_name not in funcs:
      raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                       (g.function_name, str(lib)))
    if g.gradient_func not in funcs:
      raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
                       (g.gradient_func, str(lib)))

  # function name -> gradient function name
  func_to_grad = collections.defaultdict(lambda: None)
  # gradient function name -> names of functions having that grad function
  grad_to_funcs = collections.defaultdict(list)

  for gdef in lib.gradient:
    func_to_grad[gdef.function_name] = gdef.gradient_func
    grad_to_funcs[gdef.gradient_func].append(gdef.function_name)

  # Start with functions without gradients
  ready = [
      fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
  ]
  if not ready:
    raise ValueError(
        "FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib))
  # function name -> _DefinedFunction
  initialized = {}

  # Process functions in dependency order: a function is initialized only
  # after its gradient function has been.
  while ready:
    fdef = ready.pop()
    name = fdef.signature.name

    grad_name = func_to_grad[name]
    grad = initialized.get(grad_name)
    if grad_name:
      assert grad

    defined_func = _from_definition(fdef, grad_func=grad)
    initialized[name] = defined_func

    ready.extend(funcs[f] for f in grad_to_funcs[name])

  # BUG FIX: return a real list as documented; dict.values() is a view in
  # Python 3, which is not indexable and whose truthiness semantics differ.
  return list(initialized.values())
def _get_experimental_kwarg_as_attr(attr_name, value):
  """Creates an AttrValue for a python object."""
  # NOTE: bool must be tested before int, since bool is a subclass of int.
  converters = (
      (bool, lambda v: attr_value_pb2.AttrValue(b=v)),
      (int, lambda v: attr_value_pb2.AttrValue(i=v)),
      (float, lambda v: attr_value_pb2.AttrValue(f=v)),
      (str, lambda v: attr_value_pb2.AttrValue(s=compat.as_bytes(v))),
  )
  for py_type, make_attr in converters:
    if isinstance(value, py_type):
      return make_attr(value)
  raise ValueError("Unsupported attribute type for %s with type %s" %
                   (attr_name, type(value)))
def _parse_kwargs_as_attrs(func_name, **kwargs):
  """Parses **kwargs into a node's attributes."""
  attrs = {}

  noinline = kwargs.pop("noinline", None)
  if noinline is not None:
    attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))

  compiled = kwargs.pop("compiled", None)
  separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
  if compiled is not None:
    # XLA attrs are only emitted when 'compiled' was explicitly supplied;
    # 'separate_compiled_gradients' is ignored otherwise.
    attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
    attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
        b=bool(separate_compiled_gradients))

    # Forward _XlaScope from enclosing context (if set), otherwise create new.
    # pylint: disable=protected-access
    if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
      attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
    else:
      attrs["_XlaScope"] = attr_value_pb2.AttrValue(
          s=("function_%s" % func_name).encode())
    # pylint: enable=protected-access

  # Any kwarg prefixed "experimental_" becomes a same-named attr; everything
  # else remaining at this point is an error.
  kwargs_keys = list(kwargs.keys())
  for key in kwargs_keys:
    if key.startswith("experimental_"):
      attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs[key])
      del kwargs[key]

  if kwargs:
    raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
  return attrs
def get_extra_vars():
  """Returns the captured variables by the function.

  Returns:
    If the default graph is being used to define a function, the
    returned list of variables are those created inside the function
    body so far. Otherwise, returns an empty list.
  """
  graph = ops.get_default_graph()
  # Only a _FuncGraph (i.e. a function under construction) tracks these.
  if not isinstance(graph, _FuncGraph):
    return []
  return graph.extra_vars
def get_extra_inputs():
  """Returns the captured input tensors by the function.

  Returns:
    If the default graph is being used to define a function, the
    returned list of tensors are those accessed inside the function body
    but defined outside the function body so far. Otherwise, returns an
    empty list.
  """
  graph = ops.get_default_graph()
  # Only a _FuncGraph (i.e. a function under construction) tracks these.
  if not isinstance(graph, _FuncGraph):
    return []
  return graph.extra_inputs
def get_extra_args():
  """Returns the corresponding function arguments for the captured inputs.

  Returns:
    If the default graph is being used to define a function, the
    returned list of place holders are those used inside the function
    body corresponding those returned by get_extra_inputs(). Otherwise,
    returns an empty list.
  """
  graph = ops.get_default_graph()
  # Only a _FuncGraph (i.e. a function under construction) tracks these.
  if not isinstance(graph, _FuncGraph):
    return []
  return graph.extra_args
def _type_list_to_str(types):
    """Encodes a sequence of dtypes as a compact tag string.

    Raises:
        ValueError: if any dtype has no entry in ``_DTYPE_TO_STR``.
    """
    for dtype in types:
        if dtype not in _DTYPE_TO_STR:
            raise ValueError("Unsupported dtypes: %s" % types)
    return "".join(_DTYPE_TO_STR[dtype] for dtype in types)
# NOTE: The list needs to be extended when more data types are added.
# Maps each supported dtype to the short tag used by _type_list_to_str when
# composing unique function names from a signature's dtypes.
_DTYPE_TO_STR = {
    dtypes.float16: "f16",
    dtypes.float32: "f32",
    dtypes.float64: "f64",
    dtypes.int32: "i32",
    # NOTE(review): uint8 maps to "i8", colliding with int8 below — likely
    # meant "u8", but these tags feed generated function names, so confirm
    # no serialized graphs depend on it before changing.
    dtypes.uint8: "i8",
    dtypes.uint16: "u16",
    dtypes.uint32: "u32",
    dtypes.uint64: "u64",
    dtypes.int16: "i16",
    dtypes.int8: "i8",
    dtypes.string: "s",
    dtypes.complex64: "c64",
    dtypes.complex128: "c128",
    dtypes.int64: "i64",
    dtypes.bool: "b",
    dtypes.qint8: "qi8",
    dtypes.quint8: "qu8",
    dtypes.qint16: "qi16",
    dtypes.quint16: "qu16",
    dtypes.qint32: "qi32",
    dtypes.bfloat16: "b16"
}
def function_def_from_tf_function(c_func):
    """Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
    # Serialize the C function into a temporary TF_Buffer, then parse the
    # raw bytes back into a Python FunctionDef protobuf.
    with c_api_util.tf_buffer() as buf:
        c_api.TF_FunctionToFunctionDef(c_func, buf)
        data = c_api.TF_GetBuffer(buf)
    fdef = function_pb2.FunctionDef()
    fdef.ParseFromString(compat.as_bytes(data))
    return fdef
| ageron/tensorflow | tensorflow/python/framework/function.py | Python | apache-2.0 | 45,054 |
import logging
from typing import Any, Dict
from freqtrade import constants
from freqtrade.configuration import setup_utils_configuration
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def setup_optimize_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
    """
    Prepare the configuration for the Hyperopt module
    :param args: Cli args from Arguments()
    :return: Configuration
    """
    config = setup_utils_configuration(args, method)

    # Backtesting and hyperopt need a concrete stake amount to work with.
    no_unlimited_runmodes = {
        RunMode.BACKTEST: 'backtesting',
        RunMode.HYPEROPT: 'hyperoptimization',
    }
    if method in no_unlimited_runmodes:
        if config['stake_amount'] == constants.UNLIMITED_STAKE_AMOUNT:
            raise DependencyException(
                f'The value of `stake_amount` cannot be set as "{constants.UNLIMITED_STAKE_AMOUNT}" '
                f'for {no_unlimited_runmodes[method]}')

    return config
def start_backtesting(args: Dict[str, Any]) -> None:
    """
    Start Backtesting script
    :param args: Cli args from Arguments()
    :return: None
    """
    # Import here to avoid loading backtesting module when it's not used
    from freqtrade.optimize.backtesting import Backtesting

    config = setup_optimize_configuration(args, RunMode.BACKTEST)
    logger.info('Starting freqtrade in Backtesting mode')
    Backtesting(config).start()
def start_hyperopt(args: Dict[str, Any]) -> None:
    """
    Start hyperopt script
    :param args: Cli args from Arguments()
    :return: None
    """
    # Hyperopt pulls in optional dependencies; import lazily and fail with a
    # helpful message when any of them is missing.
    try:
        from filelock import FileLock, Timeout
        from freqtrade.optimize.hyperopt import Hyperopt
    except ImportError as e:
        raise OperationalException(
            f"{e}. Please ensure that the hyperopt dependencies are installed.") from e

    config = setup_optimize_configuration(args, RunMode.HYPEROPT)
    logger.info('Starting freqtrade in Hyperopt mode')

    # Only one Hyperopt run may be active at a time; guard with a file lock.
    file_lock = FileLock(Hyperopt.get_lock_filename(config))
    try:
        with file_lock.acquire(timeout=1):
            # Remove noisy log messages
            logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)
            logging.getLogger('filelock').setLevel(logging.WARNING)
            Hyperopt(config).start()
    except Timeout:
        logger.info("Another running instance of freqtrade Hyperopt detected.")
        logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. "
                    "Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
                    "or on separate machines.")
        logger.info("Quitting now.")
        # TODO: return False here in order to help freqtrade to exit
        # with non-zero exit code...
        # Same in Edge and Backtesting start() functions.
def start_edge(args: Dict[str, Any]) -> None:
    """
    Start Edge script
    :param args: Cli args from Arguments()
    :return: None
    """
    # Import here to avoid loading the Edge module when it's not used.
    from freqtrade.optimize.edge_cli import EdgeCli

    config = setup_optimize_configuration(args, RunMode.EDGE)
    logger.info('Starting freqtrade in Edge mode')
    EdgeCli(config).start()
| gcarq/freqtrade | freqtrade/commands/optimize_commands.py | Python | gpl-3.0 | 3,589 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import py
import _pytest._code
import pytest
from _pytest.nodes import FSCollector
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.resultlog import generic_path
from _pytest.resultlog import pytest_configure
from _pytest.resultlog import pytest_unconfigure
from _pytest.resultlog import ResultLog
# --result-log is deprecated; silence that warning for every test in this module.
pytestmark = pytest.mark.filterwarnings("ignore:--result-log is deprecated")
def test_generic_path(testdir):
    """generic_path() renders a node's ancestry as a dotted/colon-separated id."""
    from _pytest.main import Session

    config = testdir.parseconfig()
    session = Session(config)
    # Chain of plain Nodes: components are joined with "." and "()" marks an
    # instance level.
    p1 = Node("a", config=config, session=session, nodeid="a")
    # assert p1.fspath is None
    p2 = Node("B", parent=p1)
    p3 = Node("()", parent=p2)
    item = Item("c", parent=p3)
    res = generic_path(item)
    assert res == "a.B().c"

    # With filesystem collectors in the chain, the fs part is "/"-joined and
    # separated from the in-file part by ":".
    p0 = FSCollector("proj/test", config=config, session=session)
    p1 = FSCollector("proj/test/a", parent=p0)
    p2 = Node("B", parent=p1)
    p3 = Node("()", parent=p2)
    p4 = Node("c", parent=p3)
    item = Item("[1]", parent=p4)
    res = generic_path(item)
    assert res == "test/a:B().c[1]"
def test_write_log_entry():
    """write_log_entry() emits '<letter> <name>' plus indented longrepr lines."""

    def entry_for(name, letter, longrepr):
        # Write a single entry into a fresh in-memory logfile and return the
        # resulting text.
        reslog = ResultLog(None, py.io.TextIO())
        reslog.write_log_entry(name, letter, longrepr)
        return reslog.logfile.getvalue()

    # No longrepr: a single status line.
    entry = entry_for("name", ".", "")
    assert entry[-1] == "\n"
    assert entry.splitlines() == [". name"]

    # A one-line longrepr is indented below the status line.
    entry = entry_for("name", "s", "Skipped")
    assert entry[-1] == "\n"
    assert entry.splitlines() == ["s name", " Skipped"]

    # A trailing newline in the longrepr does not produce an extra line.
    entry = entry_for("name", "s", "Skipped\n")
    assert entry[-1] == "\n"
    assert entry.splitlines() == ["s name", " Skipped"]

    # Multi-line longrepr: each line is prefixed with a single space.
    longrepr = " tb1\n tb 2\nE tb3\nSome Error"
    entry = entry_for("name", "F", longrepr)
    assert entry[-1] == "\n"
    entry_lines = entry.splitlines()
    assert len(entry_lines) == 5
    assert entry_lines[0] == "F name"
    assert entry_lines[1:] == [" " + line for line in longrepr.splitlines()]
class TestWithFunctionIntegration(object):
    # XXX (hpk) i think that the resultlog plugin should
    # provide a Parser object so that one can remain
    # ignorant regarding formatting details.
    def getresultlog(self, testdir, arg):
        """Run pytest with --resultlog on *arg* and return the non-empty log lines."""
        resultlog = testdir.tmpdir.join("resultlog")
        testdir.plugins.append("resultlog")
        args = ["--resultlog=%s" % resultlog] + [arg]
        testdir.runpytest(*args)
        return [x for x in resultlog.readlines(cr=0) if x]

    def test_collection_report(self, testdir):
        """Collection errors are logged as 'F <file>' followed by indented detail."""
        ok = testdir.makepyfile(test_collection_ok="")
        fail = testdir.makepyfile(test_collection_fail="XXX")
        lines = self.getresultlog(testdir, ok)
        # A clean collection produces no log entries.
        assert not lines
        lines = self.getresultlog(testdir, fail)
        assert lines
        assert lines[0].startswith("F ")
        assert lines[0].endswith("test_collection_fail.py"), lines[0]
        # The failure detail lines are all indented under the entry.
        for x in lines[1:]:
            assert x.startswith(" ")
        assert "XXX" in "".join(lines[1:])

    def test_log_test_outcomes(self, testdir):
        """Each outcome gets its one-letter code; failures carry a traceback."""
        mod = testdir.makepyfile(
            test_mod="""
            import pytest
            def test_pass(): pass
            def test_skip(): pytest.skip("hello")
            def test_fail(): raise ValueError("FAIL")
            @pytest.mark.xfail
            def test_xfail(): raise ValueError("XFAIL")
            @pytest.mark.xfail
            def test_xpass(): pass
            """
        )
        lines = self.getresultlog(testdir, mod)
        assert len(lines) >= 3
        # The line indices below depend on each entry's longrepr length.
        assert lines[0].startswith(". ")
        assert lines[0].endswith("test_pass")
        assert lines[1].startswith("s "), lines[1]
        assert lines[1].endswith("test_skip")
        assert lines[2].find("hello") != -1
        assert lines[3].startswith("F ")
        assert lines[3].endswith("test_fail")
        tb = "".join(lines[4:8])
        assert tb.find('raise ValueError("FAIL")') != -1
        assert lines[8].startswith("x ")
        tb = "".join(lines[8:14])
        assert tb.find('raise ValueError("XFAIL")') != -1
        assert lines[14].startswith("X ")
        assert len(lines) == 15

    @pytest.mark.parametrize("style", ("native", "long", "short"))
    def test_internal_exception(self, style):
        """Internal errors are logged with a '! ' prefix and indented traceback."""
        # they are produced for example by a teardown failing
        # at the end of the run or a failing hook invocation
        try:
            raise ValueError
        except ValueError:
            excinfo = _pytest._code.ExceptionInfo()
        reslog = ResultLog(None, py.io.TextIO())
        reslog.pytest_internalerror(excinfo.getrepr(style=style))
        entry = reslog.logfile.getvalue()
        entry_lines = entry.splitlines()
        assert entry_lines[0].startswith("! ")
        if style != "native":
            assert os.path.basename(__file__)[:-9] in entry_lines[0]  # .pyc/class
        assert entry_lines[-1][0] == " "
        assert "ValueError" in entry
def test_generic(testdir, LineMatcher):
    """End-to-end check of the one-letter codes written for every outcome kind."""
    testdir.plugins.append("resultlog")
    testdir.makepyfile(
        """
        import pytest
        def test_pass():
            pass
        def test_fail():
            assert 0
        def test_skip():
            pytest.skip("")
        @pytest.mark.xfail
        def test_xfail():
            assert 0
        @pytest.mark.xfail(run=False)
        def test_xfail_norun():
            assert 0
        """
    )
    testdir.runpytest("--resultlog=result.log")
    lines = testdir.tmpdir.join("result.log").readlines(cr=0)
    # '.' pass, 'F' fail, 's' skip, 'x' expected failure (run or not).
    LineMatcher(lines).fnmatch_lines(
        [
            ". *:test_pass",
            "F *:test_fail",
            "s *:test_skip",
            "x *:test_xfail",
            "x *:test_xfail_norun",
        ]
    )
def test_makedir_for_resultlog(testdir, LineMatcher):
    """--resultlog should automatically create directories for the log file"""
    testdir.plugins.append("resultlog")
    testdir.makepyfile(
        """
        import pytest
        def test_pass():
            pass
        """
    )
    # 'path/to' does not exist yet; the plugin must create it.
    testdir.runpytest("--resultlog=path/to/result.log")
    lines = testdir.tmpdir.join("path/to/result.log").readlines(cr=0)
    LineMatcher(lines).fnmatch_lines([". *:test_pass"])
def test_no_resultlog_on_slaves(testdir):
    """The resultlog is only opened on the master process, never on xdist slaves."""
    config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog")

    # Master: configure attaches the log, unconfigure removes it.
    assert not hasattr(config, "_resultlog")
    pytest_configure(config)
    assert hasattr(config, "_resultlog")
    pytest_unconfigure(config)
    assert not hasattr(config, "_resultlog")

    # The presence of `slaveinput` marks an xdist slave; no log is attached.
    config.slaveinput = {}
    pytest_configure(config)
    assert not hasattr(config, "_resultlog")
    pytest_unconfigure(config)
    assert not hasattr(config, "_resultlog")
def test_failure_issue380(testdir):
    """A collector whose repr_failure returns a plain string must not crash
    the resultlog plugin (regression test for issue #380)."""
    testdir.makeconftest(
        """
        import pytest
        class MyCollector(pytest.File):
            def collect(self):
                raise ValueError()
            def repr_failure(self, excinfo):
                return "somestring"
        def pytest_collect_file(path, parent):
            return MyCollector(parent=parent, fspath=path)
        """
    )
    testdir.makepyfile(
        """
        def test_func():
            pass
        """
    )
    result = testdir.runpytest("--resultlog=log")
    # Exit code 2 == collection interrupted, but no internal error occurred.
    assert result.ret == 2
| txomon/pytest | testing/test_resultlog.py | Python | mit | 7,799 |
# Minimal smoke-test script: emit a greeting on stdout.
message = "hello_world"
print(message)
| Pearn/stroke_diagnostic_test | hello_world.py | Python | apache-2.0 | 22 |
from microkorg_abstract import MicroKorgAbstractParameter
from timbre.pitch import Tune as TimbreTune
from timbre.pitch import BendRange as TimbreBendRange
from timbre.pitch import Transpose as TimbreTranspose
from timbre.pitch import VibratoInt as TimbreVibratoInt
from timbre.pitch import PortamentoTime as TimbrePortamentoTime
class Tune(TimbreTune):
    """Vocoder-section Tune parameter; inherits the timbre version unchanged."""
    pass
class BendRange(TimbreBendRange):
    """Vocoder-section BendRange parameter; inherits the timbre version unchanged."""
    pass
class Transpose(TimbreTranspose):
    """Vocoder-section Transpose parameter; inherits the timbre version unchanged."""
    pass
class VibratoInt(TimbreVibratoInt):
    """Vocoder-section VibratoInt parameter; inherits the timbre version unchanged."""
    pass
class PortamentoTime(TimbrePortamentoTime):
    """Vocoder-section PortamentoTime; same parameter but at a different location."""
    def _get_offset(self):
        # NOTE(review): presumably offset 14 within the vocoder parameter dump
        # with value bits 0-6 — confirm against the microKORG MIDI
        # implementation chart before relying on this.
        self.offset = 14
        self.bits = range(7)
import RPi.GPIO as GPIO
from sys_config import sys_config
import time
# Set our GPIO numbering to BCM
GPIO.setmode(GPIO.BCM)
class Sensor:
def __init__(self, in_GPIO_pin, in_type, in_callback_function = None):
self.v_GPIO_pin = in_GPIO_pin
self.v_callback_function = in_callback_function
self.v_type = in_type
# Set the GPIO pin to an input
GPIO.setup(self.v_GPIO_pin, GPIO.IN)
if callable(self.v_callback_function):
self.watch()
else:
# GPIO.setup(self.v_GPIO_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
self.v_cur_status = getStatus()
def watch(self):
# This line tells our script to keep an eye on our gpio pin and let us know when the pin goes HIGH or LOW
GPIO.add_event_detect(self.v_GPIO_pin, GPIO.BOTH, bouncetime=300)
# This line asigns a function to the GPIO pin so that when the above line tells us there is a change on the pin, run this function
GPIO.add_event_callback(self.v_GPIO_pin, self.v_callback_function)
time.sleep(sys_config['min_sensor_interval'])
print "watching..."
return
def getStatus(self):
self.v_cur_status = GPIO.input(self.v_GPIO_pin)
print "getStatus = ", self.v_cur_status
return self.v_cur_status
| pmandr/plant_carer | legacy/Sensor.py | Python | gpl-3.0 | 1,195 |
# -*- coding: utf-8 -*-
"""DDM diff function tools
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import time
from ddm.core._numba import numba, USE_NUMBA
from ddm.core.util import _prepare_complex_array
from ddm.core.norm import dnorm2
from ddm.conf import DDMConfig
from ddm.utils.print_tools import print_frame_rate, print_progress, print_message
import threading
# Serializes writes into the shared output arrays from the worker threads.
lock = threading.Lock()
# Dtype used for the per-lag pair-count accumulators.
DTUINT = np.dtype("uint")
@numba.jit(['uint[:](complex64[:],boolean[:],float32[:])','uint[:](complex128[:],boolean[:],float64[:])'],nopython = True, cache = True,nogil=True)
def _diff(data, select, out):
    """diff calculation implementation.

    Accumulates |data[i] - data[i+j]|**2 into out[j] for every pair with lag
    j where select[j] is True, then normalizes each lag by its pair count.
    Returns the per-lag pair counts.
    """
    assert len(select) == len(out)
    # r[j]: number of pairs accumulated at lag j.
    r = np.zeros(out.shape, DTUINT)
    maxr = len(r) - 1
    N = data.shape[0]
    for i in range(N):
        for j in range(N - i):
            # Lags beyond the output length only grow with j; stop early.
            if j > maxr:
                break
            if select[j] != 0:
                tmp = data[i] - data[j + i]
                out[j] = out[j] + (tmp.real**2 + tmp.imag**2)
                #out[j] = out[j] + (tmp.real**2 + tmp.imag**2)/(N-j)
                r[j] += 1
    # normalize: mean over the accumulated pairs; lags with no pairs get 0.
    for i in range(len(out)):
        if r[i] != 0:
            out[i] = out[i] / r[i]
        else:
            out[i] = 0
    return r
def diff(data, select = None, norm = False):
    """Calculates DDM Difference function.

    Calculation is performed over the last axis (-1) of the input array.
    Output of this function is equivalent to :func:`correlate`
    with as_ddm option turned on. For large arrays use the :func:`ddm` function

    Parameters
    ----------
    data : array
        A numpy array of any shape and complex dtype.
    select : array or tuple, optional
        A selection criteria. It can be a tuple of lower and upper bound, or an
        array of bools.
    norm : bool, optional
        Specifies whether to normalize data or not (default).

    Returns
    -------
    out : array
        Calculated DDM function D

    Examples
    --------
    >>> a = np.array([1.,2.,4.,8.])
    >>> diff(a)
    array([ 0. ,  7. , 22.5, 49. ])
    """
    if USE_NUMBA == False:
        #oo this will be slow...
        import warnings
        warnings.warn("Caculating D function for large arrays without numba is a suicide! Please install numba!")
    # Flatten all leading axes so the kernel sees a stack of 1-D signals.
    data, shape_orig = _prepare_complex_array(data)
    # Build the per-lag boolean mask from the various `select` forms.
    if select is None:
        select = np.ones(shape = (data.shape[-1],), dtype = bool)
    elif isinstance(select, tuple):
        low, high = select
        select = np.ones(shape = (high,), dtype = bool)
        select[0:low] = False
    else:
        select = select.astype(bool, copy = False).ravel()
    # Results start as NaN so untouched entries can be masked at the end.
    out = np.empty((len(data), len(select)), data.real.dtype)
    out[...] = np.nan
    iout = np.empty(out.shape, "uint")
    def run(data, out, iout, level):
        # Worker: process a strided subset of rows; the lock serializes the
        # writes back into the shared output arrays.
        N = data.shape[0]
        for i in range(N):
            tmp = np.zeros_like(out[0])
            a = data[i,:]
            r = _diff(a, select, tmp)
            with lock:
                out[i,:] = tmp
                iout[i,:] = r
            print_progress(i, N, level = level)
    t0 = time.time()
    level = DDMConfig.verbose
    num_threads = max(1, DDMConfig.nthreads)
    # NOTE(review): message says "idiff" but this is diff() — likely a
    # copy-paste slip in the log text.
    print_message("Performing DDM idiff calculation on {} threads...".format(num_threads), level = level)
    threads = []
    if num_threads > 1:
        # Extra workers are silenced (level 0); only the main thread reports.
        for i in range(1, num_threads):
            t = threading.Thread(target = run, args = (data[i::num_threads,:], out[i::num_threads,:], iout[i::num_threads,:], 0))
            threads.append(t)
        for t in threads:
            t.start()
    # The main thread processes its own stripe of rows.
    run(data[0::num_threads,:], out[0::num_threads,:], iout[0::num_threads,:], DDMConfig.verbose)
    for t in threads:
        t.join()
    N = data.shape[0]
    print_frame_rate(N, t0, level = level)
    if norm:
        norm = dnorm2(data)[:, np.newaxis]
        np.divide(out, norm, out)
    # Restore the original leading axes; NaN entries become masked values.
    out = out.reshape(shape_orig[0:-1] + (len(select),))
    return np.ma.masked_invalid(out)
@numba.jit(['uint[:](complex64[:],float64[:],boolean[:],float32[:])','uint[:](complex128[:],float64[:],boolean[:],float64[:])'],nopython = True, cache = True,nogil=True)
def _idiff(data, t, select, out):
    """ddm calculation implementation for irregularly sampled data.

    Like _diff, but the lag bin of each pair is the rounded time difference
    t[i+j] - t[i] rather than the index distance. Returns per-bin pair counts.
    """
    assert len(data) == len(t)
    assert len(select) == len(out)
    # r[k]: number of pairs accumulated in time-lag bin k.
    r = np.zeros(out.shape, DTUINT)
    maxr = len(r) - 1
    N = data.shape[0]
    for i in range(N):
        for j in range(N - i):
            tmp = data[i] - data[j + i]
            # Round the time difference to the nearest integer lag bin.
            k = int(round((t[i + j] - t[i])))
            if k > maxr:
                # Times are assumed sorted, so later pairs only grow the lag.
                break
            elif select[k] != 0:
                out[k] = out[k] + (tmp.real**2 + tmp.imag**2)
                r[k] = r[k] + 1
    # normalize: mean over pairs; empty bins become NaN (masked by callers).
    for i in range(len(out)):
        if r[i] != 0:
            out[i] = out[i] / r[i]
        else:
            out[i] = np.nan
    return r
def idiff(data, x = None, select = None, weights = None, norm = False):
    """Calculates DDM Difference function for irregular-spaced data.

    Calculation is performed over the last axis (-1) of the input array.

    Parameters
    ----------
    data : array
        A numpy array of any shape and complex dtype.
    x : array or None
        An index array of type float. Each element in x represents a time.
    select : array or tuple, optional
        A selection criteria. It can be a tuple of lower and upper bound, or an
        array of bools.
    weights : optional
        NOTE(review): accepted but never used in this implementation —
        either wire it in or drop it from the signature.
    norm : bool, optional
        Specifies whether to normalize data or not (default).

    Returns
    -------
    out : array
        Calculated DDM function D

    Examples
    --------
    >>> a = np.array([1.,2.,4.,8.])
    >>> idiff(a)
    array([ 0. ,  7. , 22.5, 49. ])
    """
    if USE_NUMBA == False:
        #oo this will be slow...
        import warnings
        warnings.warn("Caculating D function for large arrays without numba is a suicide! Please install numba!")
    # Flatten all leading axes so the kernel sees a stack of 1-D signals.
    data, shape_orig = _prepare_complex_array(data)
    # Default time axis is just the sample index; kernel requires float64.
    if x is None:
        x = np.arange(shape_orig[-1], dtype = "float64")
    else:
        x = x.astype("float64", copy = False) #convert to float64
    # Build the per-lag-bin boolean mask from the various `select` forms.
    if select is None:
        size = int(round(x.max())) + 1
        select = np.ones(shape = (size,), dtype = "bool")
    elif isinstance(select, tuple):
        low, high = select
        select = np.ones(shape = (high,), dtype = bool)
        select[0:low] = False
    else:
        select = select.astype(bool, copy = False).ravel()
    # Results start as NaN so empty bins can be masked at the end.
    out = np.empty((len(data), len(select)), data.real.dtype)
    out[...] = np.nan
    iout = np.empty(out.shape, "uint")
    def run(data, out, iout, level):
        # Worker: process a strided subset of rows; the lock serializes the
        # writes back into the shared output arrays.
        N = data.shape[0]
        for i in range(N):
            tmp = np.zeros_like(out[0])
            a = data[i,:]
            r = _idiff(a, x, select, tmp)
            with lock:
                out[i,:] = tmp
                iout[i,:] = r
            print_progress(i, N, level = level)
    t0 = time.time()
    level = DDMConfig.verbose
    num_threads = max(1, DDMConfig.nthreads)
    print_message("Performing DDM idiff calculation on {} threads...".format(num_threads), level = level)
    threads = []
    if num_threads > 1:
        # Extra workers are silenced (level 0); only the main thread reports.
        for i in range(1, num_threads):
            t = threading.Thread(target = run, args = (data[i::num_threads,:], out[i::num_threads,:], iout[i::num_threads,:], 0))
            threads.append(t)
        for t in threads:
            t.start()
    # The main thread processes its own stripe of rows.
    run(data[0::num_threads,:], out[0::num_threads,:], iout[0::num_threads,:], DDMConfig.verbose)
    for t in threads:
        t.join()
    N = data.shape[0]
    print_frame_rate(N, t0, level = level)
    if norm:
        norm = dnorm2(data)[:, np.newaxis]
        np.divide(out, norm, out)
    # Restore the original leading axes; NaN entries become masked values.
    out = out.reshape(shape_orig[0:-1] + (len(select),))
    return np.ma.masked_invalid(out)
if __name__ == "__main__":
    # Run the docstring examples above as a quick self-test.
    import doctest
    doctest.testmod()
| andrej5elin/ddm | ddm/core/diff.py | Python | gpl-3.0 | 8,173 |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-instance-attributes
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division)
from copy import deepcopy
import numpy as np
from numpy import dot, zeros, eye
from scipy.linalg import cholesky, qr, pinv
from filterpy.common import pretty_str
class SquareRootKalmanFilter(object):
    """
    Create a Kalman filter which uses a square root implementation.
    This uses the square root of the state covariance matrix, which doubles
    the numerical precision of the filter, thereby reducing the effect
    of round off errors.

    It is likely that you do not need to use this algorithm; we understand
    divergence issues very well now. However, if you expect the covariance
    matrix P to vary by 20 or more orders of magnitude then perhaps this
    will be useful to you, as the square root will vary by 10 orders
    of magnitude. From my point of view this is merely a 'reference'
    algorithm; I have not used this code in real world software. Brown[1]
    has a useful discussion of when you might need to use the square
    root form of this algorithm.

    You are responsible for setting the various state variables to
    reasonable values; the defaults below will not give you a functional
    filter.

    Parameters
    ----------
    dim_x : int
        Number of state variables for the Kalman filter. For example, if
        you are tracking the position and velocity of an object in two
        dimensions, dim_x would be 4.
        This is used to set the default size of P, Q, and u
    dim_z : int
        Number of of measurement inputs. For example, if the sensor
        provides you with position in (x,y), dim_z would be 2.
    dim_u : int (optional)
        size of the control input, if it is being used.
        Default value of 0 indicates it is not used.

    Attributes
    ----------
    x : numpy.array(dim_x, 1)
        State estimate
    P : numpy.array(dim_x, dim_x)
        State covariance matrix
    x_prior : numpy.array(dim_x, 1)
        Prior (predicted) state estimate. The *_prior and *_post attributes
        are for convienence; they store the prior and posterior of the
        current epoch. Read Only.
    P_prior : numpy.array(dim_x, dim_x)
        Prior (predicted) state covariance matrix. Read Only.
    x_post : numpy.array(dim_x, 1)
        Posterior (updated) state estimate. Read Only.
    P_post : numpy.array(dim_x, dim_x)
        Posterior (updated) state covariance matrix. Read Only.
    z : numpy.array
        Last measurement used in update(). Read only.
    R : numpy.array(dim_z, dim_z)
        Measurement noise matrix
    Q : numpy.array(dim_x, dim_x)
        Process noise matrix
    F : numpy.array()
        State Transition matrix
    H : numpy.array(dim_z, dim_x)
        Measurement function
    y : numpy.array
        Residual of the update step. Read only.
    K : numpy.array(dim_x, dim_z)
        Kalman gain of the update step. Read only.
    S : numpy.array
        Systen uncertaintly projected to measurement space. Read only.

    Examples
    --------
    See my book Kalman and Bayesian Filters in Python
    https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

    References
    ----------
    [1] Robert Grover Brown. Introduction to Random Signals and Applied
        Kalman Filtering. Wiley and sons, 2012.
    """

    def __init__(self, dim_x, dim_z, dim_u=0):
        # Bug fix: the original tested dim_z twice, so dim_x was never
        # validated; the messages also named the wrong variables.
        if dim_x < 1:
            raise ValueError('dim_x must be 1 or greater')
        if dim_z < 1:
            raise ValueError('dim_z must be 1 or greater')
        if dim_u < 0:
            raise ValueError('dim_u must be 0 or greater')

        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        self.x = zeros((dim_x, 1))  # state
        self._P = eye(dim_x)        # uncertainty covariance
        self._P1_2 = eye(dim_x)     # sqrt uncertainty covariance
        self._Q = eye(dim_x)        # process uncertainty
        self._Q1_2 = eye(dim_x)     # sqrt process uncertainty
        self.B = 0.                 # control transition matrix
        self.F = np.eye(dim_x)      # state transition matrix
        self.H = np.zeros((dim_z, dim_x))  # Measurement function
        self._R1_2 = eye(dim_z)     # sqrt state uncertainty
        self._R = eye(dim_z)        # state uncertainty
        self.z = np.array([[None]*self.dim_z]).T
        self.K = 0.
        self.S = 0.

        # Residual is computed during the innovation (update) step. We
        # save it so that in case you want to inspect it for various
        # purposes
        self.y = zeros((dim_z, 1))

        # identity matrix.
        self._I = np.eye(dim_x)

        # Work matrix assembled in update() and factored with QR.
        self.M = np.zeros((dim_z + dim_x, dim_z + dim_x))

        # copy prior and posterior
        self.x_prior = np.copy(self.x)
        self._P1_2_prior = np.copy(self._P1_2)
        self.x_post = np.copy(self.x)
        self._P1_2_post = np.copy(self._P1_2)

    def update(self, z, R2=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update.
        R2 : np.array, scalar, or None
            Sqrt of measurement noise. Optionally provide to override the
            measurement noise for this one call, otherwise self.R2 will
            be used.
        """
        if z is None:
            self.z = np.array([[None]*self.dim_z]).T
            self.x_post = self.x.copy()
            self._P1_2_post = np.copy(self._P1_2)
            return

        if R2 is None:
            R2 = self._R1_2
        elif np.isscalar(R2):
            R2 = eye(self.dim_z) * R2

        # rename for convienance
        dim_z = self.dim_z
        M = self.M

        # Assemble the pre-array; one QR factorization yields the gain and
        # the updated sqrt covariance simultaneously.
        M[0:dim_z, 0:dim_z] = R2.T
        M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T
        M[dim_z:, dim_z:] = self._P1_2.T

        _, self.S = qr(M)
        self.K = self.S[0:dim_z, dim_z:].T
        N = self.S[0:dim_z, 0:dim_z].T

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(self.H, self.x)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x += dot(self.K, pinv(N)).dot(self.y)
        self._P1_2 = self.S[dim_z:, dim_z:].T

        self.z = deepcopy(z)
        self.x_post = self.x.copy()
        self._P1_2_post = np.copy(self._P1_2)

    def predict(self, u=0):
        """
        Predict next state (prior) using the Kalman filter state propagation
        equations.

        Parameters
        ----------
        u : np.array, optional
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """
        # x = Fx + Bu
        self.x = dot(self.F, self.x) + dot(self.B, u)

        # P = FPF' + Q, propagated in square-root form via QR.
        _, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
        self._P1_2 = P2[:self.dim_x, :self.dim_x].T

        # copy prior
        self.x_prior = np.copy(self.x)
        self._P1_2_prior = np.copy(self._P1_2)

    def residual_of(self, z):
        """ returns the residual for the given measurement (z). Does not alter
        the state of the filter.
        """
        return z - dot(self.H, self.x)

    def measurement_of_state(self, x):
        """ Helper function that converts a state into a measurement.

        Parameters
        ----------
        x : np.array
            kalman state vector

        Returns
        -------
        z : np.array
            measurement corresponding to the given state
        """
        return dot(self.H, x)

    @property
    def Q(self):
        """ Process uncertainty"""
        return dot(self._Q1_2.T, self._Q1_2)

    @property
    def Q1_2(self):
        """ Sqrt Process uncertainty"""
        return self._Q1_2

    @Q.setter
    def Q(self, value):
        """ Process uncertainty"""
        self._Q = value
        self._Q1_2 = cholesky(self._Q, lower=True)

    @property
    def P(self):
        """ covariance matrix"""
        return dot(self._P1_2.T, self._P1_2)

    @property
    def P_prior(self):
        """ covariance matrix of the prior"""
        return dot(self._P1_2_prior.T, self._P1_2_prior)

    @property
    def P_post(self):
        """ covariance matrix of the posterior"""
        # Bug fix: the original returned the *prior* factor product here,
        # so P_post always equaled P_prior.
        return dot(self._P1_2_post.T, self._P1_2_post)

    @property
    def P1_2(self):
        """ sqrt of covariance matrix"""
        return self._P1_2

    @P.setter
    def P(self, value):
        """ covariance matrix"""
        self._P = value
        self._P1_2 = cholesky(self._P, lower=True)

    @property
    def R(self):
        """ measurement uncertainty"""
        return dot(self._R1_2.T, self._R1_2)

    @property
    def R1_2(self):
        """ sqrt of measurement uncertainty"""
        return self._R1_2

    @R.setter
    def R(self, value):
        """ measurement uncertainty"""
        self._R = value
        self._R1_2 = cholesky(self._R, lower=True)

    def __repr__(self):
        return '\n'.join([
            'SquareRootKalmanFilter object',
            pretty_str('dim_x', self.dim_x),
            pretty_str('dim_z', self.dim_z),
            pretty_str('dim_u', self.dim_u),
            pretty_str('x', self.x),
            pretty_str('P', self.P),
            pretty_str('F', self.F),
            pretty_str('Q', self.Q),
            pretty_str('R', self.R),
            pretty_str('H', self.H),
            pretty_str('K', self.K),
            pretty_str('y', self.y),
            pretty_str('S', self.S),
            pretty_str('M', self.M),
            pretty_str('B', self.B),
        ])
import paramiko
import time
import redis
import ujson
import os
import subprocess
import django_rq
from django_rq import job
from django.conf import settings
from adjacent import Client
from datetime import timedelta
channel_to_job_dict = {}
"""
Send json data to client/browser using centrifuge/adacent
"""
def send_data_to_client(channel, message, message_key=None):
    """Publish `message` to the browser on `channel` via centrifuge.

    If `message_key` is given, the message is wrapped in a one-key dict and
    JSON-encoded before publishing; otherwise it is published as-is.
    """
    client = Client()
    if message_key is None:
        payload = message
    else:
        payload = ujson.dumps({message_key: message})
    client.publish(channel, payload)
    return client.send()
"""
1. We need redis server to be running.
2. We need rqworker to be running in the background.
3. We need centrifuge to be running in the background.
"""
"""
This is called by run-worker.py when spawn_antik times out.
We need to let the user know that we failed to create antik.
"""
def handle_failed_antik(job, exc_type, exc_value, traceback):
    """rq failure handler: tell the client that spawning antik failed.

    Called by run-worker.py when spawn_antik times out; `job.args[0]` is the
    data channel the client is listening on.
    """
    args = job.args
    # Before deleting, publish that it failed to the client
    # publish using centrifuge.
    send_data_to_client(args[0], exc_value, 'error')
    # Clear the failed queue
    # (kept disabled: this would wipe *all* failed jobs, not just this one)
    """
    q = django_rq.get_failed_queue()
    while True:
        job = q.dequeue()
        if not job:
            break
        job.delete() # Will delete key from Redis
    """
"""
Invoked by the scheduler. This is an idication that
backend antik has crashed/unresponsive.
Inform to the client
"""
def handle_unresponsive_antik(data_channel):
    """Watchdog callback: the antik backend stopped sending keepalives.

    Drops the channel's bookkeeping entry and notifies the browser client.
    """
    # pop() with a default instead of del: tolerate the entry having been
    # removed already (e.g. a DONE message racing with the watchdog firing),
    # which previously raised KeyError.
    channel_to_job_dict.pop(data_channel, None)
    send_data_to_client(data_channel, {'error': 'antik is not responding'},
                        'error')
@job
def spawn_antik(data_channel, final_config, antik_cred):
    """
    input should be validated form.
    subscribe to the channel which antik should connect to
    Convert the form to xml.
    1. If antik should be done locally,
          Write the xml to /tmp directory.
          Fork antik
    2. If antik is done remotely,
          ssh into the machine and trigger antik

    :param data_channel: redis/centrifuge channel antik will publish on.
    :param final_config: configuration dict, dumped to a temp file for antik.
    :param antik_cred: dict with 'location' ('local' or remote) and, for
        remote runs, 'ip'/'username'/'password' for ssh.
    """
    local_file = "/tmp/configlocal_%s" % data_channel
    remote_file = "/tmp/configremote_%s" % data_channel
    with open(local_file, "w") as outfile:
        ujson.dump(final_config, outfile)
    if antik_cred['location'] == 'local':
        # Detach antik from this worker; all of its output is discarded.
        dev_null = open(os.devnull, 'w')
        subprocess.Popen(["antik", "-g", "-f", local_file], stdout=dev_null,
                         stderr=subprocess.STDOUT)
    else:
        # Copy the config over sftp, then start antik in the background.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(antik_cred['ip'], username=antik_cred['username'], password=antik_cred['password'])
        sftp = ssh.open_sftp()
        sftp.put(local_file, remote_file)
        sftp.close()
        stdin, stdout, stderr = ssh.exec_command('nohup antik -g -f '
                                                 '%s > /dev/null 2>&1 &' % remote_file)
    # Block this rq job until the spawned antik sends its first KEEPALIVE;
    # run-worker.py's timeout turns a silent backend into handle_failed_antik.
    pubsub = redis.Redis(settings.CONFIGURED_IP).pubsub()
    pubsub.subscribe([data_channel])
    for item in pubsub.listen():
        if item['data'] == "KEEPALIVE":
            break
#Task started just once.
@job
def subscriber():
r = redis.Redis(settings.CONFIGURED_IP)
pubsub = r.pubsub()
pubsub.subscribe(['default'])
for item in pubsub.listen():
if item['channel'] == "default":
# We have got a subscriber
pubsub.subscribe(item['data'])
elif item['data'] == "KEEPALIVE":
# Keepalive coming from antik backend.
scheduler = django_rq.get_scheduler('default')
job_second_list = channel_to_job_dict.get(item['channel'])
if job_second_list is not None:
job = job_second_list[0]
second = job_second_list[1]
else:
job = None
second = None
if job is None:
print "scehduled job. "
job = scheduler.enqueue_in(timedelta(seconds = 5),
handle_unresponsive_antik, item['channel'])
# Enqueue the job in a dict
channel_to_job_dict[item['channel']] = [job, 1]
elif job in scheduler:
# Re-schedule
if second + 1 == 5:
scheduler.cancel(job);
job = scheduler.enqueue_in(timedelta(seconds = 5),
handle_unresponsive_antik, item['channel'])
# Enqueue the job in a dict
channel_to_job_dict[item['channel']] = [job, 1]
else:
second = second + 1
channel_to_job_dict[item['channel']] = [job, second]
elif item['data'] == "DONE":
if job in scheduler:
scheduler.cancel(job);
del channel_to_job_dict[item['channel']]
send_data_to_client(item['channel'], {'notification' : 'test completed'},
'notification')
else:
# Received data. Send it Centrifuge
send_data_to_client(item['channel'], item['data'])
| niks3089/nixia-console | receiver/task.py | Python | gpl-3.0 | 5,045 |
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from unittest import mock
from oslo_utils import uuidutils
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.db.nfvo import nfvo_db
from tacker.db.vnfm import vnfm_db
from tacker.plugins.common import constants
from tacker.tests.unit.db import base as db_base
from tacker.tests.unit.db import utils
from tacker.vnfm import plugin
class FakeCVNFMonitor(mock.Mock):
    """Stand-in for the VNF monitor classes; mock.Mock absorbs every call."""
    pass
class FakePlugin(mock.Mock):
    """Stand-in for the Fenix maintenance plugin; mock.Mock absorbs every call."""
    pass
class FakeK8SVimClient(mock.Mock):
    """Stand-in for the Kubernetes VIM client; mock.Mock absorbs every call."""
    pass
class TestCVNFMPlugin(db_base.SqlTestCase):
    """Exercise VNFMPlugin create paths against a mocked Kubernetes VIM.

    All infra-driver entry points (create/update/delete/scale and their
    *_wait counterparts) are patched out, so the tests only cover the
    plugin/DB layer.
    """

    def setUp(self):
        """Wire mocks for the VIM client, monitors, maintenance plugin and
        the Kubernetes infra driver, then build the plugin under test."""
        super(TestCVNFMPlugin, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self._mock_vim_client()
        self._stub_get_vim()
        self._mock_vnf_monitor()
        self._mock_vnf_maintenance_monitor()
        self._mock_vnf_maintenance_plugin()
        self._insert_dummy_vim()
        self.vnfm_plugin = plugin.VNFMPlugin()
        # Event logging and mgmt-driver resolution are irrelevant here.
        mock.patch('tacker.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event'
                   ).start()
        mock.patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._mgmt_driver_name',
                   return_value='noop').start()
        self.create = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                 'kubernetes_driver.Kubernetes.create',
                                 return_value=uuidutils.
                                 generate_uuid()).start()
        self.create_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                      'kubernetes_driver.Kubernetes.'
                                      'create_wait').start()
        self.update = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                 'kubernetes_driver.Kubernetes.update').start()
        self.update_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                      'kubernetes_driver.Kubernetes.'
                                      'update_wait').start()
        self.delete = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                 'kubernetes_driver.Kubernetes.delete').start()
        self.delete_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                      'kubernetes_driver.Kubernetes.'
                                      'delete_wait').start()
        self.scale = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                'kubernetes_driver.Kubernetes.scale',
                                return_value=uuidutils.generate_uuid()).start()
        self.scale_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
                                     'kubernetes_driver.Kubernetes.scale_wait',
                                     return_value=uuidutils.
                                     generate_uuid()).start()

        # Run "spawned" work synchronously so assertions see its effects.
        def _fake_spawn(func, *args, **kwargs):
            func(*args, **kwargs)
        mock.patch.object(self.vnfm_plugin, 'spawn_n',
                          _fake_spawn).start()
        self._cos_db_plugin =\
            common_services_db_plugin.CommonServicesPluginDb()

    def _mock_vim_client(self):
        """Replace the VimClient class with a mock returning a fake client."""
        self.vim_client = mock.Mock(wraps=FakeK8SVimClient())
        fake_vim_client = mock.Mock()
        fake_vim_client.return_value = self.vim_client
        self._mock(
            'tacker.vnfm.vim_client.VimClient', fake_vim_client)

    def _stub_get_vim(self):
        """Make the fake VIM client return a canned Kubernetes VIM record."""
        vim_obj = {'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
                   'vim_name': 'fake_vim',
                   'vim_auth': {'auth_url': 'http://localhost:6443',
                                'password': 'test_pw', 'username': 'test_user',
                                'project_name': 'test_project',
                                'ssl_ca_cert': None},
                   'vim_type': 'kubernetes'}
        self.vim_client.get_vim.return_value = vim_obj

    def _mock_vnf_monitor(self):
        """Replace VNFMonitor with a mock-backed fake."""
        self._vnf_monitor = mock.Mock(wraps=FakeCVNFMonitor())
        fake_vnf_monitor = mock.Mock()
        fake_vnf_monitor.return_value = self._vnf_monitor
        self._mock(
            'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)

    def _mock_vnf_maintenance_monitor(self):
        """Replace VNFMaintenanceAlarmMonitor with a mock-backed fake."""
        self._vnf_maintenance_mon = mock.Mock(wraps=FakeCVNFMonitor())
        fake_vnf_maintenance_monitor = mock.Mock()
        fake_vnf_maintenance_monitor.return_value = self._vnf_maintenance_mon
        self._mock(
            'tacker.vnfm.monitor.VNFMaintenanceAlarmMonitor',
            fake_vnf_maintenance_monitor)

    def _mock_vnf_maintenance_plugin(self):
        """Replace the Fenix maintenance plugin with a mock-backed fake."""
        self._vnf_maintenance_plugin = mock.Mock(wraps=FakePlugin())
        fake_vnf_maintenance_plugin = mock.Mock()
        fake_vnf_maintenance_plugin.return_value = self._vnf_maintenance_plugin
        self._mock(
            'tacker.plugins.fenix.FenixPlugin',
            fake_vnf_maintenance_plugin)

    def _insert_dummy_vnf_template(self):
        """Insert and return an onboarded dummy VNFD row."""
        session = self.context.session
        vnf_template = vnfm_db.VNFD(
            id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_template',
            description='fake_template_description',
            template_source='onboarded',
            deleted_at=datetime.min)
        session.add(vnf_template)
        session.flush()
        return vnf_template

    def _insert_dummy_vnf_template_inline(self):
        """Insert and return an inline-sourced dummy VNFD row."""
        session = self.context.session
        vnf_template = vnfm_db.VNFD(
            id='d58bcc4e-d0cf-11e6-bf26-cec0c932ce01',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='tmpl-koeak4tqgoqo8cr4-dummy_inline_vnf',
            description='inline_fake_template_description',
            deleted_at=datetime.min,
            template_source='inline')
        session.add(vnf_template)
        session.flush()
        return vnf_template

    def _insert_dummy_vim(self):
        """Insert a fake Kubernetes VIM row plus its auth record.

        FIX: removed a stray leading ``pass`` left over from a stub; it was
        dead code (the inserts below always ran regardless).
        """
        session = self.context.session
        vim_db = nfvo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='kubernetes',
            status='Active',
            deleted_at=datetime.min,
            placement_attr={'regions': ['default', 'kube-public',
                                        'kube-system']})
        vim_auth_db = nfvo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='encrypted_pw',
            auth_url='http://localhost:6443',
            vim_project={'name': 'test_project'},
            auth_cred={'auth_url': 'https://localhost:6443',
                       'username': 'admin',
                       'bearer_token': None,
                       'ssl_ca_cert': 'test',
                       'project_name': 'default',
                       'type': 'kubernetes'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    def test_create_cvnf_with_vnfd(self):
        """Creating a containerized VNF from an onboarded VNFD succeeds."""
        self._insert_dummy_vnf_template()
        vnf_obj = utils.get_dummy_vnf_obj()
        result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
        self.assertIsNotNone(result)
        self.assertIn('id', result)
        self.assertIn('instance_id', result)
        self.assertIn('status', result)
        self.assertIn('attributes', result)
        self.assertIn('mgmt_ip_address', result)
        self.assertIn('created_at', result)
        self.assertIn('updated_at', result)
        self.assertEqual('ACTIVE', result['status'])
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
            tstamp=mock.ANY, details=mock.ANY)

    @mock.patch('tacker.vnfm.plugin.VNFMPlugin.create_vnfd')
    def test_create_cvnf_from_template(self, mock_create_vnfd):
        """Creating a containerized VNF from an inline template also creates
        the VNFD on the fly (create_vnfd called exactly once)."""
        self._insert_dummy_vnf_template_inline()
        mock_create_vnfd.return_value = {'id':
                                         'd58bcc4e-d0cf-11e6-bf26'
                                         '-cec0c932ce01'}
        vnf_obj = utils.get_dummy_inline_cvnf_obj()
        result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
        self.assertIsNotNone(result)
        self.assertIn('id', result)
        self.assertIn('instance_id', result)
        self.assertIn('status', result)
        self.assertIn('attributes', result)
        self.assertIn('mgmt_ip_address', result)
        self.assertIn('created_at', result)
        self.assertIn('updated_at', result)
        self.assertEqual('ACTIVE', result['status'])
        mock_create_vnfd.assert_called_once_with(mock.ANY, mock.ANY)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, evt_type=constants.RES_EVT_CREATE,
            res_id=mock.ANY,
            res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
            tstamp=mock.ANY, details=mock.ANY)

    def test_delete_vnf(self):
        pass

    def test_update_vnf(self):
        pass

    def _test_scale_vnf(self, type):
        pass

    def test_scale_vnf_out(self):
        pass

    def test_scale_vnf_in(self):
        pass
| openstack/tacker | tacker/tests/unit/vnfm/test_k8s_plugin.py | Python | apache-2.0 | 10,109 |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from oslo_config import cfg
# Options describing where the ec2api installation lives on disk.  The
# '$pybasedir'/'$state_path' defaults are oslo.config interpolation
# templates, resolved lazily against the registered options below.
path_opts = [
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the ec2api python module is installed'),
    cfg.StrOpt('bindir',
               default=os.path.join(sys.prefix, 'local', 'bin'),
               help='Directory where ec2api binaries are installed'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining ec2api's state"),
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
def basedir_def(*args):
    """Return an uninterpolated path relative to $pybasedir."""
    parts = ('$pybasedir',) + args
    return os.path.join(*parts)
def bindir_def(*args):
    """Return an uninterpolated path relative to $bindir."""
    parts = ('$bindir',) + args
    return os.path.join(*parts)
def state_path_def(*args):
    """Return an uninterpolated path relative to $state_path."""
    parts = ('$state_path',) + args
    return os.path.join(*parts)
def basedir_rel(*args):
    """Return a concrete path relative to the configured pybasedir."""
    parts = (CONF.pybasedir,) + args
    return os.path.join(*parts)
def bindir_rel(*args):
    """Return a concrete path relative to the configured bindir."""
    parts = (CONF.bindir,) + args
    return os.path.join(*parts)
def state_path_rel(*args):
    """Return a concrete path relative to the configured state_path."""
    parts = (CONF.state_path,) + args
    return os.path.join(*parts)
| MayankGo/ec2-api | ec2api/paths.py | Python | apache-2.0 | 2,014 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Print text in all colors
from clint.textui import colored, puts, indent
from clint.arguments import Args
# Demo script: prints colored text and greetings via clint, then echoes
# the first command-line argument.
args = Args()
if __name__ == '__main__':
    puts('Tests:')
    with indent(4):
        puts('%s All tests passed 1.' % colored.green('✔'))
        puts('%s Failed? ' % colored.red('✖'))
    puts('')
    puts('Greet:')
    with indent(4):
        puts(colored.red('Здравствуйте'))
        puts(colored.blue('नमस्ते'))
        puts(colored.cyan('γειά σου'))
    puts('')
    puts('Arguments:')
    with indent(4):
        # NOTE(review): args[0] raises IndexError when the script is run
        # with no arguments — confirm whether that is acceptable for a demo.
        puts('%s' % colored.red(args[0]))
    puts('')
| vicgc/recognise | src/app/console.py | Python | mit | 665 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## partes_de_fabricacion_rollos.py - Parte de producción para rollos.
###################################################################
## NOTAS:
##
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
## 15 de noviembre de 2005 -> Inicio
## 16 de noviembre de 2005 -> 99% funcional
## 23 de enero de 2006 -> Portado a clase.
## 26 de enero de 2006 -> Funcional al 99% one more time.
## 9 de mayo de 2006 -> Control de permisos. Para copiar a otras
## ventanas, mirar: check_permisos(), rellenar_widgets,
## self.__lecturaescritura, self.__permisos, activar_widgets y
## la asignación de id en nuevo_.
## 10 de mayo de 2006 -> Cambiado comportamiento de set_articulo.
## 26 de julio de 2006 -> Añadidos empleados por defecto según
## calendario laboral.
## 8 de marzo de 2006 -> Añado rollos por defecto.
## 31 de julio de 2007 -> Nueva casilla "versión de la ficha de
## producción" usada (texto libre).
###################################################################
## DONE:
## + Comprobar que se marca bien el consumo estimado en relación
## con el de balas _en todos los partes_ de la misma partida.
## + No estaría de más un entry con el cálculo acumulado de consumo
## estimado (además facilitaría el consumo en relación con
## el consumo real de balas añadida. El cálculo de arriba,
## vamos.)
## + Falta cálculo de rendimiento:
## nºtrabajadores * horas turno / nº trabajadores * horas reales.
## + Comprobar que las horas del parte no pisan a otro parte de rollos.
## + Al eliminar todos los rollos de un parte, los consumos deberían
## quedar a 0. Sin embargo no es así. Why? (por poner el parte a
## None antes de descontar el consumo).
###################################################################
#import sys, os
#sys.stdout = open("salida_debug.txt", "a")
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time #, sqlobject ## pclases enmascara sqlobject. No
## hace falta importarlo directamente.
try:
import pclases
except ImportError:
import sys
from os.path import join as pathjoin; sys.path.append(pathjoin("..", "framework"))
import pclases
import mx
try:
import geninformes
except ImportError:
import sys
sys.path.append('../informes')
import geninformes
from utils import _float as float
try:
import psycopg
except ImportError:
import psycopg2 as psycopg
from ventana_progreso import VentanaActividad, VentanaProgreso
import re, os
from partes_de_fabricacion_balas import verificar_solapamiento, \
buscar_o_crear_albaran_interno, \
entran_en_turno
try:
from psycopg import ProgrammingError as psycopg_ProgrammingError
except ImportError:
from psycopg2 import ProgrammingError as psycopg_ProgrammingError
def build_etiqueta(rollo):
    """
    Return a dict with the roll information to include on its label.
    If ``rollo`` is neither a Rollo nor a RolloDefectuoso, returns None.
    Also returns the function that renders the label template for the
    product (None when the product has no label template).
    """
    try:
        cer = rollo.productoVenta.camposEspecificosRollo
        func = cer.modeloEtiqueta.get_func()
    except AttributeError: # Not a roll.
        func = None
    except ValueError: # No label template.
        func = None
    if isinstance(rollo, pclases.RolloDefectuoso):
        # Defective roll: real measured values come from the roll itself.
        producto = rollo.productoVenta
        res = {'descripcion': producto.nombre,
               'densidad': utils.float2str(rollo.densidad, 1),
               'ancho': "%s m" % (utils.float2str(rollo.ancho, 1)),
               'peso': "%s kg" % (utils.float2str(
                                    rollo.peso - rollo.pesoEmbalaje)),
               'm2': "%s m²" % (utils.float2str(
                                    rollo.ancho * rollo.metrosLineales, 1)),
               'mlin': "%s m" % (utils.float2str(rollo.metrosLineales)),
               #'nrollo': str(rollo.numrollo),
               'nrollo': rollo.codigo, # To tell them apart more easily on
                # the label from a distance (even if they carry other marks)
               'partida': rollo.partida.codigo,
               'codigo': producto.codigo,
               'codigo39': rollo.codigo,
               'defectuoso': True,
               'idrollo': rollo.id,
               'objeto': rollo}
    elif isinstance(rollo, pclases.Rollo):
        # Regular roll: nominal values come from the product's spec fields.
        producto = rollo.productoVenta
        campos = producto.camposEspecificosRollo
        if rollo.rollob:
            # "B-grade" roll: weight computed from nominal dimensions.
            res = {'descripcion': producto.nombre,
                   'densidad': str(campos.gramos),
                   'ancho': "%s m" % (campos.ancho),
                   'peso': "%s kg" % (int(
                    (campos.ancho*campos.metrosLineales*campos.gramos/1000))),
                   'm2': "%s m²" % (campos.ancho*campos.metrosLineales),
                   'mlin': "%s m" % (campos.metrosLineales),
                   'nrollo': str(rollo.numrollo),
                   'partida': rollo.partida.codigo,
                   'codigo': producto.codigo,
                   'codigo39': rollo.codigo,
                   'defectuoso': rollo.rollob,
                   'idrollo': rollo.id,
                   'objeto': rollo}
        else:
            res = {'descripcion': producto.nombre,
                   'densidad': str(campos.gramos),
                   'ancho': "%s m" % (campos.ancho),
                   'peso': "%s kg" % (int(
                        (campos.metros_cuadrados * campos.gramos / 1000.0))),
                        # THEORETICAL WEIGHT. Without packaging.
                   'm2': "%s m²" % (campos.metros_cuadrados),
                   'mlin': "%s m" % (campos.metrosLineales),
                   'nrollo': str(rollo.numrollo),
                   'partida': rollo.partida.codigo,
                   'codigo': producto.codigo,
                   'codigo39': rollo.codigo,
                   'defectuoso': False,
                   'idrollo': rollo.id,
                   'objeto': rollo} # If not created yet, since
                    # defectuoso == False, geninformes won't need it.
    else:
        res = None
    return res, func
def imprimir_etiqueta_de_rollo_defectuoso(rollo):
    """
    Print a defective-roll label for the given Rollo/RolloDefectuoso
    object.  Silently returns when the object is neither.
    """
    import informes
    producto = rollo.productoVenta
    if isinstance(rollo, pclases.RolloDefectuoso):
        # Defective roll: label carries the measured values.
        elemento = {'descripcion': producto.nombre,
                    'densidad': utils.float2str(rollo.densidad, 1),
                    'ancho': "%s m" % (utils.float2str(rollo.ancho, 1)),
                    'peso': "%s kg" % (utils.float2str(rollo.peso - rollo.pesoEmbalaje)),
                    'm2': "%s m²" % (utils.float2str(rollo.ancho * rollo.metrosLineales, 1)),
                    'mlin': "%s m" % (utils.float2str(rollo.metrosLineales)),
                    #'nrollo': str(rollo.numrollo),
                    'nrollo': rollo.codigo, # To tell them apart more
                        # easily on the label from a distance (even if
                        # they carry other marks)
                    'partida': rollo.partida.codigo,
                    'codigo': producto.codigo,
                    'codigo39': rollo.codigo,
                    'defectuoso': True,
                    'idrollo': rollo.id,
                    'objeto': rollo}
    elif isinstance(rollo, pclases.Rollo):
        # Regular roll: label carries the product's nominal values.
        campos = producto.camposEspecificosRollo
        elemento = {'descripcion': producto.nombre,
                    'densidad': str(campos.gramos),
                    'ancho': "%s m" % (campos.ancho),
                    'peso': "%s kg" % (int((campos.ancho*campos.metrosLineales*campos.gramos/1000))),
                    'm2': "%s m²" % (campos.ancho*campos.metrosLineales),
                    'mlin': "%s m" % (campos.metrosLineales),
                    'nrollo': str(rollo.numrollo),
                    'partida': rollo.partida.codigo,
                    'codigo': producto.codigo,
                    'codigo39': rollo.codigo,
                    'defectuoso': rollo.rollob,
                    'idrollo': rollo.id,
                    'objeto': rollo} # It doesn't really matter, because if
                        # rollob is True all these values get replaced
                        # before printing its label.
    else:
        return
    informes.abrir_pdf(geninformes.etiquetasRollosEtiquetadora([elemento],
                                                               False))
class PartesDeFabricacionRollos(Ventana):
    def __init__(self, objeto = None, permisos = "rwx", usuario = None):
        """
        Constructor. ``objeto`` may be a pclases object to open the window
        with (instead of the first row of the table, which is what gets
        shown by default).
        ``permisos`` is a string of permission flags (see below) and
        ``usuario`` the logged-in user, if any.
        """
        self.usuario = usuario
        self.producto = None # Product related to the production report.
                             # It must match the one of every roll under
                             # "Production details".
        self.ultima_etiqueta = None
        self.__lecturaescritura = None
        # This attribute is None when the window allows accessing and
        # modifying every report.
        # If it only allows browsing old reports and editing new ones,
        # self.__lecturaescritura holds the report's id. That way the new
        # report can always be edited even if another one was consulted
        # momentarily.
        self.__permisos = permisos
        # If the permission string contains "x" the window allows creating
        # new reports. If it has "r" it allows reading old reports. And if
        # it contains "w" it allows editing old reports.
        Ventana.__init__(self, 'partes_de_fabricacion_rollos.glade', objeto)
        # XXX: merma spin button built by hand instead of in the glade file.
        self.wids['sp_merma'] = gtk.SpinButton()
        self.wids['sp_merma'].set_range(0, 100)
        self.wids['table1'].attach(self.wids['sp_merma'], 1, 2, 1, 2)
        self.wids['sp_merma'].connect('output',
                                      self.actualizar_consumo_estimado)
        self.wids['sp_merma'].set_property("visible", False)
        # XXX
        connections = {'b_salir/clicked': self._salir,
                       'ventana/delete_event' : self._salir,
                       'b_nuevo/clicked': self.crear_nuevo_partedeproduccion,
                       'b_actualizar/clicked': self.actualizar_ventana,
                       'b_guardar/clicked': self.guardar,
                       'b_buscar/clicked': self.buscar_partedeproduccion,
                       'b_borrar/clicked': self.borrar_parte,
                       'b_articulo/clicked': self.set_articulo,
                       'b_fecha/clicked': self.mostrar_calendario,
                       'b_hora_fin/clicked': self.set_hora_final,
                       'b_hora_ini/clicked': self.set_hora_inicial,
                       'b_add_rollo/clicked': self.add_rollo,
                       'b_drop_rollo/clicked': self.drop_rollo,
                       'b_add_incidencia/clicked': self.add_incidencia,
                       'b_drop_incidencia/clicked': self.drop_incidencia,
                       'b_cambiar_partida/clicked': self.cambiar_partida,
                       'b_add_bala/clicked': self.add_bala,
                       'b_drop_bala/clicked': self.drop_bala,
                       'b_add_empleado/clicked': self.add_empleado,
                       'b_drop_empleado/clicked': self.drop_empleado,
                       'ch_bloqueado/clicked': self.bloquear,
                       'b_plastico/clicked': self.cambiar_plastico,
                       'b_imprimir/clicked': self.imprimir,
                       'b_etiquetas/clicked': self.etiquetas,
                       'b_etiq_peq/clicked': self.etiquetas,
                       'b_add_agujas/clicked': self.add_agujas,
                       'b_drop_agujas/clicked': self.drop_agujas,
                       'b_bascula/clicked': self.iniciar_pesaje_auto,
                       'b_add_desecho/clicked': self.add_desecho,
                       'b_drop_desecho/clicked': self.drop_desecho,
                       'b_add_consumo/clicked': self.consumir_manual
                      }
        self.add_connections(connections)
        # Resolve the geotextiles production line and its wrapping-plastic
        # consumption, if configured.
        linea = pclases.LineaDeProduccion.select(
            pclases.LineaDeProduccion.q.nombre.contains('de geotextiles'))
        if linea.count() == 0:
            print "WARNING: La línea de geotextiles no está correctamente "\
                  "dada de alta."
            self.plastico = None
            self.linea = None
        else:
            linea = linea[0]
            self.linea = linea
            formulacion = linea.formulacion
            try:
                self.plastico = [ca.productoCompra
                                 for ca in formulacion.consumosAdicionales
                                    if "plastico" in ca.nombre
                                       and not ca.productoCompra.obsoleto][0]
            except IndexError:
                self.plastico = None
            # Since plastic consumption is now manual, if the
            # consumoAdicional doesn't exist, has no purchase product
            # attached or no sale product uses it, hide the
            # wrapping-plastic selection button because in that case it
            # is useless.
            consumoautomaticoplastico = [ca for ca
                in formulacion.consumosAdicionales if "plastico" in ca.nombre]
            if consumoautomaticoplastico == []:
                ver_consumo_plastico = False
            else:
                ver_consumo_plastico = True
                for ca in consumoautomaticoplastico:
                    ver_consumo_plastico = (ver_consumo_plastico
                                            and ca.productoCompra != None
                                            and not ca.productoCompra.obsoleto
                                            and ca.productosVenta != [])
            self.wids['hbox14'].set_property("visible", ver_consumo_plastico)
        self.inicializar_ventana()
        if self.objeto == None:
            self.ir_a_primero()
        else:
            self.ir_a(objeto)
        gtk.main()
# --------------- Funciones auxiliares ------------------------------
    def es_diferente(self):
        """
        Return True when the information on screen differs from the object
        in memory (i.e. there are unsaved changes).
        """
        partedeproduccion = self.objeto
        if partedeproduccion == None:
            return False # With no active report, report no change vs. the window
        try:
            condicion = utils.str_fecha(partedeproduccion.fecha) == self.wids['e_fecha'].get_text()
            condicion = condicion and (str(partedeproduccion.prodestandar) == self.wids['e_o11'].get_text())
            condicion = condicion and (self.wids['e_fichaproduccion'].get_text() == partedeproduccion.fichaproduccion)
            # NOTE: Nothing else to compare. The article info is that of one of the rolls entered and is chosen via the
            # corresponding button (it determines the searches and the data requested when adding rolls under "production details".)
            obs = partedeproduccion.observaciones
            bounds = self.wids['txt_observaciones'].get_buffer().get_bounds()
            condicion = condicion and (self.wids['txt_observaciones'].get_buffer().get_text(bounds[0], bounds[1]) == obs)
            condicion = condicion and (self.wids['sp_merma'].get_value() / 100.0 == partedeproduccion.merma)
            condicion = condicion and (partedeproduccion.horainicio.strftime('%H:%M') == self.wids['e_hora_ini'].get_text())
            condicion = condicion and (partedeproduccion.horafin.strftime('%H:%M') == self.wids['e_hora_fin'].get_text())
        except AttributeError, msg:
            txt = "%s: partes_de_fabricacion_rollos.py::es_diferente -> Devuelvo True; Excepción 'AttributeError': %s" % (self.usuario, msg)
            self.logger.error(txt)
            partedeproduccion.sync() # If the exception is what I think it is, syncing refreshes the hours as mx objects instead of str.
            condicion = False
        return not condicion # "condicion" checks that everything is equal
    def colorear_rollos(self, tv):
        """
        Install a cell-data callback on every column of the rolls TreeView
        that colors density values (blue = below spec, green = on spec,
        red = above) and the row background by roll type (grey = sample,
        IndianRed = B-roll, orange red = defective roll).
        """
        def cell_func(column, cell, model, itr, numcol):
            cell.set_property("foreground", None) # Default colors. If nothing below matches, this is how the row must stay.
            cell.set_property("cell-background", None)
            if self.producto != None and model[itr][1] != None and model[itr][1].strip() != "":
                # Density vs. product spec decides the text color.
                if model[itr][3] < self.producto.camposEspecificosRollo.gramos:
                    color = "blue"
                elif model[itr][3] == self.producto.camposEspecificosRollo.gramos:
                    color = "green"
                else:
                    color = "red"
                if numcol == 3:     # Density column
                    cell.set_property("foreground", color) # On windows GTK can't shade the colors. It simply isn't shown.
                    cell.set_property("text", "%.1f" % model[itr][3])
                ## Decimal rounding:
                if numcol == 2:
                    cell.set_property("text", "%.1f" % model[itr][2])
                # Set the background color for samples:
                id = model[itr][-1]
                try:
                    articulo = pclases.Articulo.get(id)
                    if articulo.es_rollo():
                        rollo = articulo.rollo
                        if rollo.muestra:
                            cell.set_property("cell-background", "grey")
                        elif rollo.rollob:
                            cell.set_property("cell-background", "IndianRed")
                        else:
                            cell.set_property("cell-background", "white")
                    elif articulo.es_rollo_defectuoso():
                        cell.set_property("cell-background", "orange red")
                except pclases.SQLObjectNotFound, msg:
                    pass
            elif model[itr][5].strip() != "":
                # Incidence row (has a start time): brown text.
                cell.set_property("text", model[itr][numcol])
                cell.set_property("foreground", "saddle brown")
            else:
                cell.set_property("text", model[itr][numcol])
                cell.set_property("foreground", None)
        cols = tv.get_columns()
        for i in xrange(len(cols)):
            column = cols[i]
            cells = column.get_cell_renderers()
            for cell in cells:
                column.set_cell_data_func(cell, cell_func, i)
    def colorear_tabla_empleados(self):
        """
        Prepare and attach the function that highlights employees whose
        worked hours are below (red) or above (orange) the report's
        duration.
        """
        def cell_func(column, cell, model, itr, numcol):
            idht = model[itr][-1]
            ht = pclases.HorasTrabajadas.get(idht)
            duracion_parte = self.objeto.get_duracion()
            ht_horas = ht.horas
            try:
                supera_parte = ht_horas > duracion_parte
            except TypeError:   # ht.horas is datetime.time
                # Normalize to a DateTimeDelta so the comparison works.
                ht_horas = utils.DateTime2DateTimeDelta(ht_horas)
                supera_parte = ht_horas > duracion_parte
            if supera_parte:
                color = "orange"
            elif ht_horas < duracion_parte:
                color = "red"
            else:
                color = "black"
            cell.set_property("foreground", color)
        # Only the "Horas" column (last data column) gets the callback.
        cols = self.wids['tv_empleados'].get_columns()
        numcol = len(cols) - 1
        column = cols[numcol]
        cells = column.get_cell_renderers()
        for cell in cells:
            column.set_cell_data_func(cell, cell_func, numcol)
    def inicializar_ventana(self):
        """
        Initialize the window controls: set their default values, disable
        the unneeded ones, fill the combos and format the TreeViews.
        """
        # CWT: Standard production must not be editable from the report.
        # It must always be the product's, so...
        self.wids['e_o11'].set_has_frame(False)
        self.wids['e_o11'].set_property("editable", False)
        self.wids['e_o11'].set_property("editable", True) # NOTE: XXX: Until
            # all standard productions are correctly set.
        # Initially NOTHING is shown. The user is only offered the
        # option to search or create a new one.
        self.activar_widgets(False)
        self.wids['b_actualizar'].set_sensitive(False)
        self.wids['b_guardar'].set_sensitive(False)
        self.wids['b_nuevo'].set_sensitive(True)
        self.wids['b_buscar'].set_sensitive(True)
        # Right-align numeric summary entries.
        self.wids['e_num_a'].set_alignment(1.0)
        self.wids['e_num_b'].set_alignment(1.0)
        self.wids['e_peso_a'].set_alignment(1.0)
        self.wids['e_peso_b'].set_alignment(1.0)
        self.wids['e_peso_sin_a'].set_alignment(1.0)
        self.wids['e_peso_sin_b'].set_alignment(1.0)
        self.wids['e_metros_a'].set_alignment(1.0)
        self.wids['e_metros_b'].set_alignment(1.0)
        self.wids['e_mlin_a'].set_alignment(1.0)
        self.wids['e_mlin_b'].set_alignment(1.0)
        # Initialization of the remaining widgets:
        # (Name, type, editable, sortable, searchable, update_function)
        cols = (('Partida', 'gobject.TYPE_STRING', False, True, False, None),
                ('Nº Rollo', 'gobject.TYPE_STRING', False, True, True, None),
                ('Peso c.e.(kg)', 'gobject.TYPE_FLOAT', True, True, False,
                    self.cambiar_peso),
                ('gr/m² (s.e.)', 'gobject.TYPE_FLOAT', False, True, False,
                    None),
                ('Motivo parada', 'gobject.TYPE_STRING', False, True, False,
                    self.cambiar_motivo_incidencia),
                ('Hora comienzo', 'gobject.TYPE_STRING', True, True, False,
                    self.cambiar_inicio_incidencia),
                ('Hora terminación', 'gobject.TYPE_STRING', True, True, False,
                    self.cambiar_fin_incidencia),
                ('Duración', 'gobject.TYPE_STRING', False, True, False, None),
                ('Observaciones', 'gobject.TYPE_STRING', True, False, False,
                    self.cambiar_observaciones),
                ('ID', 'gobject.TYPE_INT64', False, False, False, None)
               )
        utils.preparar_listview(self.wids['tv_rollos'], cols)
        self.colorear_rollos(self.wids['tv_rollos'])
        self.wids['tv_rollos'].get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        self.wids['tv_rollos'].add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.wids['tv_rollos'].connect('button_press_event',
                                       self.button_clicked)
        cols = (('Código', 'gobject.TYPE_INT64', False, True, False, None),
                ('Nombre', 'gobject.TYPE_STRING', False, True, False, None),
                ('Apellidos', 'gobject.TYPE_STRING', False, True, True, None),
                ('Horas', 'gobject.TYPE_STRING', True, True, False,
                    self.cambiar_horas_trabajadas),
                ('ID', 'gobject.TYPE_INT64', False, False, False, None))
        utils.preparar_listview(self.wids['tv_empleados'], cols)
        self.colorear_tabla_empleados()
        cols = (('Nº Bala', 'gobject.TYPE_STRING', False, True, True, None),
                ('Peso', 'gobject.TYPE_FLOAT', True, True, False,
                    self.cambiar_peso_bala),
                ('ID', 'gobject.TYPE_INT64', False, False, False, None))
        utils.preparar_listview(self.wids['tv_balas'], cols)
        # Watch out: I'm about to throw away half of what preparar_listview
        # did (the bales view needs a custom model with a progress column).
        import gobject
        model = gtk.ListStore(gobject.TYPE_STRING,
                              gobject.TYPE_FLOAT,
                              gobject.TYPE_FLOAT,
                              gobject.TYPE_INT64)
        self.wids['tv_balas'].set_model(model)
        self.wids['tv_balas'].get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        cell = gtk.CellRendererProgress()
        column = gtk.TreeViewColumn('Consumido', cell)
        column.add_attribute(cell, 'value', 2)
        column.set_sort_column_id(2)
        self.wids['tv_balas'].insert_column(column, 2)
        cols = (('Producto', 'gobject.TYPE_STRING', False, True, True, None),
                ('Cantidad', 'gobject.TYPE_INT64', True, True, False,
                    self.cambiar_cantidad_aguja),
                ('ID', 'gobject.TYPE_INT64', False, False, False, None))
        utils.preparar_listview(self.wids['tv_agujas'], cols)
        cols = (('Producto', 'gobject.TYPE_STRING', False, True, True, None),
                ('Cantidad', 'gobject.TYPE_STRING', False, True, False, None),
                ('ID', 'gobject.TYPE_INT64', False, False, False, None))
        utils.preparar_listview(self.wids['tv_consumos'], cols)
        cols = (('Producto', 'gobject.TYPE_STRING', False, True, True, None),
                ('Cantidad', 'gobject.TYPE_STRING', True, True, False,
                    self.cambiar_cantidad_descuento_material),
                ('Observaciones', 'gobject.TYPE_STRING', True, True, False,
                    self.cambiar_observaciones_descuento_material),
                ('ID', 'gobject.TYPE_INT64', False, False, False, None))
        utils.preparar_listview(self.wids['tv_desecho'], cols)
        self.wids['tv_desecho'].get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        self.wids['ventana'].maximize()
def cambiar_observaciones_descuento_material(self, cell, path, newtext):
"""
Cambia las observaciones del registro.
"""
model = self.wids['tv_desecho'].get_model()
id = model[path][-1]
desecho = pclases.DescuentoDeMaterial.get(id)
desecho.observaciones = newtext
desecho.fechahora = mx.DateTime.localtime() # Actualizo la fecha y hora.
self.objeto.unificar_desechos()
self.rellenar_tabla_desechos()
    def cambiar_cantidad_descuento_material(self, cell, path, newtext):
        """
        Change the discounted quantity of the record and update the
        purchase product stock accordingly (adds back the previous
        quantity and subtracts the new one).
        """
        model = self.wids['tv_desecho'].get_model()
        id = model[path][-1]
        desecho = pclases.DescuentoDeMaterial.get(id)
        try:
            # Strip the unit suffix shown in the cell before parsing.
            newtext=newtext.replace(desecho.productoCompra.unidad, "").strip()
            nueva_cantidad = utils._float(newtext)
        except ValueError:
            utils.dialogo_info(titulo = "ERROR FORMATO NUMÉRICO",
                texto = 'El texto "%s" no es una cantidad correcta.' % newtext,
                padre = self.wids['ventana'])
        else:
            cantidad_desecho_inicial = desecho.cantidad
            productoCompra = desecho.productoCompra
            productoCompra.sync()
            antes = productoCompra.existencias
            # cambiar_cantidad may clamp the change if stock runs out.
            cantidad_desecho_final = desecho.cambiar_cantidad(nueva_cantidad)
            despues = desecho.productoCompra.existencias
            self.logger.warning("%spartes_de_fabricacion_rollos::cambiar_cantidad_descuento_material -> Cambiada cantidad de descuento existente. Stock de %s antes: %f, después: %f. Cantidad de desecho antes: %f. Después: %f." % (self.usuario and self.usuario.usuario + ": " or "", productoCompra.descripcion, antes, despues, cantidad_desecho_inicial, cantidad_desecho_final))
            if cantidad_desecho_final != nueva_cantidad:
                utils.dialogo_info(titulo = "EXISTENCIAS INSUFICIENTES",
                    texto = "No había existencias suficientes del producto para cambiar la\ncantidad desechada a %s." % (utils.float2str(nueva_cantidad)),
                    padre = self.wids['ventana'])
        self.objeto.unificar_desechos()
        self.rellenar_tabla_desechos()
    def actualizar_consumo(self, consumo, descontar):
        """
        Mark the consumption as updated (descontar = True) or not
        (False) and apply or undo its quantity on the purchase
        product's stock.

        NOTE(review): stock is adjusted twice — the legacy
        "existencias" counter and add_existencias on the main
        warehouse. Presumably the latter keeps the per-warehouse stock
        in sync with the legacy field; confirm before touching this.
        """
        consumo.actualizado = descontar
        if descontar:
            # Consume: subtract the quantity from stock.
            consumo.productoCompra.existencias -= consumo.cantidad
            consumo.productoCompra.add_existencias(
                -consumo.cantidad,
                pclases.Almacen.get_almacen_principal())
        else:
            # Undo: give the quantity back.
            consumo.productoCompra.existencias += consumo.cantidad
            consumo.productoCompra.add_existencias(
                consumo.cantidad,
                pclases.Almacen.get_almacen_principal())
    def add_agujas(self, b):
        """
        DEPRECATED. Ask the user for a consumed needle quantity,
        validate it against current stock and create the matching
        Consumo record on the current report.
        """
        producto = self.buscar_producto_compra("AGUJA")
        if producto == None or producto.obsoleto:
            return
        cantidad = utils.dialogo_entrada(titulo = 'CANTIDAD',
            texto = 'Introduzca la cantidad consumida:',
            padre = self.wids['ventana'])
        if cantidad == None:
            return
        try:
            cantidad = float(cantidad)
        except ValueError:
            utils.dialogo_info(titulo = "ERROR",
                texto = 'Cantidad incorrecta',
                padre = self.wids['ventana'])
            return
        if cantidad > producto.existencias:
            utils.dialogo_info(titulo = 'CANTIDAD INSUFICIENTE',
                texto = 'No hay existencias suficientes en '
                'almacén.\nVerifique que ha tecleado '
                'la cantidad correctamente\ny que las '
                'entradas en almacén del producto han '
                'sido contabilizadas.',
                padre = self.wids['ventana'])
            return
        # NOTE: antes == despues == -3 marks a needle consumption.
        # (This sentinel convention should be changed sooner or later.)
        consumo = pclases.Consumo(antes = -3,
            despues = -3,
            cantidad = cantidad,
            actualizado = False,
            parteDeProduccion = self.objeto,
            productoCompra = producto)
        self.actualizar_consumo(consumo, True)
        self.objeto.unificar_consumos()
        actualizar_albaran_interno_con_tubos(self.objeto)
        self.rellenar_agujas()
    def drop_agujas(self, b):
        """
        DEPRECATED. Remove the selected needle consumption: give its
        quantity back to stock, detach it from the report and delete it.
        """
        model, iter = self.wids['tv_agujas'].get_selection().get_selected()
        if iter == None:
            return
        idconsumo = model[iter][-1]
        consumo = [c for c in self.objeto.consumos if c.id == idconsumo][0]
        self.actualizar_consumo(consumo, False)  # False = undo the discount.
        consumo.parteDeProduccion = None
        consumo.destroySelf()
        self.rellenar_agujas()
    def cambiar_cantidad_aguja(self, cell, path, texto):
        """
        DEPRECATED. Edited-cell callback for the needle consumptions
        table: update the consumption quantity, refresh stock and the
        needles total.
        """
        try:
            cantidad = float(texto)
        except:
            utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "El texto introducido (%s) no respeta el formato numérico.\nUse solo números y el punto como separador decimal." % texto)
            return
        model = self.wids['tv_agujas'].get_model()
        idc = model[path][-1]
        consumo = pclases.Consumo.get(idc)
        # Give the old quantity back to stock before applying the new one.
        cantidad_anterior = consumo.cantidad
        consumo.productoCompra.existencias += cantidad_anterior
        consumo.productoCompra.add_existencias(
            cantidad_anterior,
            pclases.Almacen.get_almacen_principal())
        consumo.cantidad = cantidad
        model[path][1] = consumo.cantidad
        self.actualizar_consumo(consumo, True)
        # NOTE(review): needle consumptions are created with antes ==
        # despues == -3 (see add_agujas) but this total filters on -1;
        # confirm which sentinel is authoritative before changing either.
        cantidad = 0
        for consumo in [c for c in self.objeto.consumos if c.antes == -1 and c.despues == -1]:
            cantidad += consumo.cantidad
        self.wids['e_total_agujas'].set_text(utils.float2str(cantidad, 0))
    def cambiar_cantidad_antes(self, cell, path, texto):
        """
        DEPRECATED. Edited-cell callback for the "before" column of the
        raw-material (granza) consumptions: validate the new value,
        recompute the consumed quantity and refresh the granza total.
        """
        try:
            antes = float(texto)
        except:
            utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "El texto introducido (%s) no respeta el formato numérico.\nUse solo números y el punto como separador decimal." % texto, padre = self.wids['ventana'])
            return
        model = self.wids['tv_granza'].get_model()
        idc = model[path][-1]
        consumo = pclases.Consumo.get(idc)
        # "before" must be >= "after" and non-negative.
        if antes < consumo.despues or antes < 0:
            utils.dialogo_info(titulo = "ERROR", texto = "La cantidad después de producir no puede ser superior a la de antes de\nempezar la fabricación y ninguna debe ser negativa.", padre = self.wids['ventana'])
            return
        consumo.antes = antes
        consumo.cantidad = consumo.antes - consumo.despues
        model[path][1] = consumo.antes
        model[path][2] = consumo.despues
        model[path][3] = consumo.cantidad
        self.actualizar_consumo(consumo, True)
        # Granza rows are those without the -1/-2 sentinel flags.
        cantidad = 0
        for consumo in [c for c in self.objeto.consumos if (c.antes != -1 and c.despues != -1) and (c.antes != -2 and c.despues != -2)]:
            cantidad += consumo.cantidad
        self.wids['e_total_granza'].set_text(utils.float2str(cantidad))
    def cambiar_horas_trabajadas(self, cell, path, newtext):
        """
        Edited-cell callback for worked hours. Accepts "HH:MM" (also
        with "." or "," as separator) or a bare number; validates that
        the result does not exceed the report's duration and stores it.
        """
        # Normalize separators; a bare number like "830" becomes "08:30".
        newtext = newtext.replace(".", ":").replace(",", ":")
        if ":" not in newtext:
            if len(newtext) < 4:
                newtext = ("0" * (4 - len(newtext))) + newtext
            newtext = "%s:%s" % (newtext[:-2], newtext[-2:])
        model = self.wids['tv_empleados'].get_model()
        id = model[path][-1]
        ht = pclases.HorasTrabajadas.get(id)
        try:
            try:
                dtdelta = mx.DateTime.DateTimeDelta(0, float(newtext.split(':')[0]), float(newtext.split(':')[1]), 0)
            except IndexError:
                # No minutes part: interpret the text as whole hours.
                dtdelta = mx.DateTime.DateTimeDelta(0, int(newtext), 0)
            newtext = utils.str_hora_corta(dtdelta)
            if dtdelta > self.objeto.get_duracion():
                utils.dialogo_info(titulo = "TIEMPO INCORRECTO", texto = "El tiempo trabajado no puede superar la\nduración del parte de producción.", padre = self.wids['ventana'])
                return
            ht.horas = newtext
            ht.sync(); ht.syncUpdate()
            model[path][3] = ht.horas.strftime('%H:%M')
        except (ValueError, TypeError), msg:
            utils.dialogo_info(titulo = "ERROR", texto = 'El texto "%s" no representa el formato horario.' % newtext, padre = self.wids['ventana'])
    def cambiar_peso_bala(self, cell, path, newtext):
        """
        DEPRECATED. Edited-cell callback: change the weight of the bale
        behind the edited row and redraw the bales table.
        """
        try:
            peso = float(newtext)
        except:
            utils.dialogo_info(titulo = 'ERROR DE FORMATO', texto = 'No introdujo un número válido', padre = self.wids['ventana'])
            return
        idbala = self.wids['tv_balas'].get_model()[path][-1]
        bala = pclases.Bala.get(idbala)
        bala.pesobala = peso
        self.rellenar_balas()
def cambiar_peso(self, cell, path, newtext):
"""
Cambia el peso de un rollo en el ListView de rollos fabricados en el parte.
"""
if self.usuario == None or self.usuario.nivel <= 2:
self.cambiar_peso_rollo(cell, path, newtext)
else:
utils.dialogo_info(titulo = "SIN PERMISOS",
texto = "No tiene permiso para cambiar el peso de los rollos.",
padre = self.wids['ventana'])
    def activar_widgets(self, s):
        """
        Enable or disable (sensitive = True/False) every widget of the
        window that depends on the displayed object. *s* is evaluated
        as a boolean.
        """
        # Locked reports stay read-only unless the user is privileged
        # (nivel <= 1) or there is no logged-in user at all.
        s = (s and ((self.usuario and self.usuario.nivel <= 1)
            or not self.objeto.bloqueado
            or not self.usuario))
        if self.objeto:
            # A report created in this session stays editable.
            s = s or self.objeto.id == self.__lecturaescritura
        ws = ('table1', 'tv_rollos', 'hbox1', 'hbox2', 'frame1', 'hbox3',
              'frame3', 'b_fecha', 'b_hora_ini', 'b_hora_fin', 'b_articulo',
              'b_add_rollo', 'b_drop_rollo','b_add_incidencia',
              'b_drop_incidencia', 'b_borrar', 'ch_bloqueado', 'vbox4',
              'e_fichaproduccion')
        for w in ws:
            self.wids[w].set_sensitive(s)
        # Disable the product-change button when the report's batch has
        # already started producing some product, so a batch never mixes
        # products. BUT if the current report already has articles, the
        # user wants to change the product of every roll in it, so the
        # button is left enabled in that case.
        if self.objeto and self.usuario and self.usuario.nivel > 0:
            codpart = self.wids['e_partida_gtx'].get_text()
            try:
                partida = pclases.Partida.selectBy(codigo = codpart)[0]
            except IndexError:
                pass
            else:
                if partida.get_producto() and not self.objeto.articulos:
                    self.wids['b_articulo'].set_sensitive(False)
    def ir_a_primero(self):
        """
        Select the report to show on window startup. The "create a new
        report" prompt is hard-disabled (nuevo = False), so this loads
        the most recent rolls production report instead.
        """
        nuevo = False  # Hard-coded: never offer to create a new report here.
        if nuevo:
            self.crear_nuevo_partedeproduccion(None)
        else:
            partedeproduccion = self.objeto
            try:
                if partedeproduccion != None: partedeproduccion.notificador.desactivar()
                # Rolls reports are told apart from bales reports by the
                # observaciones format: six ';'-separated fields means
                # bales, anything else means rolls. Fragile, but it is
                # the discriminator used throughout this module.
                partesdeproduccion = pclases.ParteDeProduccion.select("""partida_cem_id IS NULL AND NOT observaciones LIKE '%;%;%;%;%;%'""")
                partesdeproduccion = partesdeproduccion.orderBy("-id")
                partedeproduccion=partesdeproduccion[0]
                # Subscribe to update notifications for the new object.
                partedeproduccion.notificador.activar(self.aviso_actualizacion)
            except:
                partedeproduccion = None
            self.objeto = partedeproduccion
            self.actualizar_ventana()
def refinar_resultados_busqueda(self, resultados):
"""
Muestra en una ventana de resultados todos los
registros de "resultados".
Devuelve el id (primera columna de la ventana
de resultados) de la fila seleccionada o None
si se canceló.
"""
filas_res = []
for r in resultados:
filas_res.append((r.id,
utils.str_fecha(r.fecha),
utils.str_hora_corta(r.horainicio),
utils.str_hora_corta(r.horafin),
"CLIC PARA VER",
r.observaciones))
idpartedeproduccion = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione parte de producción de rollos',
cabeceras = ('ID Interno',
'Fecha',
'Hora inicio',
'Hora fin',
'Partida',
'Observaciones'),
func_change = self.mostrar_info_parte,
padre = self.wids['ventana'])
if idpartedeproduccion < 0:
return None
else:
return idpartedeproduccion
def mostrar_info_parte(self, tv):
model, iter = tv.get_selection().get_selected()
if iter!=None and model[iter][-2] == "CLIC PARA VER":
parte = pclases.ParteDeProduccion.get(model[iter][0]) # En los
# diálogos de resultado el ID va al revés.
if parte.es_de_balas() and parte.articulos != []:
try:
lotepartida = parte.articulos[0].bala.lote.codigo
except AttributeError:
lotepartida = parte.articulos[0].bigbag.loteCem.codigo
elif not parte.es_de_balas() and parte.articulos != []:
lotepartida = parte.articulos[0].partida.codigo
else:
lotepartida = 'VACIO'
producto = parte.articulos != [] and parte.articulos[0].productoVenta.nombre or 'VACÍO'
model[iter][-2] = lotepartida
    def calcular_duracion(self, hfin, hini):
        """
        DEPRECATED / OBSOLETE. Compute hfin - hini, normalizing spans
        that wrap past midnight.
        """
        if isinstance(hfin, mx.DateTime.DateTimeDeltaType):
            # A pure time-of-day delta: push it into the next day so the
            # subtraction below cannot go negative.
            hfin = hfin + mx.DateTime.oneDay
        duracion = hfin - hini
        if duracion.day > 0:
            # Remove the artificial extra day added above.
            duracion -= mx.DateTime.oneDay
        return duracion
    def rellenar_agujas(self):
        """
        DEPRECATED. Formerly populated the needles table (the old
        implementation is kept below for reference). Now a no-op.
        """
        #model = self.wids['tv_agujas'].get_model()
        #model.clear()
        #cantidad = 0
        #for consumo in [c for c in self.objeto.consumos if c.antes == -3 and c.despues == -3]:
        #    model.append((consumo.productoCompra.descripcion, int(consumo.cantidad), consumo.id))
        #    cantidad += consumo.cantidad
        #self.wids['e_total_agujas'].set_text(utils.float2str(cantidad, 0))
        pass
    def rellenar_widgets(self):
        """
        Fill the window widgets with the current production report's
        data. self.objeto must not be None; the caller is responsible
        for checking that before calling.
        """
        self.wids['sp_merma'].set_value(self.objeto.merma * 100)
        # Added 4/6/2006. Alternative plastic consumption:
        self.wids['e_plastico'].set_text(
            self.plastico and self.plastico.descripcion or "SIN ENVOLVER")
        # NOTE: on "New" the previous report's lot/article info is kept
        # on purpose (user request: new reports usually share them). If
        # that requirement changes, reset them in the nuevo_ handler.
        partedeproduccion = self.objeto
        self.wids['ch_bloqueado'].set_active(self.objeto.bloqueado)
        # Global/header information:
        self.wids['e_fecha'].set_text(utils.str_fecha(partedeproduccion.fecha))
        self.wids['e_hora_ini'].set_text(partedeproduccion.horainicio.strftime('%H:%M'))
        self.wids['e_hora_fin'].set_text(partedeproduccion.horafin.strftime('%H:%M'))
        self.wids['e_o11'].set_text(str(partedeproduccion.prodestandar))
        self.wids['e_tiempo_total'].set_text(partedeproduccion.get_duracion().strftime('%H:%M'))
        self.wids['txt_observaciones'].get_buffer().set_text(partedeproduccion.observaciones)
        # No longer used; it also triggered a GTK warning:
        #   g_signal_emit_valist: assertion `signal_id > 0' failed
        #self.wids['sp_merma'].set_value(partedeproduccion.merma * 100)
        # Detail information:
        if self.objeto.articulos != []:
            self.producto = self.objeto.articulos[0].productoVenta
        self.rellenar_datos_articulo(self.producto)
        self.wids['e_fichaproduccion'].set_text(partedeproduccion.fichaproduccion)
        self.rellenar_tabla_empleados()
        # Show the batch codes of the first article that has them.
        for a in self.objeto.articulos:
            try:
                self.wids['e_partida_gtx'].set_text(a.partida.codigo)
                self.wids['e_partida'].set_text(a.partida.partidaCarga.codigo)
                break
            except:
                self.wids['e_partida_gtx'].set_text("")
                self.wids['e_partida'].set_text('')
        self.rellenar_tabla_rollos()
        self.rellenar_balas()
        self.rellenar_agujas()
        self.rellenar_tabla_consumos()
        self.rellenar_tabla_desechos()
        self.objeto.make_swap()
        self.check_permisos()
    def rellenar_tabla_desechos(self):
        """
        Populate the material-discounts TreeView with the current
        report's discounts, ordered by id.
        """
        parte = self.objeto
        if parte != None:
            model = self.wids['tv_desecho'].get_model()
            # Detach the model while filling it to avoid per-row redraws.
            self.wids['tv_desecho'].set_model(None)
            model.clear()
            desechos = parte.descuentosDeMaterial[:]
            try:
                desechos.sort(lambda c1, c2: c1 != None and c2 != None and int(c1.id - c2.id) or 0)
            except TypeError, msg:
                self.logger.error("partes_de_fabricacion_rollos.py (rellenar_tabla_desechos): Error ordenando descuento de material (%s):\n%s" % (msg, desechos))
            for c in desechos:
                if c.productoCompraID != None:
                    unidad = c.productoCompra.unidad
                    producto = c.productoCompra.descripcion
                else:
                    unidad = ""
                    producto = ""
                model.append((producto,
                              "%s %s" % (utils.float2str(c.cantidad), unidad),
                              c.observaciones,
                              c.id))
            self.wids['tv_desecho'].set_model(model)
def rellenar_tabla_consumos(self):
"""
Rellena la tabla de consumos del parte.
"""
parte = self.objeto
if parte != None:
model = self.wids['tv_consumos'].get_model()
self.wids['tv_consumos'].set_model(None)
model.clear()
consumos = parte.consumos[:]
try:
consumos.sort(lambda c1, c2: c1 != None and c2 != None and int(c1.id - c2.id) or 0)
except TypeError, msg:
self.logger.error("partes_de_fabricacion_rollos.py (rellenar_tabla_consumos): Error ordenando consumos (%s):\n%s" % (msg, consumos))
for c in parte.consumos:
if c.productoCompraID != None:
unidad = c.productoCompra.unidad
producto = c.productoCompra.descripcion
else:
unidad = ""
producto = ""
model.append((producto,
"%s %s" % (utils.float2str(c.cantidad), unidad),
c.id))
self.wids['tv_consumos'].set_model(model)
    def check_permisos(self):
        """
        Enable or disable the window's widgets according to the current
        user's permission string: "w" = edit, "r" = search, "x" = create.
        """
        # "w" is checked first because activar_widgets toggles every
        # button, including the ones ruled by the other two permissions.
        if "w" in self.__permisos:  # Can edit reports.
            self.activar_widgets(True)
        else:
            # Without "w", only an unlocked report or one created in
            # this session remains editable.
            if self.__lecturaescritura == self.objeto.id or not self.objeto.bloqueado:
                self.activar_widgets(True)
            else:
                self.activar_widgets(False)
        if "r" in self.__permisos:  # Can browse previous reports.
            self.wids['b_buscar'].set_sensitive(True)
        else:
            self.wids['b_buscar'].set_sensitive(False)
        if "x" in self.__permisos:  # Can create new reports.
            self.wids['b_nuevo'].set_sensitive(True)
        else:
            self.wids['b_nuevo'].set_sensitive(False)
    def colorear_pesos(self):
        """
        Color e_consumo_estimado and e_consumo_real red when the total
        bale weight of the batch is below them (more consumed than
        available), white otherwise. All lookups are best-effort: any
        failure counts as 0.
        """
        try:
            real = float(self.wids['e_consumo_real'].get_text())
        except:
            real = 0
        try:
            #balas = float(self.wids['e_total_balas'].get_text())
            balas = sum([b.pesobala for b in self.get_partida().balas])
            # Calling get_partida before rellenar_rollos may return the
            # wrong batch.
        except:
            balas = 0
        try:
            estimado = float(self.wids['e_consumo_estimado'].get_text())
        except:
            estimado = 0
        # DEBUG: print "balas", balas, "estimado", estimado, "real", real
        if balas < estimado:
            self.wids['e_consumo_estimado'].modify_base(gtk.STATE_NORMAL, self.wids['e_consumo_estimado'].get_colormap().alloc_color("red"))
        else:
            self.wids['e_consumo_estimado'].modify_base(gtk.STATE_NORMAL, self.wids['e_consumo_estimado'].get_colormap().alloc_color("white"))
        if balas < real:
            self.wids['e_consumo_real'].modify_base(gtk.STATE_NORMAL, self.wids['e_consumo_real'].get_colormap().alloc_color("red"))
        else:
            self.wids['e_consumo_real'].modify_base(gtk.STATE_NORMAL, self.wids['e_consumo_real'].get_colormap().alloc_color("white"))
def cmpfechahora_or_numrollo(self, detalle1, detalle2):
"""
Si tiene número de rollo, ordena por número de rollo. En otro caso,
ordena por fecha y hora de fabricación/incidencia.
"""
try:
if detalle1.rollo.numrollo < detalle2.rollo.numrollo:
return -1
if detalle1.rollo.numrollo > detalle2.rollo.numrollo:
return 1
return 0
except AttributeError:
if detalle1.fechahora < detalle2.fechahora:
return -1
elif detalle1.fechahora > detalle2.fechahora:
return 1
else:
return 0
def partida(self, d):
try:
res = d.partida.codigo
except AttributeError:
res = ""
return res
def rollo(self, d):
try:
res = d.codigo
except AttributeError:
res = ''
return res
def peso(self, d):
try:
res = d.peso
except AttributeError:
res = 0.0
return res
def densidad(self, d):
try:
if d.es_rollo():
res = d.rollo.densidad
elif d.es_rollo_defectuoso():
res = d.rolloDefectuoso.densidad
else:
res = 0.0
except AttributeError:
res = 0.0
return res
def longitud(self, d):
try:
res = d.largo
except AttributeError:
res = 0.0
return res
def ancho(self, d):
try:
res = d.ancho
except AttributeError:
res = 0.0
return res
def motivo(self, d):
try:
res = d.tipoDeIncidencia.descripcion
except AttributeError:
if d.es_rollo_defectuoso():
res = "%s m lineales" % (utils.float2str(d.largo, 1))
else:
res = ''
return res
def horaini(self, d):
try:
res = d.horainicio.strftime('%H:%M')
except AttributeError:
res = ''
return res
def horafin(self, d):
try:
res = d.horafin.strftime('%H:%M')
except AttributeError:
res = ''
return res
def duracion(self, d):
try:
duracion = (d.horafin - d.horainicio)
try:
res = duracion.strftime('%H:%M')
except AttributeError:
res = "%d:%02d" % (duracion.seconds / 3600,
duracion.seconds / 60 % 60)
except AttributeError:
res = ''
return res
def observaciones(self, d):
try:
res = d.observaciones
except AttributeError:
if d.es_rollo():
res = d.rollo.observaciones
elif d.es_rollo_defectuoso():
res = d.rolloDefectuoso.observaciones
else:
res = ""
return res
def calcular_tiempo_trabajado(self, parte):
tiempototal = parte.get_duracion()
paradas = [p for p in parte.incidencias]
tiempoparadas = 0
for parada in paradas:
tiempoparadas += parada.get_duracion()
return tiempototal, tiempototal - tiempoparadas
def convertir_densidad_a_float(self, text, regexp):
lista_digitos = regexp.findall(text)
lista_digitos = [i for i in lista_digitos if i != '']
if lista_digitos == []:
return None
else:
try:
return float(".".join(lista_digitos))
except ValueError:
return None
def cargar_imagen(self, w, imagen):
"""
Carga la imagen "imagen" del directorio "imagenes" en el widget.
"""
im = os.path.join('..', 'imagenes', imagen)
w.set_from_file(im)
def mostrar_icono(self, densidad_anterior, densidad_actual):
"""
Muestra un icono que indica si la densidad media ha subido, bajado, etc...
"""
if densidad_anterior == None or densidad_actual == None:
self.cargar_imagen(self.wids['im_dm'], 'none.png')
else:
densidad_actual = round(densidad_actual, 1)
if densidad_anterior < densidad_actual:
if (densidad_anterior * 1.1) >= densidad_actual:
self.cargar_imagen(self.wids['im_dm'], 'up_right.png')
else:
self.cargar_imagen(self.wids['im_dm'], 'up.png')
elif densidad_anterior > densidad_actual:
if (densidad_anterior * 0.9) <= densidad_actual:
self.cargar_imagen(self.wids['im_dm'], 'down_right.png')
else:
self.cargar_imagen(self.wids['im_dm'], 'down.png')
else:
self.cargar_imagen(self.wids['im_dm'], 'right.png')
    def mostrar_densidad_media(self, densidades):
        """
        Show the mean of *densidades* in e_densidad_media and refresh
        the trend icon against the previously displayed value (which is
        re-parsed from the entry text itself).
        """
        regexp = re.compile("\d*")
        try:
            densidad_anterior = self.convertir_densidad_a_float(self.wids['e_densidad_media'].get_text(), regexp)
            densidad_media = sum(densidades) / len(densidades)
            self.wids['e_densidad_media'].set_text("%s gr/m²" % (utils.float2str(densidad_media, 1)))
        except ZeroDivisionError:
            # No densities at all: show a dash and a blank trend icon.
            densidad_media = None
            self.wids['e_densidad_media'].set_text("-")
        self.mostrar_icono(densidad_anterior, densidad_media)
    def rellenar_tabla_rollos(self, actualizar_tabla = True):
        """
        Rebuild the rolls/incidents TreeView and recompute the footer
        totals. With actualizar_tabla = False the model is left
        untouched and only the footer data is refreshed.
        """
        model = self.wids['tv_rollos'].get_model()
        if actualizar_tabla:
            model.clear()
        # Rolls and incidents share the table, ordered by roll number
        # or timestamp (see cmpfechahora_or_numrollo).
        detallesdeproduccion = ([i for i in self.objeto.incidencias]
                                + [a for a in self.objeto.articulos])
        detallesdeproduccion.sort(self.cmpfechahora_or_numrollo)
        pesototal = 0
        # densidades = []
        # TreeView rows:
        for detalle in detallesdeproduccion:
            # densidades.append(self.densidad(detalle))
            if actualizar_tabla:
                model.append((self.partida(detalle),
                              self.rollo(detalle),
                              self.peso(detalle),
                              self.densidad(detalle),
                              self.motivo(detalle),
                              self.horaini(detalle),
                              self.horafin(detalle),
                              self.duracion(detalle),
                              self.observaciones(detalle),
                              detalle.id))
            pesototal += self.peso(detalle)
        # Footer fields:
        # self.mostrar_densidad_media(densidades)
        self.wids['e_consumo_real'].set_text('%s' % (
            utils.float2str(round(pesototal, 2))))
        rollos = [d for d in self.objeto.articulos]
        self.wids['e_num_rollos'].set_text(str(len(rollos)))
        metros = sum([r.largo for r in rollos])
        self.wids['e_metros_lineales'].set_text(utils.float2str(metros, 0))
        partedeproduccion = self.objeto
        tiempototal, tiemporeal = self.calcular_tiempo_trabajado(
            partedeproduccion)
        self.wids['e_tiempo_real_trabajado'].set_text(
            tiemporeal.strftime('%H:%M'))
        try:
            productividad = (tiemporeal.seconds / tiempototal.seconds) * 100
        except ZeroDivisionError:
            productividad = 100
        self.wids['e_productividad'].set_text("%s %%" % (
            utils.float2str(productividad)))
        if self.producto != None:
            # Theoretical weight (kg): meters * width * g/m2 / 1000.
            peso_total = (metros * self.producto.camposEspecificosRollo.ancho
                          * self.producto.camposEspecificosRollo.gramos / 1000)
        else:
            peso_total = 0
        self.wids['e_peso_total'].set_text("%s" % utils.float2str(peso_total))
        # Theoretical total weight, WITHOUT packaging.
        merma = self.wids['sp_merma'].get_value() / 100.0  # (stored as an
        # integer percentage)
        consumo_estimado = peso_total / (1.0 - merma)
        self.wids['e_consumo_estimado'].set_text("%s" % (
            utils.float2str(consumo_estimado)))
        rendimiento = self.objeto.calcular_rendimiento()
        self.wids['e_rendimiento'].set_text("%s %%" % (
            utils.float2str(rendimiento)))
        # Walk to the last row, select it and scroll it into view so
        # the most recent roll is always visible.
        iter_ant = model.get_iter_first()
        if iter_ant != None:
            iter_post = model.iter_next(iter_ant)
        else:
            iter_post = None
        while iter_post != None:
            iter_ant = iter_post
            iter_post = model.iter_next(iter_ant)
        if iter_ant != None:
            path_siguiente = model.get_path(iter_ant)
            sel = self.wids['tv_rollos'].get_selection()
            sel.select_iter(iter_ant)
            self.wids['tv_rollos'].scroll_to_cell(path_siguiente,
                                                  use_align = True)
            column = self.wids['tv_rollos'].get_column(2)
            cell = column.get_cell_renderers()[0]
            self.wids['tv_rollos'].set_cursor_on_cell(path_siguiente,
                                                      column,
                                                      cell,
                                                      start_editing = False)
        # NOTE: the mean density ignores defective rolls.
        self.mostrar_densidad_media([a.rollo.densidad
                                     for a in self.objeto.articulos
                                     if a.rollo != None])
        self.rellenar_pie_rollos_ab()
        self.colorear_pesos()
def rellenar_pie_rollos_ab(self):
"""
Recuenta e introduce en el pie del parte los
totales de rollos A y B (alias "defectuosos").
"""
pesoa = pesob = 0.0
peso_sina = peso_sinb = 0.0
metrosa = metrosb = 0.0
numrollosa = numrollosb = 0
mlina = mlinb = 0.0
pesoEmbalaje = None
metrosLineales = None
metrosCuadrados = None
for a in self.objeto.articulos:
if a.es_rollo_defectuoso():
numrollosb += 1
pesob += a.peso
peso_sinb += a.peso_sin
metrosb += a.superficie
mlinb += a.largo
elif a.es_rollo():
if pesoEmbalaje == None:
pesoEmbalaje = a.rollo.productoVenta.camposEspecificosRollo.pesoEmbalaje
if metrosLineales == None:
metrosLineales = a.rollo.productoVenta.camposEspecificosRollo.metrosLineales
if metrosCuadrados == None:
metrosCuadrados = a.rollo.productoVenta.camposEspecificosRollo.metros_cuadrados
numrollosa += 1
apeso = a.peso
#pesoa += a.peso
pesoa += apeso
#peso_sina += a.peso_sin
peso_sina += apeso - pesoEmbalaje
#metrosa += a.superficie
#mlina += a.largo
else:
self.logger.error("partes_de_fabricacion_rollos::rellenar_pie_rollos_ab -> ¡Artículo ID %d no es rollo ni rollo defectuoso!" % (a.id))
if metrosLineales != None:
mlina = metrosLineales * numrollosa
if metrosCuadrados != None:
metrosa = metrosCuadrados * numrollosa
self.wids['e_num_a'].set_text(`numrollosa`)
self.wids['e_num_b'].set_text(`numrollosb`)
self.wids['e_peso_a'].set_text(utils.float2str(pesoa))
self.wids['e_peso_b'].set_text(utils.float2str(pesob))
self.wids['e_peso_sin_a'].set_text(utils.float2str(peso_sina))
self.wids['e_peso_sin_b'].set_text(utils.float2str(peso_sinb))
self.wids['e_metros_a'].set_text(utils.float2str(metrosa, 1))
self.wids['e_metros_b'].set_text(utils.float2str(metrosb, 1))
self.wids['e_mlin_a'].set_text(utils.float2str(mlina, 1))
self.wids['e_mlin_b'].set_text(utils.float2str(mlinb, 1))
def actualizar_consumo_estimado(self, sp):
if self.producto != None:
rollos = [d for d in self.objeto.articulos]
metros = sum([r.productoVenta.camposEspecificosRollo.metrosLineales for r in rollos])
peso_total = metros*self.producto.camposEspecificosRollo.ancho*self.producto.camposEspecificosRollo.gramos/1000
merma = self.wids['sp_merma'].get_value() / 100.0 # (está en % como entero)
consumo_estimado = peso_total / (1.0 - merma)
self.wids['e_consumo_estimado'].set_text("%sf" % (utils.float2str(consumo_estimado)))
self.colorear_pesos()
return False # Porque further processing is required. I suppose.
def rellenar_datos_articulo(self, producto):
"""
A partir del artículo recibido, completa la información
de la cabecera del formulario (ancho, etc...) en
función de los datos de la rollo.
También verifica si el parte tiene ficha de fabricación. Si no la
tiene, pone la del producto recibido.
"""
if producto == None:
self.wids['e_articulo'].set_text('')
self.wids['e_grsm2'].set_text('')
self.wids['e_ancho'].set_text('')
self.wids['e_long_rollo'].set_text('')
else:
self.wids['e_articulo'].set_text(producto.nombre)
ce = producto.camposEspecificosRollo
self.wids['e_grsm2'].set_text(ce and str(ce.gramos) or '')
self.wids['e_ancho'].set_text(ce and str(ce.ancho) or '')
self.wids['e_long_rollo'].set_text(ce and str(ce.metrosLineales) or '')
if not self.objeto.fichaproduccion:
self.objeto.fichaproduccion = ce.fichaFabricacion
# --------------- Manejadores de eventos ----------------------------
    def cambiar_peso_rollo(self, cell, path, newtext):
        """
        Edited-cell callback: change the weight of the roll behind the
        edited row, re-apply the additional-material discounts, update
        the density column and move the edit cursor to the next row.
        """
        model = self.wids['tv_rollos'].get_model()
        if model[path][1] == '': # No roll number: the row is an incident.
            return
        id = model[path][-1]
        articulo = pclases.Articulo.get(id)
        # NOTE(review): if the article is neither a roll nor a defective
        # roll, "rollo" below is unbound (NameError). Presumably every
        # row in this table is one of the two — confirm.
        if articulo.es_rollo():
            rollo = articulo.rollo
        elif articulo.es_rollo_defectuoso():
            rollo = articulo.rolloDefectuoso
        try:
            # Undo the old material discount before changing the weight…
            descontar_material_adicional(self, rollo.articulo, restar = False)
            rollo.peso = float(newtext)
        except ValueError:
            utils.dialogo_info('NÚMERO INCORRECTO', 'El peso del rollo debe ser un número.', padre = self.wids['ventana'])
            return
        # …and re-apply it with the new weight.
        descontar_material_adicional(self, rollo.articulo, restar = True)
        model[path][2] = rollo.peso # Column 2 = weight. Column 3 = density.
        # Density = net weight (g) / surface (m2).
        if articulo.es_rollo():
            pesosin = (rollo.peso - articulo.productoVenta.camposEspecificosRollo.pesoEmbalaje) * 1000
        elif articulo.es_rollo_defectuoso():
            pesosin = (rollo.peso - rollo.pesoEmbalaje) * 1000
        try:
            dens = pesosin / (articulo.superficie)
        except ZeroDivisionError:
            dens = 0
        rollo.densidad = dens
        model[path][3] = rollo.densidad
        self.rellenar_tabla_rollos(actualizar_tabla = False)
        # Advance the edit cursor to the weight cell of the next row.
        iter = model.get_iter(path)
        iter = model.iter_next(iter)
        if iter != None:
            path_siguiente = model.get_path(iter)
            column = self.wids['tv_rollos'].get_column(2)
            cell = column.get_cell_renderers()[0]
            self.wids['tv_rollos'].set_cursor_on_cell(path_siguiente, column, cell, start_editing=False)
    def cambiar_motivo_incidencia(self, cell, path, newtext):
        """
        Edited-cell callback for the incident reason column. Editing
        the reason is not implemented; the callback is a no-op.
        """
        # Functionality not implemented.
        pass
    def cambiar_observaciones(self, cell, path, newtext):
        """
        Edited-cell callback: store the new observations on the roll or
        incident behind the edited row and refresh the cell.
        """
        model = self.wids['tv_rollos'].get_model()
        id = model[path][-1]
        if model[path][1] != '': # Has a roll number: the row is a roll.
            articulo = pclases.Articulo.get(id)
            # NOTE(review): "rollo" is unbound when the article is
            # neither roll type — presumably that cannot happen here.
            if articulo.es_rollo():
                rollo = articulo.rollo
            elif articulo.es_rollo_defectuoso():
                rollo = articulo.rolloDefectuoso
            rollo.observaciones = newtext
        else:
            incidencia = pclases.Incidencia.get(id)
            incidencia.observaciones = newtext
        model[path][-2] = newtext
def cambiar_inicio_incidencia(self, cell, path, newtext):
model = self.wids['tv_rollos'].get_model()
if model[path][1] != '': # Nº rollo, tiene, no es una incidencia.
return
id = model[path][-1]
incidencia = pclases.Incidencia.get(id)
try:
incidencia.horainicio = mx.DateTime.DateTimeFrom(
day = self.objeto.fecha.day,
month = self.objeto.fecha.month,
year = self.objeto.fecha.year,
hour = int(newtext.split(":")[0]),
minute = int(newtext.split(":")[1]))
if (incidencia.horafin - incidencia.horainicio).days > 1:
incidencia.horainicio + mx.DateTime.oneDay
while incidencia.horainicio < self.objeto.fechahorainicio:
# El parte está en la franja de medianoche y la
# incidencia comienza después de las 12.
horaini += mx.DateTime.oneDay # Debe llevar la fecha del
# día siguiente.
horafin += mx.DateTime.oneDay
except (ValueError, IndexError):
utils.dialogo_info('HORA INCORRECTA',
'La fecha y hora deben respetar el formato inicial.\nSe va '
'a reestablecer el valor antiguo,\na continuación trate de '
'editar este valor conservando su formato.',
padre = self.wids['ventana'])
return
self.rellenar_tabla_rollos()
def cambiar_fin_incidencia(self, cell, path, newtext):
model = self.wids['tv_rollos'].get_model()
if model[path][1] != '': # Nº rollo, tiene, no es una incidencia.
return
id = model[path][-1]
incidencia = pclases.Incidencia.get(id)
try:
incidencia.horafin = mx.DateTime.DateTimeFrom(
day = self.objeto.fecha.day,
month = self.objeto.fecha.month,
year = self.objeto.fecha.year,
hour = int(newtext.split(":")[0]),
minute = int(newtext.split(":")[1]))
if (incidencia.horafin - incidencia.horainicio).days < 0:
incidencia.horafin += mx.DateTime.oneDay
while incidencia.horainicio < self.objeto.fechahorainicio:
# El parte está en la franja de medianoche y la incidencia
# comienza después de las 12.
horaini += mx.DateTime.oneDay # Debe llevar la fecha del
# día siguiente.
horafin += mx.DateTime.oneDay
except (ValueError, IndexError):
utils.dialogo_info('HORA INCORRECTA',
'La fecha y hora deben respetar el formato inicial.\nSe va a'
' reestablecer el valor antiguo,\na continuación trate de '
'editar este valor conservando su formato.',
padre = self.wids['ventana'])
return
self.rellenar_tabla_rollos()
    def crear_nuevo_partedeproduccion(self, widget):
        """
        Callback of the b_nuevo button. After user confirmation, create
        a new empty production report with default values (today, an
        8-hour shift starting at the current hour), make it the active
        object of the window and warn about shift overlaps.
        """
        self.ultima_etiqueta = None
        partedeproduccion = self.objeto
        # No data is asked for: everything starts with default values.
        if not utils.dialogo('Se creará un nuevo parte de producción vacío.',
                             'NUEVO PARTE',
                             padre = self.wids['ventana']):
            return
        if partedeproduccion != None:
            partedeproduccion.notificador.desactivar()
        # horainicio = current hour at minute 0; horafin = 8 hours later
        # (modulo 24 for shifts crossing midnight).
        partedeproduccion = pclases.ParteDeProduccion(fecha = time.localtime(),
            horainicio = time.struct_time(time.localtime()[:4]
                                            + (0,0)
                                            + time.localtime()[6:]),
            horafin = time.struct_time(time.localtime()[:3]
                                        +((time.localtime()[3]+8)%24, 0,0)
                                        +time.localtime()[6:]),
            prodestandar = 0,
            observaciones = '',
            bloqueado = False)
        partedeproduccion._corregir_campos_fechahora()
        self.objeto = partedeproduccion
        self.wids['e_partida'].set_text('')
        self.wids['e_partida_gtx'].set_text('')
        self.wids['e_fichaproduccion'].set_text('')
        self.add_empleados_calendario()
        # A report created in this session stays editable.
        self.__lecturaescritura = self.objeto.id
        self.actualizar_ventana()
        self.objeto.notificador.activar(self.aviso_actualizacion)
        verificar_solapamiento(partedeproduccion, self.wids['ventana'])
def refinar_resultados_busqueda_producto(self, resultados):
    """
    Shows every record of "resultados" in a results window.
    Returns the id (first column of the results window) of the selected
    row, or None when the dialog was cancelled.
    """
    filas_res = [(r.id, r.codigo, r.nombre, r.descripcion)
                 for r in resultados]
    idproducto = utils.dialogo_resultado(filas_res,
        titulo = 'Seleccione producto',
        cabeceras = ('ID Interno', 'Código','Nombre', 'Descripción'),
        padre = self.wids['ventana'])
    if idproducto >= 0:
        return idproducto
    return None
def buscar_partedeproduccion(self, widget):
    """
    Shows a search dialog and then the results. The selected object
    becomes active in the window unless the user cancels in the results
    window. The search text may be a report date (day/month[/year],
    "-" or "/" separated) or a product name; an empty text lists all
    geotextile reports.
    """
    partedeproduccion = self.objeto
    a_buscar = utils.dialogo_entrada(titulo = "BUSCAR PARTE",
                texto = "Introduzca fecha del parte o nombre del producto:",
                padre = self.wids['ventana'])
    if a_buscar != None:
        try:
            if a_buscar != '':
                # First try to interpret the text as a date.
                a_buscar = a_buscar.replace("-", "/")
                if a_buscar.count('/') == 1:
                    # Day/month only: assume the current year.
                    a_buscar = "%s/%d" % (a_buscar, mx.DateTime.localtime().year)
                if len(a_buscar.split('/')[-1]) == 2:
                    fecha = time.strptime(a_buscar, '%d/%m/%y')
                else:
                    fecha = time.strptime(a_buscar, '%d/%m/%Y')
                resultados = pclases.ParteDeProduccion.select(pclases.ParteDeProduccion.q.fecha == fecha)
                resultados = [r for r in resultados if r.es_de_geotextiles()]
            else:
                # Empty text: every geotextile report (heuristic over
                # the "observaciones" field format).
                resultados = pclases.ParteDeProduccion.select("""NOT observaciones LIKE '%;%;%;%;%;%'""")
        except:
            # Not parseable as a date: treat the text as a product name.
            producto = pclases.ProductoVenta.select(pclases.AND(pclases.ProductoVenta.q.nombre.contains(a_buscar),
                                                                pclases.ProductoVenta.q.camposEspecificosRolloID != None))
            resultados = pclases.ParteDeProduccion.select()
            # Progress bar: with many reports this takes a while.
            vpro = VentanaProgreso(padre = self.wids['ventana'])
            vpro.mostrar()
            i = 0.0
            tot = resultados.count()
            partes = []
            if producto.count() > 1:
                # Several product matches: let the user pick one.
                idproducto = self.refinar_resultados_busqueda_producto(producto)
                if idproducto != None:
                    for p in resultados:
                        if p.articulos != [] and p.articulos[0].productoVentaID == idproducto:
                            partes.append(p)
                        vpro.set_valor(i/tot, 'Buscando partes')
                        i += 1
                else:
                    vpro.ocultar()
                    return
            elif producto.count() == 1:
                for p in resultados:
                    if p.articulos != [] and p.articulos[0].productoVentaID == producto[0].id:
                        partes.append(p)
                    vpro.set_valor(i/tot, 'Buscando partes')
                    i += 1
            else:
                # No product matched: fall back to geotextile reports.
                for p in resultados:
                    if p.es_de_geotextiles():
                        partes.append(p)
                    vpro.set_valor(i/tot, 'Buscando partes')
                    i += 1
            vpro.ocultar()
            resultados = partes
        # NOTE: see ir_a_primero to understand the selection criterion.
        # WARNING: used in two places in the code; refactor into a small
        # helper in case the criterion must change in the future.
        try:
            len_resultados = len(resultados)
        except:
            # SelectResults objects have no len(); use count() instead.
            len_resultados = resultados.count()
        if len_resultados > 1:
            ## Refine the results.
            idpartedeproduccion = self.refinar_resultados_busqueda(resultados)
            if idpartedeproduccion == None:
                return
            resultados = [pclases.ParteDeProduccion.get(idpartedeproduccion)]
            # A list comprehension is assumed faster than a new
            # SQLObject get. Keep a one-element results list; the
            # actual switch of the current object happens below.
        elif len_resultados < 1:
            ## Search produced no results.
            utils.dialogo_info('SIN RESULTADOS', 'La búsqueda no produjo resultados.\nPruebe a cambiar el texto buscado o déjelo en blanco para ver una lista completa.\n(Atención: Ver la lista completa puede resultar lento si el número de elementos es muy alto)', padre = self.wids['ventana'])
            return
        ## Exactly one result.
        # First cancel the old object's update notification.
        if partedeproduccion != None:
            partedeproduccion.notificador.desactivar()
        # Make the found object current.
        try:
            partedeproduccion = resultados[0]
        except IndexError:
            utils.dialogo_info(titulo = "ERROR",
                        texto = "Se produjo un error al recuperar la información.\nCierre y vuelva a abrir la aplicación antes de volver a intentarlo.",
                        padre = self.wids['ventana'])
            return
        # And enable its notification callback:
        partedeproduccion.notificador.activar(self.aviso_actualizacion)
        self.objeto = partedeproduccion
        self.actualizar_ventana()
def guardar(self, widget):
    """
    Saves the content of the entries and the other input widgets into
    the current object and synchronizes it with the database.
    """
    partedeproduccion = self.objeto
    # Keep the previous date/times to check overlapping afterwards.
    ye_olde_fecha, ye_olde_horainicio, ye_olde_horafin = partedeproduccion.fecha, partedeproduccion.horainicio, partedeproduccion.horafin
    ye_olde_horainicio = utils.str_hora_corta(partedeproduccion.horainicio)
    ye_olde_horafin = utils.str_hora_corta(partedeproduccion.horafin)
    # Object fields to store: fecha, horainicio, horafin, prodestandar
    # and observaciones (with its special formatting).
    fecha = self.wids['e_fecha'].get_text()
    horainicio = self.wids['e_hora_ini'].get_text()
    horafin = self.wids['e_hora_fin'].get_text()
    prodestandar = self.wids['e_o11'].get_text()
    try:
        prodestandar = float(prodestandar)
    except:
        # Non-numeric text falls back to 0.
        prodestandar = 0
    # A first non-zero value also initializes the product's standard
    # production when it is still unset.
    if prodestandar != 0 and self.producto != None and self.producto.prodestandar == 0:
        self.producto.prodestandar = prodestandar
    bounds = self.wids['txt_observaciones'].get_buffer().get_bounds()
    observaciones = self.wids['txt_observaciones'].get_buffer().get_text(bounds[0], bounds[1])
    # Momentarily disable the notifier while fields are written.
    partedeproduccion.notificador.activar(lambda: None)
    # Update the object's data.
    partedeproduccion.prodestandar = prodestandar
    partedeproduccion.observaciones = observaciones
    partedeproduccion.fichaproduccion = self.wids['e_fichaproduccion'].get_text()
    partedeproduccion.merma = self.wids['sp_merma'].get_value() / 100.0
    try:
        partedeproduccion.fecha = utils.parse_fecha(fecha)
    except:
        # Unparseable date: fall back to "now".
        partedeproduccion.fecha = time.localtime()
    partedeproduccion.horainicio = horainicio
    partedeproduccion.horafin = horafin
    partedeproduccion._corregir_campos_fechahora()
    # Verify that the report does not overlap other reports:
    verificar_solapamiento(partedeproduccion,
                           self.wids['ventana'],
                           ye_olde_fecha,
                           ye_olde_horainicio,
                           ye_olde_horafin)
    # Force the DB update instead of waiting for SQLObject to do it:
    partedeproduccion.sync()
    # Re-enable the notifier.
    partedeproduccion.notificador.activar(self.aviso_actualizacion)
    self.actualizar_ventana()
    self.wids['b_guardar'].set_sensitive(False)
def borrar_parte(self, boton):
    """
    Tries to delete the current production report after asking for
    confirmation. If deletion fails (typically because rolls or
    employees are still related), an informative dialog is shown and
    nothing changes; otherwise the window moves to the first report.
    """
    if not utils.dialogo('Se va a intentar eliminar el parte actual.\nSi hay operaciones complejas implicadas se cancelará el borrado.\nDe cualquier forma, no se aconseja eliminar ningún parte que ya tenga rollos relacionadas.\n¿Está seguro de borrar el parte actual?', 'ELIMINAR PARTE', padre = self.wids['ventana']): return
    partedeproduccion = self.objeto
    partedeproduccion.notificador.desactivar()
    try:
        partedeproduccion.destroySelf()
    except:
        # destroySelf can fail on FK constraints (rolls, employees).
        utils.dialogo_info('PARTE NO BORRADO', 'El parte no se eliminó.\nSi tiene rollos o empleados asociados, trate primero de eliminarlos y vuelva a intentarlo.', padre = self.wids['ventana'])
        return
    self.ir_a_primero()
def mostrar_calendario(self, boton):
    """
    Pops up a calendar dialog (preloaded with the report's date when
    available), writes the chosen date into the date entry, saves the
    report and refreshes the calendar employees and their table.
    """
    fecha_defecto = self.objeto and self.objeto.fecha or None
    fecha_elegida = utils.mostrar_calendario(fecha_defecto = fecha_defecto,
                                             padre = self.wids['ventana'])
    self.wids['e_fecha'].set_text(utils.str_fecha(fecha_elegida))
    self.guardar(None)
    self.add_empleados_calendario()
    self.rellenar_tabla_empleados()
def set_hora_inicial(self, boton):
    """
    Shows a time picker preloaded with the start-time entry and, if the
    user accepts, stores the chosen time as the report's horainicio,
    then delegates to set_hora_final (which also adjusts the employees
    when needed) and checks overlapping with other reports.
    """
    valor_hora_ini = self.wids['e_hora_ini'].get_text()
    try:
        # The entry only holds H:M; append 0 seconds.
        valor_hora_ini = [int(v) for v in valor_hora_ini.split(':')] + [0]
    except:
        # Unparseable text: default to midnight.
        valor_hora_ini = [0,0,0]
    hora_ini = utils.mostrar_hora(valor_hora_ini[0], valor_hora_ini[1], valor_hora_ini[2], 'HORA INICIO PARTE')
    # DONE: the start/end hour title must be shown in the window.
    if hora_ini != None:
        partedeproduccion = self.objeto
        partedeproduccion.notificador.desactivar()
        partedeproduccion.horainicio = hora_ini
        partedeproduccion._corregir_campos_fechahora()
        self.set_hora_final(boton) # Employees are adjusted there if needed.
        self.actualizar_ventana()
        partedeproduccion.notificador.activar(self.aviso_actualizacion)
        verificar_solapamiento(partedeproduccion, self.wids['ventana'])
def set_hora_final(self, boton):
    """
    Shows a time picker preloaded with the end-time entry and, if the
    user accepts, stores the chosen time as the report's horafin,
    synchronizes the object, refreshes the calendar employees and
    checks overlapping with other reports.
    """
    valor_hora_fin = self.wids['e_hora_fin'].get_text()
    try:
        valor_hora_fin = [int(v) for v in valor_hora_fin.split(':')] + [0] # Because the entry only holds H:M
    except:
        # Unparseable text: default to midnight.
        valor_hora_fin = [0,0,0]
    hora_fin = utils.mostrar_hora(valor_hora_fin[0], valor_hora_fin[1], valor_hora_fin[2], 'HORA FIN PARTE')
    if hora_fin != None:
        partedeproduccion = self.objeto
        partedeproduccion.notificador.desactivar()
        partedeproduccion.horafin = hora_fin
        partedeproduccion._corregir_campos_fechahora()
        partedeproduccion.syncUpdate()
        partedeproduccion.sync()
        self.add_empleados_calendario()
        self.actualizar_ventana()
        partedeproduccion.notificador.activar(self.aviso_actualizacion)
        verificar_solapamiento(partedeproduccion, self.wids['ventana'])
def set_articulo(self, boton):
    """
    Shows a product search dialog for products made by the factory*.
    The selected one becomes the report's article: its information is
    shown in the header and it constrains the lines that can be added.
    * Products that are rolls or whose production line is the fiber
    line.
    """
    if self.objeto.articulos != []:
        # Changing the product also changes it on already produced
        # rolls of this report, so ask for confirmation first.
        txt = """
Ya se ha iniciado la producción de un artículo. Si cambia el producto del parte actual
cambiará también en los rollos ya fabricados pertenecientes al parte.
Si lo que desea es iniciar una nueva producción use el botón "Nuevo parte" y comience
un nuevo parte de producción. Si lo que quiere es cambiar el producto del parte actual
y todas sus rollos fabricados pulse "Sí".
¿Desea cambiar el producto fabricado en el parte?
"""
        if not utils.dialogo(titulo = '¿CAMBIAR LA PRODUCCIÓN?',
                             texto = txt,
                             padre = self.wids['ventana']):
            return
    idlineasrollos = pclases.LineaDeProduccion.select(pclases.OR(
        pclases.LineaDeProduccion.q.nombre=='Línea de geotextiles',
        pclases.LineaDeProduccion.q.nombre=='Línea de geocompuestos',
        pclases.LineaDeProduccion.q.nombre=='Línea de comercializados'))
    # WARNING: lines must be named EXACTLY "Línea de xxxxxxxxxx" in the DB.
    if idlineasrollos.count() == 0:
        # There is no roll production line in the database.
        utils.dialogo_info('ERROR LÍNEAS DE ROLLOS',
            'No hay líneas de geotextiles o geocompuestos/'
            'comercializados en las bases de datos del '
            'sistema.',
            padre = self.wids['ventana'])
        return
    # OR together the product criterion of every matching line.
    criterio = pclases.ProductoVenta.q.lineaDeProduccionID == idlineasrollos[0].id
    for i in xrange(1, idlineasrollos.count()):
        criterio = pclases.OR(criterio, pclases.ProductoVenta.q.lineaDeProduccionID == idlineasrollos[i].id)
    producto = self.buscar_producto(criterio)
    if producto != None:
        self.wids['e_o11'].set_text(utils.float2str(producto.prodestandar))
        self.wids['e_fichaproduccion'].set_text(
            producto.camposEspecificosRollo.fichaFabricacion)
        self.objeto.fichaproduccion \
            = self.wids['e_fichaproduccion'].get_text()
        self.guardar(None)
        self.producto = producto
        self.rellenar_datos_articulo(self.producto)
        # Propagate the new product to every article of the report.
        for a in self.objeto.articulos:
            a.productoVenta = self.producto
    else:
        self.producto = None
def buscar_producto(self, criterio_lineas):
    """
    Shows a search dialog and then the results, restricted by the
    received production-line criterion. Returns the selected product,
    or None when the search is cancelled, yields nothing or fails.
    """
    producto = self.producto
    a_buscar = utils.dialogo_entrada(titulo = "BUSCAR PRODUCTO",
                texto = "Introduzca código, nombre o descripción de producto:",
                padre = self.wids['ventana'])
    if a_buscar != None:
        try:
            ida_buscar = int(a_buscar)
        except ValueError:
            # Not numeric: use an id that matches nothing.
            ida_buscar = -1
        criterio = pclases.OR(pclases.ProductoVenta.q.codigo.contains(a_buscar),
                              pclases.ProductoVenta.q.descripcion.contains(a_buscar),
                              pclases.ProductoVenta.q.nombre.contains(a_buscar),
                              pclases.ProductoVenta.q.id == ida_buscar)
        criterio = pclases.AND(criterio, criterio_lineas)
        resultados = pclases.ProductoVenta.select(criterio)
        if resultados.count() > 1:
            ## Refine the results.
            idproducto = self.refinar_resultados_busqueda_producto(resultados)
            if idproducto == None:
                return None
            resultados = [pclases.ProductoVenta.get(idproducto)]
        elif resultados.count() < 1:
            ## Search produced no results.
            utils.dialogo_info('SIN RESULTADOS', 'La búsqueda no produjo resultados.\nPruebe a cambiar el texto buscado o déjelo en blanco para ver una lista completa.\n(Atención: Ver la lista completa puede resultar lento si el número de elementos es muy alto)',
                               padre = self.wids['ventana'])
            return None
        ## Exactly one result.
        # First cancel the old product's update notification.
        if producto != None:
            producto.notificador.desactivar()
        # Make the found product current.
        try:
            producto = resultados[0]
        except IndexError:
            utils.dialogo_info(titulo = "ERROR",
                        texto = "Se produjo un error al recuperar la información.\nCierre y vuelva a abrir la ventana antes de volver a intentarlo.",
                        padre = self.wids['ventana'])
            return None
    return producto
def refinar_resultados_busqueda_producto(self, resultados):
    """
    Presents "resultados" in a selection window and returns the id
    (first column of the results window) of the chosen row, or None
    when the user cancels.
    NOTE(review): duplicates the method of the same name defined
    earlier in this class; the later definition is the one in effect.
    """
    filas_res = []
    for registro in resultados:
        fila = (registro.id, registro.codigo, registro.nombre,
                registro.descripcion)
        filas_res.append(fila)
    seleccion = utils.dialogo_resultado(filas_res,
        titulo = 'Seleccione producto',
        cabeceras = ('ID Interno', 'Código', 'Nombre', 'Descripción'),
        padre = self.wids['ventana'])
    if seleccion < 0:
        return None
    return seleccion
def add_rollo(self, boton):
    """
    Adds one or several rolls to the production report.
    Asks for a roll number or range of numbers and inserts one article
    and its corresponding roll per code.
    1.- Check self.producto != None.
    2.- Check there is a partida number in the window.
    3.- Ask for the number of rolls or range of roll numbers.
    4.- For each roll number:
        4.1.- Create the roll with codigo = str(numrollo).
        4.2.- Attach the roll to the partida.
        4.3.- Create the article related to the roll.
        4.4.- Relate the article to the report (directly on creation).
    NEW! --> 4.5.- Automatically discount the additional material.
    """
    try:
        numpartida = self.wids['e_partida_gtx'].get_text().upper().replace("P-", "")
    except ValueError:
        utils.dialogo_info(titulo = 'PARTIDA ERRÓNEA',
                           texto = 'El número de partida no es válido.',
                           padre = self.wids['ventana'])
        return
    try:
        numpartida = int(numpartida)
        partida = pclases.Partida.select(
            pclases.Partida.q.numpartida == numpartida)[0]
    except IndexError:
        # No partida with that number exists.
        utils.dialogo_info(titulo = 'PARTIDA ERRÓNEA',
            texto = 'La partida no se encontró.\nCree una partida nueva.',
            padre = self.wids['ventana'])
        return
    except ValueError:
        # The entry did not contain a number.
        utils.dialogo_info(titulo = 'PARTIDA ERRÓNEA',
            texto = 'El número de partida %s no es válido.' % (numpartida),
            padre = self.wids['ventana'])
        return
    if self.producto == None:
        utils.dialogo_info(titulo = 'SELECCIONAR PRODUCTO',
            texto = 'Seleccione primero el producto fabricado.',
            padre = self.wids['ventana'])
        return
    if self.usuario == None or self.usuario.nivel > 3:
        # Anonymous or low-privilege users go through automatic weighing.
        self.iniciar_pesaje_auto(None)
    else:
        generador = self.pedir_rango()
        i = 0.0
        tot = len(generador)
        if tot > 500 and not utils.dialogo(titulo = "¿ESTÁ SEGURO?",
                       texto = "Está intentando añadir más de "
                               "500 rollos al parte.\n"
                               "¿Está seguro?",
                       padre = self.wids['ventana']):
            return
        vpro = VentanaProgreso(padre = self.wids['ventana'])
        vpro.mostrar()
        for numrollo in generador:
            # Negative numbers encode defective rolls (see pedir_rango).
            if numrollo < 0:
                numrollo *= -1
                defectuoso = True
            else:
                defectuoso = False
            try:
                vpro.set_valor(i/tot, 'Añadiendo rollo nº %d...'%numrollo)
                articulo = crear_articulo(numrollo, partida,
                                          self.producto, self.objeto,
                                          objeto_ventana_parte = self,
                                          defectuoso = defectuoso)
                if articulo != None:
                    vpro.set_valor(i/tot, '(%s) Descontando material...' % articulo.codigo)
                    descontar_material_adicional(self, articulo)
                i += 1
            except psycopg_ProgrammingError:
                # Most likely a duplicated roll number (UNIQUE violation).
                vpro.ocultar()
                utils.dialogo_info(titulo = 'ROLLO NO CREADO',
                    texto = 'El rollo no se pudo crear. Verifique que el número no esté duplicado.',
                    padre = self.wids['ventana'])
                self.actualizar_ventana()
                return
        vpro.ocultar()
        self.actualizar_ventana()
def pedir_rango(self):
    """
    Asks for a range of roll numbers.
    Returns an iterable of roll numbers that starts at the first number
    of the range (or the only one, when a single code is typed) and
    ends at the last one. An empty answer or a cancel returns [].
    Negative numbers mean an "X" was present in the range and the rolls
    must be created as defective.
    """
    # Suggest the next free roll code as the default value.
    ultimo_mas_uno = pclases.Rollo._connection.queryOne("""SELECT ultimo_codigo_rollo_mas_uno(); """)[0]
    # BUGFIX: the prompt used to read "indovidual" (typo).
    rango = utils.dialogo_entrada(titulo = 'INTRODUZCA RANGO',
        texto = 'Rango de números de rollos o el código individual.\nEscriba el rango de códigos de la forma "xxxx-yyyy", ambos inclusive.',
        padre = self.wids['ventana'],
        valor_por_defecto = ultimo_mas_uno)
    if rango == '' or rango == None:
        return []
    rango = rango.upper()
    # An "X" anywhere in the text marks the rolls as defective.
    defectuosos = "X" in rango
    rango = rango.replace("R", "").replace("X", "")
    try:
        if '-' in rango:
            ini, fin = rango.split('-')
            ini = int(ini)
            fin = int(fin)
        else:
            ini = int(rango)
            fin = ini
    except ValueError:
        # Covers non-numeric codes and malformed ranges ("1-2-3").
        utils.dialogo_info(titulo = 'CÓDIGO INCORRECTO', texto = 'Los códigos deben ser numéricos.\n\nVerifique que los ha escrito correctamente y que ha separado el rango con un guión.', padre = self.wids['ventana'])
        return []
    if fin+1 - ini > 100:
        # Overly large ranges need an explicit confirmation.
        if not utils.dialogo(titulo = "¿ESTÁ SEGURO?",
                texto = "Ha introducido un rango demasiado grande (%s).\n¿Está realmente seguro de que quiere introducir %d artículos al parte?" % (rango, fin+1-ini),
                padre = self.wids['ventana']):
            return xrange(0,0)
    if not defectuosos:
        return xrange(ini, fin+1)
    else:
        # Defective rolls are encoded as negatives, in ascending
        # absolute value.
        return range(-fin, -ini + 1)[::-1] # HACK: Python 2.3 has no __reversed__ on xrange.
def drop_rollo(self, boton):
    """
    Deletes the rolls selected in the rolls treeview (and their
    articles) from the report, giving back the additional material that
    was discounted for them. When deletion fails, the article/roll
    relations are restored and the user is informed.
    """
    model, paths = self.wids['tv_rollos'].get_selection().get_selected_rows()
    if paths == None or paths == []:
        utils.dialogo_info('ROLLO NO SELECCIONADO', 'Debe seleccionar el rollo que desee eliminar del parte.', padre = self.wids['ventana'])
        return
    if not utils.dialogo('¿Eliminar del parte?', 'BORRAR ROLLOS DE CONTROL DE PRODUCCIÓN', padre = self.wids['ventana']):
        return
    for path in paths:
        id = model[path][-1]
        if model[path][1] == 0 or model[path][1] == "" or model[path][1] == " ": # The roll number is empty
            # An empty roll-number column means the row is an incidence.
            utils.dialogo_info('ROLLO NO SELECCIONADO', 'Debe seleccionar un rollo.\nPara eliminar una incidencia use «Eliminar incidencia».', padre = self.wids['ventana'])
        else:
            articulo = pclases.Articulo.get(id)
            try:
                rollo = articulo.rollo
                rolloDefectuoso = articulo.rolloDefectuoso
                # Give back the material consumed by this article.
                descontar_material_adicional(self, articulo, restar = False)
                articulo.parteDeProduccion = None
                articulo.rollo = None
                articulo.rolloDefectuoso = rolloDefectuoso
                articulo.parteDeProduccion = None
                articulo.destroySelf()
                if rollo != None:
                    rollo.destroySelf()
                if rolloDefectuoso != None:
                    rolloDefectuoso.destroySelf()
            except:
                utils.dialogo_info(titulo = 'ERROR: ROLLO NO BORRADO',
                    texto = 'El rollo no ha sido eliminado completamente.\nVerifique que no haya sido vendido ya.\nAnote el número de rollo (%s) y contacte con el administrador de la aplicación\npara subsanar la inconsistencia.' % (rollo and rollo.codigo or "no disponible"),
                    padre = self.wids['ventana'])
                # Inform and re-attach the roll, since it was not
                # deleted because of some relation to another object.
                # rollo.parteDeProduccion = self.objeto
                try:
                    articulo.sync()
                    articulo.rollo = rollo
                    articulo.rolloDefectuoso = rolloDefectuoso
                    articulo.parteDeProduccion = self.objeto
                    descontar_material_adicional(self, articulo, restar = True)
                except pclases.SQLObjectNotFound: # Already deleted
                    pass
                except AttributeError: # The article exists but no longer has a bale
                    self.logger.error("El artículo ID %s ya no tiene rollo, no se ha podido sumar el material empleado al borrarlo y tampoco se pudo eliminar el artículo en sí." % articulo.id)
    self.actualizar_ventana()
def add_incidencia(self, boton):
    """
    Asks for an incidence type, its start and end time and an optional
    comment, then creates an Incidencia attached to the current
    production report, provided the time span fits inside the report's
    shift. The incidence times are anchored on the report's date,
    crossing midnight when the end precedes the start.
    """
    ii = pclases.TipoDeIncidencia.select()
    # BUGFIX: the combo text read "Seleccine" (typo).
    idincidencia = utils.dialogo_combo('SELECCIONE UN TIPO DE INCIDENCIA',
        'Seleccione un tipo de incidencia del desplegable inferior',
        [(i.id, i.descripcion) for i in ii],
        padre = self.wids['ventana'])
    if idincidencia == None:
        return
    utils.dialogo_info('HORA INICIO',
        'A continuación seleccione la hora de inicio de la incidencia.',
        padre = self.wids['ventana'])
    horaini = utils.mostrar_hora(time.localtime()[3], 0, 0, 'HORA INICIO')
    if horaini == None:
        return
    utils.dialogo_info('HORA FIN',
        'A continuación seleccione la hora de finalización de la'
        ' incidencia.',
        padre = self.wids['ventana'])
    horafin = utils.mostrar_hora(time.localtime()[3], 0, 0, 'HORA FIN')
    if horafin == None:
        return
    self.objeto.sync()
    # Anchor both times on the report's date.
    # BUGFIX: the format used "%2d", which space-pads month/day;
    # "%02d" zero-pads as a date string requires.
    horaini = mx.DateTime.DateTimeFrom('%d-%02d-%02d %s' %
                                       (self.objeto.fecha.year,
                                        self.objeto.fecha.month,
                                        self.objeto.fecha.day,
                                        horaini))
    horafin = mx.DateTime.DateTimeFrom('%d-%02d-%02d %s' %
                                       (self.objeto.fecha.year,
                                        self.objeto.fecha.month,
                                        self.objeto.fecha.day,
                                        horafin))
    if horaini > horafin:
        # The incidence crosses midnight.
        horafin += mx.DateTime.oneDay
    while horaini < self.objeto.fechahorainicio: # The report sits on the
        # midnight band and the incidence starts after 12: both times
        # must carry the following day's date.
        horaini += mx.DateTime.oneDay
        horafin += mx.DateTime.oneDay
    if entran_en_turno(self.objeto, horaini, horafin):
        observaciones = utils.dialogo_entrada(titulo = 'OBSERVACIONES',
            texto = 'Introduzca observaciones sobre la incidencia:',
            padre = self.wids['ventana'])
        if observaciones == None:
            return
        incidencia = pclases.Incidencia(
            tipoDeIncidencia = pclases.TipoDeIncidencia.get(idincidencia),
            horainicio = horaini,
            horafin = horafin,
            parteDeProduccion = self.objeto,
            observaciones = observaciones)
        # NOTE: the DB is designed to support several items per
        # production detail. For now a single item per detail is kept,
        # so a new detail line is created each time.
        self.actualizar_ventana()
    else:
        utils.dialogo_info(titulo = 'ERROR HORARIO',
            texto = 'La franja horaria que ha seleccionado no entra '
                    'en el turno del parte.',
            padre = self.wids['ventana'])
def drop_incidencia(self, boton):
    """
    Deletes the incidences selected in the rolls treeview from the
    report. Rows with a non-empty roll number are rolls, not
    incidences, and are rejected with an informative dialog.
    """
    model, paths = self.wids['tv_rollos'].get_selection().get_selected_rows()
    if paths == None or paths == []:
        utils.dialogo_info('INCIDENCIA NO SELECCIONADA',
            'Debe seleccionar la incidencia que desee eliminar del parte.',
            padre = self.wids['ventana'])
    else:
        if not utils.dialogo('¿Eliminar del parte?',
                             'BORRAR INCIDENCIAS DE CONTROL DE PRODUCCIÓN',
                             padre = self.wids['ventana']):
            return
        for path in paths:
            id = model[path][-1]
            if model[path][1] != '': # The roll number is NOT empty
                utils.dialogo_info('ROLLO SELECCIONADO',
                    'Ha seleccionado una rollo en lugar de una incidencia.\nUse «Quitar rollo» para eliminarla.',
                    padre = self.wids['ventana'])
            else:
                incidencia = pclases.Incidencia.get(id)
                # Detach from the report before destroying.
                incidencia.parteDeProduccion = None
                try:
                    incidencia.destroySelf()
                except:
                    utils.dialogo_info(titulo = 'INCIDENCIA NO ELIMINADA',
                        texto = 'Ocurrió un error al intentar eliminar la incidencia.',
                        padre = self.wids['ventana'])
        self.actualizar_ventana()
def add_empleado(self, w):
    """
    Lets the user pick one or several active factory-floor employees
    from a list and attaches them to the current report, then refreshes
    the employees table.
    """
    empleados = pclases.Empleado.select(pclases.AND(
        pclases.Empleado.q.activo == True,
        pclases.Empleado.q.planta == True),
        orderBy = 'apellidos')
    # Keep only employees whose labor category is also factory-floor.
    empleados = [(e.id, e.nombre, e.apellidos) for e in empleados \
                    if e.planta and \
                       e.activo and \
                       e.categoriaLaboral and \
                       e.categoriaLaboral.planta]
                    # e.categoriaLaboral.planta and \
                    # e.categoriaLaboral.lineaDeProduccion == self.linea)]
    ids = utils.dialogo_resultado(filas = empleados,
        titulo = 'SELECCIONE EMPLEADOS',
        cabeceras = ('ID', 'Nombre', 'Apellidos'),
        multi = True,
        padre = self.wids['ventana'])
    if ids == [-1]:
        # Dialog cancelled.
        return
    for id in ids:
        try:
            e = pclases.Empleado.get(id)
            self.objeto.addEmpleado(e)
        except:
            utils.dialogo_info(titulo = 'NÚMERO INCORRECTO',
                texto = 'El empleado con código '
                        'identificador %d no existe o '
                        'no se pudo agregar.' % id,
                padre = self.wids['ventana'])
    self.rellenar_tabla_empleados()
def drop_empleado(self, w):
    """
    Removes the employee selected in the employees treeview from the
    current production report and refreshes the employees table.
    """
    seleccion = self.wids['tv_empleados'].get_selection()
    if seleccion.count_selected_rows() == 0:
        return
    model, path = seleccion.get_selected()
    idempleado = model[path][0]   # Column 0 holds the employee id.
    empleado = pclases.Empleado.get(idempleado)
    self.objeto.removeEmpleado(empleado)
    self.rellenar_tabla_empleados()
def rellenar_tabla_empleados(self):
    """
    Repopulates the employees treeview with the worked-hours records of
    the current report, capping each record's hours at the report's
    total duration.
    """
    model = self.wids['tv_empleados'].get_model()
    model.clear()
    horas_parte = self.objeto.get_duracion()
    for ht in self.objeto.horasTrabajadas:
        try:
            supera_duracion_parte = ht.horas > horas_parte
        except TypeError:
            # Incomparable types: convert before comparing.
            supera_duracion_parte = (
                utils.DateTime2DateTimeDelta(ht.horas) > horas_parte)
        if supera_duracion_parte:
            # Never store more worked hours than the report lasts.
            ht.horas = horas_parte.strftime('%H:%M')
            ht.sync()
        model.append((ht.empleado.id,
                      ht.empleado.nombre,
                      ht.empleado.apellidos,
                      ht.horas.strftime('%H:%M'),
                      ht.id))
def cambiar_partida(self, w):
    """
    Asks for a partida number from the keyboard and switches the report
    to it. Changing the partida also changes the partida and sale
    product of every article already related to the report. The partida
    must already exist; when the previous partida would be left empty,
    the switch is refused for non-privileged users.
    """
    texto = """
Al cambiar la partida del parte, se cambiará la partida de
todos los productos relacionados con él, así como el artículo
al que pertencen los productos.
Si quiere comenzar la producción de una nueva partida sin afectar
a los ya existentes, cree un nuevo parte."""
    if (self.objeto.articulos != []
        and not utils.dialogo(titulo = '¿ESTÁ SEGURO?',
                              texto = texto,
                              padre = self.wids['ventana'])):
        return
    codigo = utils.dialogo_entrada(titulo = '¿NÚMERO DE PARTIDA?',
        texto = 'Introduzca el número de partida de geotextiles '\
                'a fabricar:',
        padre = self.wids['ventana'])
    if codigo == None: # Cancel
        return
    try:
        codigo = int(codigo.upper().replace("P-", ""))
        partida = pclases.Partida.select(
            pclases.Partida.q.numpartida == codigo)[0]
    except (TypeError, ValueError), msg:
        # Not a valid number: log and bail out silently.
        self.logger.error("partes_de_fabricacion_rollos::cambiar_partida "\
                          "-> Código partida: %s. Excepción capturada: %s"
                          % (codigo, msg))
        return
    except IndexError:
        # No partida with that number exists.
        utils.dialogo_info(titulo = "PARTIDA NO ENCONTRADA",
            texto = "No se encontró la partida de producción.\nDebe carga"\
                    "r la línea con materia prima y crear la partida ante"\
                    "s de producir.",
            padre = self.wids['ventana'])
        return
    # The partida is selected. Check whether a previous one exists and
    # has at least one roll, to avoid leaving empty partidas behind.
    # XXX: DONE: this was on hold until JMadrid confirmed it.
    # Confirmed by mail on 11/09/2008.
    try:
        anterior = pclases.Partida.selectBy(
            numpartida = partida.numpartida - 1)[0]
    except IndexError:
        # To produce, the partida must exist because it is created from
        # another window (they are no longer created here directly). If
        # numpartida-1 does not exist, either the user in charge of the
        # load partidas did not create the geotextile partida, or this
        # is the first partida in the system.
        pass
    else: # There is a previous partida.
        if (anterior.esta_vacia() and (
                self.usuario == None or self.usuario.nivel > 2)):
            # Show the dialog and block the switch only when a user is
            # logged in without a "high" privilege level.
            utils.dialogo_info(titulo = "PARTIDA INCORRECTA",
                texto = "La partida anterior (%s) está vacía. No puede "\
                        "iniciar la partida nueva %s." % (
                            anterior.codigo, partida.codigo),
                padre = self.wids['ventana'])
            return
    # XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX
    # Make the partida current.
    partida_carga = partida.partidaCarga
    self.wids['e_partida'].set_text(partida_carga.codigo)
    self.wids['e_partida_gtx'].set_text(partida.codigo)
    if partida.rollos: # It already has rolls from a previous report
        productoVenta = partida.rollos[0].articulos[0].productoVenta
        self.producto = productoVenta
        self.rellenar_datos_articulo(self.producto)
        self.wids['e_fichaproduccion'].set_text(
            self.producto.camposEspecificosRollo.fichaFabricacion)
        self.objeto.fichaproduccion \
            = self.wids['e_fichaproduccion'].get_text()
    for a in self.objeto.articulos: # And switch the articles to the new
                                    # partida and sale product.
        a.partida = partida
        a.productoVenta = self.producto
    self.actualizar_ventana()
def nueva_partida(self, numpartida):
    """ Marked for DEPRECATED.
    Creates a new Partida with the given number (strings may carry a
    "P-" prefix, which is stripped), moves the bales of the current
    partida to it and reassigns the report's articles to it.
    """
    if isinstance(numpartida, type("¡Hola hombre cangrejo!")):
        # A string was received: strip the optional "P-" prefix.
        numpartida = numpartida.upper().replace("P-", "")
    partida = pclases.Partida(numpartida = numpartida,
                              codigo = "P-%s" % (numpartida))
    partida_actual = self.get_partida()
    if partida_actual != None:
        for b in partida_actual.balas: # Move the bales BEFORE changing
            b.partida = partida        # the Entry (because get_partida
                                       # reads the partida from it).
    self.wids['e_partida'].set_text(str(numpartida))
    for a in self.objeto.articulos:
        a.partida = partida
    self.actualizar_ventana()
def get_consumos_estimados(self, partida):
    """
    Returns the sum of the estimated consumptions of all the reports
    that belong to the received partida object.
    """
    total = 0
    for rollo in partida.rollos:
        try:
            articulo = rollo.articulos[0]
            producto = articulo.productoVenta
            cer = producto.camposEspecificosRollo
            metros = cer.metrosLineales
            # Roll weight in kg: meters * width * grammage / 1000.
            peso_rollo = metros * cer.ancho * cer.gramos/1000
            merma = articulo.parteDeProduccion.merma
            # Estimated consumption compensates the waste percentage.
            total += peso_rollo / (1.0 - merma)
        except IndexError:
            self.logger.error("partes_de_fabricacion_rollos.py::get_consumos_estimados -> ¡No se encontraron artículos en el rollo ID %d!" % (rollo.id))
    return total
def rellenar_balas(self):
    """ DEPRECATED: the bale list moved to an independent window. """
    # Hide the frame and bail out; everything below stays disabled.
    self.wids['frame3'].set_property("visible", False)
    return # Removed from here to put it in an independent window.
    # NOTE: PLAN: this took very long; it is what made this window so much slower than the rolls one.
    #model = self.wids['tv_balas'].get_model()
    #model.clear()
    #cantidad = 0
    #partida = self.get_partida()
    #if partida != None:
    #    consumos_estimados = self.get_consumos_estimados(partida)
    #    for bala in partida.balas:
    #        if consumos_estimados >= bala.pesobala: # Se ha gastado la bala entera.
    #            porcion_consumida = 100
    #            consumos_estimados -= bala.pesobala
    #        else:
    #            porcion_consumida = (consumos_estimados / bala.pesobala) * 100 # % consumido
    #            consumos_estimados = 0 # Ya no puedo descontar más o me quedaré por debajo de 0.
    #        model.append((bala.codigo, bala.pesobala, porcion_consumida, bala.id))
    #        cantidad += bala.pesobala
    #self.wids['e_total_balas'].set_text('%s' % (utils.float2str(round(cantidad, 2))))
    ### self.wids['e_consumo_real'].set_text('%.2f' % round(cantidad, 2))
    #self.colorear_pesos()
def get_partida(self):
    """
    Returns the Partida related to the current report, looked up by the
    number typed in the "e_partida_gtx" entry (an optional "P-" prefix
    is stripped). Returns None when the entry does not hold a valid
    number, no matching partida exists, or the lookup fails.
    """
    try:
        numpartida = self.wids['e_partida_gtx'].get_text()
        numpartida = numpartida.upper().replace("P-", "")
        numpartida = int(numpartida)
        # numpartida is UNIQUE: the select yields one partida or none.
        return pclases.Partida.select(
            pclases.Partida.q.numpartida == numpartida)[0]
    except ValueError:
        # The Entry does not contain a number.
        return None
    except IndexError:
        # No partida with that number exists.
        return None
    except Exception:
        # Defensive fallback (was a bare "except:"): any other failure
        # is also treated as "no partida".
        return None
def pedir_rango_balas(self):
    """
    Asks for a range of bale numbers.
    Returns an iterable of bale numbers from the first of the range (or
    the only one, when a single code is typed) up to the last one,
    inclusive. Returns '' or None unchanged when the user leaves the
    text empty or cancels, and [] on a malformed range.
    """
    # BUGFIX: the prompt used to read "indovidual" (typo).
    rango = utils.dialogo_entrada(titulo = 'INTRODUZCA RANGO',
        texto = 'Rango de números de bala o el código individual.\nEscriba el rango de códigos de la forma "xxxx-yyyy", ambos inclusive.',
        padre = self.wids['ventana'])
    if rango == '' or rango == None:
        # Propagate '' (show picker) and None (cancel) to the caller.
        return rango
    try:
        if '-' in rango:
            ini, fin = rango.split('-')
            ini = int(ini)
            fin = int(fin)
            if fin < ini:
                # Accept reversed ranges by swapping the bounds.
                ini, fin = fin, ini
        else:
            ini = int(rango)
            fin = ini
    except ValueError:
        # Covers non-numeric codes and malformed ranges ("1-2-3").
        utils.dialogo_info(titulo = 'CÓDIGO INCORRECTO', texto = 'Los códigos deben ser numéricos.\n\nVerifique que los ha escrito correctamente y que ha separado el rango con un guión.', padre = self.wids['ventana'])
        return []
    return xrange(ini, fin+1)
    def add_bala(self, w):
        """
        DEPRECATED. Ask for a range of bale numbers (or, on empty input,
        let the user pick unassigned analyzed bales from a list) and attach
        them to the partida currently shown in the window, then refresh the
        bale view.
        """
        if self.get_partida() == None:
            utils.dialogo_info(titulo = 'ELIJA PARTIDA', texto = 'Debe seleccionar antes una partida.', padre = self.wids['ventana'])
            return
        rango = self.pedir_rango_balas()
        if rango == None:
            # User cancelled the dialog.
            return
        elif rango == '':
            # Empty input: offer every unassigned, analyzed bale instead.
            balas = pclases.Bala.select(pclases.Bala.q.partidaID == None)
            balas = [(b.id, b.numbala, b.pesobala) for b in balas if b.analizada()]
            resp = utils.dialogo_resultado(balas,
                                           'SELECCIONE BALAS',
                                           cabeceras = ('ID', 'Número de bala', 'Peso'),
                                           multi = True)
            if resp == [-1]: # Cancelled.
                return
            partida = self.get_partida()
            for id in resp:
                bala = pclases.Bala.get(id)
                if bala.claseb:
                    # Class-B (low quality) bales need explicit confirmation.
                    if utils.dialogo(titulo = 'BALA MARCADA COMO BAJA CALIDAD',
                                     texto = 'La bala está marcada como clase B. Esto puede provocar\nproblemas en la línea de producción.\n¿Está seguro de querer comsumir la bala de fibra?',
                                     padre = self.wids['ventana']):
                        bala.partida = partida
                # NOTE(review): bales NOT marked class B are silently skipped
                # here — presumably they should be attached unconditionally.
                # Confirm before resurrecting this DEPRECATED path.
        else:
            for numbala in rango:
                try:
                    balas = pclases.Bala.select(pclases.AND(pclases.Bala.q.numbala == numbala,
                                                            pclases.Bala.q.partidaID == None))
                    bala = [b for b in balas if b.analizada()][0]
                    # numbala is UNIQUE: one match at most, and only among
                    # bales not already used by another partida.
                    bala.partida = self.get_partida()
                except:
                    # IndexError from the [0] above: either the bale is
                    # missing/taken (count == 0) or its lot is unanalyzed.
                    if balas.count() == 0:
                        utils.dialogo_info(titulo = 'BALA INCORRECTA',
                                           texto = """El número de bala %d no se encontró en el almacén.""" % numbala,
                                           padre = self.wids['ventana'])
                    else:
                        utils.dialogo_info(titulo = 'LOTE NO ANALIZADO',
                                           texto = """
El lote %s al que pertenece la bala aún no ha sido analizado.
Hasta que no se especifiquen desde laboratorio las características del lote,
la bala %d no podrá ser usada en producción.
""" % (balas[0].lote.codigo, balas[0].numbala),
                                           padre = self.wids['ventana'])
                    return
        self.rellenar_balas()
def drop_bala(self, w):
""" DEPRECATED """
model, paths = self.wids['tv_balas'].get_selection().get_selected_rows()
if not paths:
return
for path in paths:
idbala = model[path][-1]
bala = pclases.Bala.get(idbala)
bala.partida = None
self.rellenar_balas()
    def button_clicked(self, list, event):
        """
        Right-click handler for the rolls treeview: build a context popup
        (send lab sample / clear marks) and enable each entry according to
        the marks of the selected rows.
        """
        # NOTE: the "list" parameter shadows the builtin; kept as-is for
        # compatibility with the signal connection.
        if event.button == 3:
            # menu = gtk.Menu()
            #ui_string = """<ui>
            # <popup name='Popup'>
            #  <menuitem action='Enviar muestra'/>
            #  <menuitem action='Marcar como defectuoso'/>
            #  <menuitem action='Limpiar marcas defectuoso y muestra'/>
            # </popup>
            # </ui>"""
            ui_string = """<ui>
            <popup name='Popup'>
            <menuitem action='Enviar muestra'/>
            <menuitem action='Limpiar marcas defectuoso y muestra'/>
            </popup>
            </ui>"""
            ag = gtk.ActionGroup('WindowActions')
            #actions = [('Enviar muestra', gtk.STOCK_COLOR_PICKER, '_Enviar muestra', '<control>e',
            #            'Envia una muestra del lote o partida correspondiente al parte a laboratorio.',
            #            self.enviar_a_laboratorio),
            #           ('Marcar como defectuoso', gtk.STOCK_DELETE, '_Marcar como defectuoso', '<control>m',
            #            'Marca el rollo seleccionado como defectuoso (peso inferior, gramaje bajo, longitud incorrecta, etc.).',
            #            self.marcar_como_defectuoso),
            #           ('Limpiar marcas defectuoso y muestra', gtk.STOCK_CLEAR, '_Limpiar marcas defectuoso y muestra', '<control>l',
            #            'Limpia las marcas de defectuoso y la de muestra de laboratorio del rollo, si las tuviera.',
            #            self.limpiar_marcas)]
            actions = [('Enviar muestra', gtk.STOCK_COLOR_PICKER, '_Enviar muestra', '<control>e',
                        'Envia una muestra del lote o partida correspondiente al parte a laboratorio.',
                        self.enviar_a_laboratorio),
                       ('Limpiar marcas defectuoso y muestra', gtk.STOCK_CLEAR, '_Limpiar marca muestra', '<control>l',
                        'Limpia las marcas de muestra de laboratorio del rollo, si la tuviera. No cancela la muestra ya enviada.',
                        self.limpiar_marcas)]
            ag.add_actions(actions)
            ui = gtk.UIManager() #gtk.UI_MANAGER_POPUP
            ui.insert_action_group(ag, 0)
            ui.add_ui_from_string(ui_string)
            widget = ui.get_widget("/Popup")
            model, paths = self.wids['tv_rollos'].get_selection().get_selected_rows()
            # Decide which menu entries make sense for the whole selection.
            mostrar_muestra = mostrar_defectuoso = True; mostrar_limpiar = False
            for path in paths:
                if model[path][1] != '': # Has a roll number: not an incident row.
                    id = model[path][-1]
                    articulo = pclases.Articulo.get(id)
                    if articulo.es_rollo():
                        rollo = articulo.rollo
                        # Only offer "send sample" if no selected roll is
                        # sampled yet, and "clear" if any has a mark.
                        mostrar_muestra = mostrar_muestra and not(rollo.muestra)
                        mostrar_defectuoso = mostrar_defectuoso and not(rollo.rollob)
                        mostrar_limpiar = mostrar_limpiar or (rollo.muestra or rollo.rollob)
                    elif articulo.es_rollo_defectuoso():
                        rollo = articulo.rolloDefectuoso
                        mostrar_muestra = mostrar_limpiar = mostrar_defectuoso = False
                else:
                    # Incident row selected: disable everything.
                    mostrar_muestra = mostrar_defectuoso = mostrar_limpiar = False
            menuitem = ui.get_widget("/Popup/Enviar muestra")
            menuitem.set_sensitive(mostrar_muestra)
            #menuitem = ui.get_widget("/Popup/Marcar como defectuoso")
            #menuitem.set_sensitive(mostrar_defectuoso)
            menuitem = ui.get_widget("/Popup/Limpiar marcas defectuoso y muestra")
            menuitem.set_sensitive(mostrar_limpiar)
            widget.popup(None, None, None, event.button, event.time)
    def limpiar_marcas(self, parametro):
        """
        Clear the defective (rollob) and sample (muestra) marks, and the
        observations, of the rolls selected in the treeview.
        """
        parte = self.objeto
        if not parte.articulos:
            utils.dialogo_info(titulo = "PARTE VACÍO", texto = "En el parte seleccionado no hubo producción.", padre = self.wids['ventana'])
        else:
            model, paths = self.wids['tv_rollos'].get_selection().get_selected_rows()
            for path in paths:
                if model[path][1] != '': # Has a roll number: not an incident row.
                    id = model[path][-1]
                    rollo = pclases.Articulo.get(id).rollo
                    if rollo != None:
                        rollo.rollob = False
                        rollo.muestra = False
                        rollo.observaciones = ''
                        # Mirror the cleared observations in the view.
                        model[path][-2] = rollo.observaciones
def __crear_rollo_defectuoso_ye_olde_schoole(self):
"""
Crea un rollo "defectuoso" según el modelo de datos
antiguo (marcándolo como rollo B pero siendo a todos
los efectos un rollo "normal", del mismo modo que se
hace con las balas), sin crearlo como artículo tipo
"rollo defectuoso" como se hace ahora.
"""
parte = self.objeto
if not parte.articulos:
utils.dialogo_info(titulo = "PARTE VACÍO",
texto = "En el parte seleccionado no hubo producción.",
padre = self.wids['ventana'])
else:
sel = self.wids['tv_rollos'].get_selection()
model, paths = sel.get_selected_rows()
for path in paths:
if model[path][1] != '': # Nº rollo, tiene, no es incidencia.
id = model[path][-1]
rollo = pclases.Articulo.get(id).rollo
motivo = utils.dialogo_entrada(titulo = "MOTIVO",
texto = "Introduzca el motivo por el cual el rollo %s se considera defectuoso:" % (rollo.codigo),
padre = self.wids['ventana'])
if motivo != None:
largo = utils.dialogo_entrada(titulo = "LARGO",
texto = "Introduzca la longitud del rollo defectuoso:",
valor_por_defecto = `rollo.productoVenta.camposEspecificosRollo.metrosLineales`,
padre = self.wids['ventana'])
if largo != None:
try:
largo = utils._float(largo)
except:
utils.dialogo_info(titulo = "ERROR",
texto = "El número introducido %s no es correcto." % (largo),
padre = self.wids['ventana'])
else:
pesosin = rollo.peso_sin
try:
dens = pesosin / (rollo.productoVenta.camposEspecificosRollo.ancho * largo)
except ZeroDivisionError:
dens = 0
rollo.densidad = dens
rollo.rollob = True
rollo.observaciones += "Defectuoso: " + motivo
model[path][-2] += "Defectuoso: " + motivo
imprimir_etiqueta_de_rollo_defectuoso(rollo)
def marcar_como_defectuoso(self, parametro):
"""
Marca el rollo seleccionado como defectuoso.
UNDOCUMENTED
"""
# No creo que llegue a usarse nunca. De todas formas me reservo el derecho como administrador
# a crear rollos B, por si las moscas. También tengo derecho como español a comerme un
# bocadillo de panceta si quiero. ¡¿Me oye?!
if self.usuario != None and self.usuario.nivel > 0:
utils.dialogo_info(titulo = "FUNCIONALIDAD NO IMPLEMENTADA",
texto = "Esta funcionalidad no puede ser usada todavía.",
padre = self.wids['ventana'])
return
self.__crear_rollo_defectuoso_ye_olde_schoole()
    def enviar_a_laboratorio(self, parametro):
        """
        Create a lab sample for the lote/partida of the current production
        report and flag the selected rolls as sampled.
        """
        # NOTE: no idea what "parametro" carries; only the report currently
        # selected in the treeview matters here.
        parte = self.objeto
        if not parte.articulos:
            utils.dialogo_info(titulo = "PARTE VACÍO", texto = "En el parte seleccionado no hubo producción.", padre = self.wids['ventana'])
        else:
            a = parte.articulos[0] # It has at least 1 article.
                                   # The first one is enough.
            if parte.es_de_balas():
                lote = a.bala.lote
                partida = None
            else:
                lote = None
                partida = a.partida
            codigo = self.crear_muestra(lote, partida)
            if codigo != '':
                model, paths = self.wids['tv_rollos'].get_selection().get_selected_rows()
                for path in paths:
                    if model[path][1] != '': # Has a roll number,
                                             # i.e. not an incident row.
                        id = model[path][-1]
                        rollo = pclases.Articulo.get(id).rollo
                        if rollo != None:
                            rollo.muestra = True
                            rollo.observaciones += '>>> Muestra %s' % codigo
                            model[path][-2] += '>>> Muestra %s' % codigo
    def crear_muestra(self, lote, partida):
        """
        Pop up a dialog asking for the code and observations of a new lab
        sample for the given lote or partida (exactly one of them is
        expected to be non-None). The actual record creation happens in
        crear_muestra_ok_cancel. Returns the sample code, or '' if the
        dialog was cancelled.
        """
        # HACK: one-element list so the response handler can write the
        # resulting code back into this scope.
        _codigo = ['']
        dialogo = gtk.Dialog("DATOS DE LA MUESTRA",
                             self.wids['ventana'],
                             gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                             (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
                              gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT))
        dialogo.set_transient_for(self.wids['ventana'])
        dialogo.connect("response", self.crear_muestra_ok_cancel, lote, partida, _codigo)
        texto = """
        Introduzca, si lo desea, los datos para la muestra
        de%s número %d.
        """ % (partida and " la partida" or "l lote",
               partida and partida.numpartida or lote.numlote)
        txt = gtk.Label(texto)
        # Packing order matters: crear_muestra_ok_cancel reads the vbox
        # children by index (2 = code entry, 4 = observations entry).
        dialogo.vbox.pack_start(txt)
        dialogo.vbox.pack_start(gtk.Label("\nCódigo de muestra:"))
        codigo = gtk.Entry()
        codigo.set_text("M(%s)" % (partida and partida.codigo or lote.codigo))
        dialogo.vbox.pack_start(codigo)
        dialogo.vbox.pack_start(gtk.Label("\nObservaciones:"))
        observaciones = gtk.Entry()
        dialogo.vbox.pack_start(observaciones)
        dialogo.vbox.show_all()
        dialogo.run()
        dialogo.destroy()
        return _codigo[0]
    def crear_muestra_ok_cancel(self, dialogo, respuesta, lote, partida, _codigo):
        """
        "response" handler for the dialog built in crear_muestra: on
        accept, create the Muestra record (pending, sent now), store its
        code in _codigo[0] and optionally send an internal alert message
        to a user chosen from a combo.
        """
        if respuesta == gtk.RESPONSE_ACCEPT:
            # Index-based access matches the packing order in crear_muestra:
            # children()[2] is the code entry, children()[4] observations.
            codigo = dialogo.vbox.get_children()[2].get_text()
            observaciones = dialogo.vbox.get_children()[4].get_text()
            m = pclases.Muestra(lote = lote,
                                partida = partida,
                                codigo = codigo,
                                observaciones = observaciones,
                                pendiente = True,
                                envio = mx.DateTime.localtime(),
                                recepcion = None,
                                loteCem = None)
            _codigo[0] = codigo
            if utils.dialogo(titulo = "MUESTRA ENVIADA",
                             texto = "Muestra creada, enviada y pendiente para su análisis en laboratorio.\n¿Desea enviar una alerta?",
                             padre = self.wids['ventana']):
                usuarios = [(u.id, u.usuario) for u in pclases.Usuario.select(orderBy = 'usuario')]
                usuario = utils.dialogo_combo(titulo = "SELECCIONE USUARIO",
                                              texto = "Seleccione del desplegable inferior al usuario que quiere alertar acerca de la muestra.",
                                              ops = usuarios,
                                              padre = self.wids['ventana'])
                if usuario != None:
                    user = pclases.Usuario.get(usuario)
                    if m.codigo:
                        msj = "La muestra %s está " % m.codigo
                    else:
                        msj = "Tiene una muestra "
                    msj += "pendiente de analizar."
                    user.enviar_mensaje(msj)
    def bloquear(self, ch, mostrar_alerta = True):
        """
        Toggle the locked/verified flag of the report from the "ch" check
        button, enforcing permissions, then sync the record and leave the
        widget showing the final state.
        """
        # If the report is less than one day old and is locked, anyone may
        # unlock it.
        if mx.DateTime.localtime() - self.objeto.fecha <= mx.DateTime.oneDay and (self.objeto.bloqueado or ch.get_active()):
            self.objeto.bloqueado = False
        elif ch.get_active() != self.objeto.bloqueado:
            # NEW!: locked reports can only be unlocked by users with
            # nivel <= 1.
            if self.objeto.bloqueado:
                if self.usuario and self.usuario.nivel <= 1: # and self.objeto.bloqueado and not ch.get_active():
                    self.objeto.bloqueado = False
            else:
                if "w" in self.__permisos: # Has permission to lock the report.
                    self.objeto.bloqueado = True
                else:
                    if mostrar_alerta:
                        utils.dialogo_info(titulo = "USUARIO SIN PRIVILEGIOS",
                                           texto = "No tiene permisos suficientes para bloquear y verificar partes de producción.",
                                           padre = self.wids['ventana'])
        self.objeto.sync()
        self.objeto.make_swap()
        # The checkbox may have been toggled without effect: show the truth.
        ch.set_active(self.objeto.bloqueado)
    def imprimir(self, boton):
        """
        Save the current report, collect the header entries, employees,
        observations and production rows, and open the generated PDF of
        the rolls report.
        """
        self.guardar(None)
        import informes
        parte = self.objeto
        # Header widgets copied verbatim into the report data dict.
        ws = ('e_fecha', 'e_grsm2', 'sp_merma', 'e_partida', 'e_articulo',
              'e_ancho', 'e_long_rollo', 'e_hora_ini', 'e_hora_fin',
              'e_tiempo_total', 'e_o11', 'e_num_rollos', 'e_metros_lineales',
              'e_peso_total', 'e_tiempo_real_trabajado', 'e_productividad',
              'e_consumo_estimado')
        datos = {}
        for w in ws:
            datos[w] = self.wids[w].get_text()
        empleados = []
        for h in parte.horasTrabajadas:
            empleados.append(h.empleado)
        datos['empleados'] = empleados
        bounds = self.wids['txt_observaciones'].get_buffer().get_bounds()
        datos['observaciones'] = self.wids['txt_observaciones'].get_buffer().get_text(bounds[0], bounds[1])
        # Incidents and articles merged and ordered like the treeview.
        detallesdeproduccion = [i for i in self.objeto.incidencias] + [a for a in self.objeto.articulos]
        detallesdeproduccion.sort(self.cmpfechahora_or_numrollo)
        lineas = []
        # TreeView rows.
        for detalle in detallesdeproduccion:
            obs = self.observaciones(detalle)
            lineas.append((self.rollo(detalle),
                           utils.float2str(self.peso(detalle), 1),
                           utils.float2str(self.densidad(detalle), 1),
                           self.motivo(detalle),
                           self.horaini(detalle),
                           self.horafin(detalle),
                           self.duracion(detalle),
                           obs))
        informes.abrir_pdf(geninformes.parteRollos(datos, lineas))
def _dialogo_entrada(self, texto= '', titulo = 'ENTRADA DE DATOS', valor_por_defecto = '', padre=None, pwd = False):
"""
Muestra un diálogo modal con un textbox.
Devuelve el texto introducido o None si se
pulsó Cancelar.
valor_por_defecto debe ser un string.
Si pwd == True, es un diálogo para pedir contraseña
y ocultará lo que se introduzca.
"""
## HACK: Los enteros son inmutables, usaré una lista
res = [None]
de = gtk.Dialog(titulo,
padre,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_OK, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
de.connect("response", utils.respuesta_ok_cancel, res)
txt = gtk.Label(texto)
de.vbox.pack_start(txt)
txt.show()
input = gtk.Entry()
input.set_visibility(not pwd)
def pasar_foco(widget, event):
if event.keyval == 65293 or event.keyval == 65421:
de.action_area.get_children()[1].grab_focus()
input.connect("key_press_event", pasar_foco)
de.vbox.pack_start(input)
input.show()
input.set_text(valor_por_defecto)
marcado = gtk.CheckButton("Mostrar etiqueta de marcado CE")
marcado.set_active(True)
de.vbox.pack_start(marcado)
marcado.show()
if len(titulo)<20:
width = 100
elif len(titulo)<60:
width = len(titulo)*10
else:
width = 600
de.resize(width, 80)
de.run()
de.destroy()
if res[0]==False:
return None, None
return res[0], marcado.get_active()
def etiquetas(self, boton):
"""
Imprime las etiquetas de los
rollos del parte seleccionados
"""
sel = self.wids['tv_rollos'].get_selection()
model, paths = sel.get_selected_rows()
rollos_defecto = []
for path in paths:
rollos_defecto.append(model[path][1])
rollos_defecto.sort()
rollos_defecto = ', '.join(rollos_defecto)
import informes
entrada, mostrar_marcado = self._dialogo_entrada(
titulo = 'ETIQUETAS',
texto = "Introduzca el número de rollo o el rango (usando '-') "
"que desea etiquetar:",
valor_por_defecto = rollos_defecto,
padre = self.wids['ventana'])
if entrada != None:
if '-' in entrada:
rango = entrada.split('-')
try:
a = int(rango[0])
b = int(rango[1])
if a<= b:
b += 1
else:
a, b = b, a+1
except:
utils.dialogo_info(titulo = 'ERROR',
texto = 'Los números de rollos introducidos no son '
'válidos',
padre = self.wids['ventana'])
return
valido = True
for i in range(a,b):
if not self.objeto.rolloEnParte(i):
valido = False
break
if not valido:
utils.dialogo_info(titulo = 'ERROR',
texto = 'Los números de rollos introducidos no '
'pertecen al parte',
padre = self.wids['ventana'])
return
temp = []
for i in range(a,b):
temp.append(
pclases.Rollo.select(pclases.Rollo.q.numrollo == i)[0])
else:
codigos = [cod.strip() for cod in entrada.split(",")]
temp = []
for codigo in codigos:
if codigo.startswith("R"):
try:
temp.append(pclases.Rollo.select(
pclases.Rollo.q.codigo == codigo)[0])
except Exception, msg:
self.logger.error(
"partes_de_fabricacion_rollos::etiquetas -> %s"
% (msg))
elif codigo.startswith("X"):
try:
temp.append(pclases.RolloDefectuoso.select(
pclases.RolloDefectuoso.q.codigo == codigo)[0])
except Exception, msg:
self.logger.error("partes_de_fabricacion_rollos"
"::etiquetas -> %s" % (msg))
else:
pass #No lo encuentro, paso de dar un mensaje de error.
for a in temp:
if (not self.objeto.rolloEnParte(a.codigo)
and (self.usuario == None
or self.usuario.usuario != 'admin')):
utils.dialogo_info(titulo = 'ERROR',
texto = 'El número de rollo (%d) introducido no '
'pertece al parte' % a,
padre = self.wids['ventana'])
return
rollos = []
fetiqueta = None
for r in temp:
elemento, fetiqueta = build_etiqueta(r)
rollos.append(elemento)
if boton.name == "b_etiquetas":
informes.abrir_pdf(
geninformes.etiquetasRollos(rollos, mostrar_marcado))
# Antiguas, 4 etiquetas por folio A4.
elif boton.name == "b_etiq_peq":
informes.abrir_pdf(
geninformes.etiquetasRollosEtiquetadora(rollos,
mostrar_marcado,
fetiqueta))
# Etiquetas térmicas pequeñas.
def buscar_producto_compra(self, defecto = "",
titulo_defecto = "PLÁSTICO ENVOLTORIO"):
a_buscar = utils.dialogo_entrada(titulo = titulo_defecto,
texto = 'Introduzca código o descripción del producto:',
valor_por_defecto = defecto,
padre = self.wids['ventana'])
if a_buscar == None:
return None
productos = pclases.ProductoCompra.select(pclases.AND(
pclases.OR(pclases.ProductoCompra.q.descripcion.contains(a_buscar),
pclases.ProductoCompra.q.codigo.contains(a_buscar)),
pclases.ProductoCompra.q.controlExistencias == True,
pclases.ProductoCompra.q.existencias > 0,
pclases.ProductoCompra.q.obsoleto == False))
if productos.count() == 0:
utils.dialogo_info(titulo = 'PRODUCTO NO ENCONTRADO',
texto = 'Producto no encontrado o sin existencias.',
padre = self.wids['ventana'])
return None
elif productos.count() > 1:
filas = [(p.id, p.codigo, p.descripcion, p.existencias)
for p in productos]
idproducto = utils.dialogo_resultado(filas,
'SELECCIONE PRODUCTO',
cabeceras = ['ID', 'Código', 'Descripción', 'Existencias'],
padre = self.wids['ventana'])
if idproducto > 0:
productos = [pclases.ProductoCompra.get(idproducto)]
else:
return None
producto = productos[0]
return producto
def cambiar_plastico(self, b):
self.plastico = self.buscar_producto_compra()
self.wids['e_plastico'].set_text(self.plastico and self.plastico.descripcion or "SIN ENVOLVER")
    def _salir(self, w, event = None):
        """
        Window-close handler: force verification of old unlocked reports,
        then, if the window really closes, check low/negative stock of the
        geotextiles line's additional consumptions and alert users.
        """
        if ("w" in self.__permisos
            and self.objeto
            and not self.objeto.bloqueado
            and self.objeto.fecha < mx.DateTime.localtime()-mx.DateTime.oneDay
           ): # Has permission to lock the report: make him verify on exit.
            res = utils.dialogo(titulo = "DEBE VERIFICAR EL PARTE",
                                texto = "Antes de cerrar el parte debe verifi"
                                        "carlo.\n¿Marcar como verificado?",
                                padre = self.wids['ventana'],
                                bloq_temp = ["Sí"])
            self.objeto.bloqueado = res
            self.wids['ch_bloqueado'].set_active(self.objeto.bloqueado)
            # return True
        if not self.salir(w, mostrar_ventana = event == None):
            # self.salir returns True when the close is cancelled
            # (event-chain); the alert checks only run on a real close.
            try:
                padre = self.wids['ventana']
            except KeyError:
                padre = None
            vpro = VentanaActividad(texto = "Comprobando disparo de alertas...",
                                    padre = padre)
            vpro.mostrar()
            # NOTE(review): this select result is immediately discarded by
            # the next line; presumably a leftover from before self.linea
            # existed -- confirm before removing.
            linea = pclases.LineaDeProduccion.select(pclases.LineaDeProduccion.q.nombre.contains('de geotextiles'))
            linea = self.linea
            vpro.mover()
            if linea == None:
                print "WARNING: La línea de geotextiles no está correctamente dada de alta."
                self.logger.warning("WARNING: La línea de geotextiles no está correctamente dada de alta.")
            else:
                vpro.mover()
                formulacion = linea.formulacion
                for ca in [ca_con_p for ca_con_p
                           in formulacion.consumosAdicionales
                           if ca_con_p.productoCompra != None
                           and not ca_con_p.productoCompra.obsoleto]:
                    vpro.mover()
                    # Alert when a product is below its minimum stock:
                    if ca.productoCompra.existencias<ca.productoCompra.minimo:
                        vpro.mover()
                        try:
                            v = pclases.Ventana.select(
                                pclases.Ventana.q.fichero
                                    == "pedidos_de_compra.py")[0]
                        except IndexError:
                            txterror = "WARNING: ¡La ventana de pedidos de "\
                                       "compra SE HA PERDIDO!"
                            print txterror
                            self.logger.warning(txterror)
                        # NOTE(review): if the select above failed, "v" may
                        # be unbound when used below -- confirm.
                        mensaje = "El producto %s tiene las existencias bajo mínimos. Considere hacer un pedido de compra." % ca.productoCompra.descripcion
                        for u in [p.usuario for p in v.permisos if p.nuevo]:
                            vpro.mover()
                            u.enviar_mensaje(mensaje)
                    # And alert on negative stock:
                    if ca.productoCompra.existencias < 0:
                        vpro.mover()
                        try:
                            v = pclases.Ventana.select(
                                pclases.Ventana.q.fichero
                                    == "pedidos_de_compra.py")[0]
                        except IndexError:
                            txterror = "WARNING: ¡La ventana de pedidos de "\
                                       "compra SE HA PERDIDO!"
                            print txterror
                            self.logger.error(txterror)
                        vpro.mover()
                        mensaje = "El producto %s tiene existencias NEGATIVAS. Corrija el error lo antes posible." % ca.productoCompra.descripcion
                        for u in [p.usuario for p in v.permisos if p.nuevo]:
                            vpro.mover()
                            u.enviar_mensaje(mensaje)
            vpro.mover()
            vpro.ocultar()
    def add_empleados_calendario(self):
        """
        Add the employees scheduled by the working calendar of the
        production line:
        1.- Get the calendar for self.linea.
        2.- Get its working days for the report's date.
        3.- Filter the working days by the shift matching the report time.
        4.- Get the employees of the resulting working day.
        5.- Remove the current employees (ASKS BEFORE DOING IT).
        6.- Insert the new ones into the report.
        """
        if self.linea != None:
            idldp = self.linea.id
            CAL = pclases.CalendarioLaboral
            calendarios = CAL.select("""linea_de_produccion_id = %d AND
                                        date_part('month', mes_anno) = %d AND
                                        date_part('year', mes_anno) = %d""" \
                % (idldp, self.objeto.fecha.month, self.objeto.fecha.year))
            if calendarios.count() == 1:
                calendario = calendarios[0]
                empleados = self.get_empleados_de_calendario(calendario)
                # If the report already has employees...
                if self.objeto.horasTrabajadas != []:
                    # ...and they differ from the calendar's ones and the
                    # user agrees to drop them:
                    if [ht.empleado for ht in self.objeto.horasTrabajadas] != empleados \
                       and utils.dialogo(titulo = "¿ELIMINAR OPERARIOS?",
                                         texto = "El parte ya tiene empleados relacionados.\n¿Desea eliminarlos y asociar los definidos en el turno?",
                                         padre = self.wids['ventana']):
                        for ht in self.objeto.horasTrabajadas:
                            self.objeto.removeEmpleado(ht.empleado)
                    else:
                        # The user keeps them: cancel the whole operation.
                        return
                # No previous employees, or they were just removed.
                # Add the employees of the working days matching the shift
                # (production days, not recovery).
                for empleado in empleados:
                    self.objeto.addEmpleado(empleado)
            elif calendarios.count() > 1:
                self.logger.error("partes_de_fabricacion_rollos.py -> Existe más de un calendario laboral para el mes, año y línea de producción: fecha %s - idldp %d - idparte %s." % (self.objeto.fecha, idldp, self.objeto.id))
    def get_empleados_de_calendario(self, calendario):
        """
        Return the employees scheduled to work, according to "calendario",
        on the working day and (non-recovery) shift that match the
        report's date and start/end times. Broken Laborable records
        without a shift are deleted on the fly.
        """
        res = []
        LAB = pclases.Laborable
        dia_lab_parte = self.objeto.fecha
        # Build the time constants with the same type as horainicio so the
        # comparisons below are homogeneous (mx.DateTimeDelta vs datetime).
        if isinstance(self.objeto.horainicio,
                      type(mx.DateTime.DateTimeDelta(0))):
            seis_am = mx.DateTime.DateTimeDeltaFrom(hours = 6)
            medianoche = mx.DateTime.DateTimeDeltaFrom(hours = 0)
            restar_un_dia = lambda f: f - mx.DateTime.oneDay
        else:
            import datetime
            seis_am = datetime.time(6)
            medianoche = datetime.time(0)
            restar_un_dia = lambda f: f - datetime.timedelta(1)
        # A report that runs entirely between 00:00 and 06:00 belongs to
        # the night shift of the previous working day.
        if self.objeto.horainicio >= medianoche and \
           self.objeto.horainicio <= seis_am and \
           self.objeto.horafin <= seis_am: # Shifts don't mix; this last
                                           # check may be redundant.
            dia_lab_parte = restar_un_dia(dia_lab_parte)
        laborables = LAB.select("""calendario_laboral_id = %d
                                   AND date_part('day', fecha) = %d"""
                                % (calendario.id, dia_lab_parte.day))
        for laborable in laborables:
            turno = laborable.turno
            if turno == None:
                # Broken record: log it and try to remove it from the DB.
                mensaje = "partes_de_fabricacion_rollos.py::get_empleados_de_calendario -> Laborable ID %d no tiene turno relacionado. Intento eliminarlo de la BD." % (laborable.id)
                self.logger.error(mensaje)
                try:
                    laborable.destroySelf()
                    idlaborable = laborable.id
                    self.logger.warning("partes_de_fabricacion_rollos.py::get_empleados_de_calendario -> Registro laborable ID %d ELIMINADO SATISFACTORIAMENTE." % (idlaborable))
                except:
                    self.logger.error("partes_de_fabricacion_rollos.py::get_empleados_de_calendario -> Registro laborable ID %d NO ELIMINADO." % (laborable.id))
                    print "ERROR: %s" % (mensaje)
                continue
            turnohorainicio = utils.DateTime2DateTimeDelta(turno.horainicio)
            turnohorafin = utils.DateTime2DateTimeDelta(turno.horafin)
            objetohorainicio = utils.DateTime2DateTimeDelta(self.objeto.horainicio)
            objetohorafin = utils.DateTime2DateTimeDelta(self.objeto.horafin)
            if not turno.recuperacion:
                ohi = objetohorainicio
                ohf = objetohorafin
                thi = turnohorainicio
                thf = turnohorafin
                # Normalize intervals crossing midnight by pushing the end
                # (and early-morning report times) into the next day.
                if thi > thf: thf += mx.DateTime.oneDay
                if ohi > ohf: ohf += mx.DateTime.oneDay
                if ohi >= medianoche and ohi < seis_am: ohi += mx.DateTime.oneDay
                if ohf >= medianoche and ohf <= seis_am: ohf += mx.DateTime.oneDay
                # The report interval must fall within the shift interval.
                if thi <= ohi <= thf and thi <= ohf <= thf:
                    for empleado in laborable.empleados:
                        res.append(empleado)
        return res
def iniciar_pesaje_auto(self, boton):
"""
Abre la ventana de pesaje automático.
"""
rollo = None
ventana_pesaje = crear_ventana_pesaje(self,
padre = self.wids['ventana'],
rollo = rollo,
objeto_ventana_parte = self)
    def consumir_manual(self, boton):
        """
        Manually create a consumption record and then unify the report's
        consumptions. If any consumption ends up with quantity 0 (because
        a negative consumption subtracted from another one) it is deleted
        before leaving the routine.
        """
        # Ask for the product to consume.
        producto, texto_buscado = utils.pedir_producto_compra(
            padre = self.wids['ventana'])
        # Ask for the quantity.
        if producto != None:
            unidad = ""
            try:
                producto_unidad = producto.unidad
                if producto_unidad != "":
                    unidad = " en %s" % (producto_unidad)
            except AttributeError, msg:
                self.logger.error("%sEl producto tipo %s ID %d no tiene atributo unidad. Excepción AttributeError: %s."
                    % (self.usuario and self.usuario.usuario + ": " or "",
                       type(producto),
                       producto != None and producto.id or "NONE",
                       msg))
            descripcion = producto.descripcion
            cantidad = utils.dialogo_entrada(titulo = "CANTIDAD",
                texto = "Introduzca la cantidad a consumir de %s%s." % (descripcion, unidad),
                padre = self.wids['ventana'])
            if cantidad != None:
                try:
                    cantidad_a_consumir = utils._float(cantidad)
                except (TypeError, ValueError):
                    utils.dialogo_info(titulo = "ERROR DE FORMATO",
                        texto = 'El texto introducido "%s" no es un número.' % (cantidad),
                        padre = self.wids['ventana'])
                else:
                    # Create the consumption, recording before/after stock.
                    producto.sync()
                    consumo = pclases.Consumo(silo = None,
                        parteDeProduccion = self.objeto,
                        productoCompra = producto,
                        actualizado = True,
                        antes = producto.existencias,
                        despues = producto.existencias - cantidad_a_consumir,
                        cantidad = cantidad_a_consumir)
                    # Update stock.
                    # NOTE(review): stock seems to be discounted twice
                    # (direct "-=" plus add_existencias); presumably
                    # add_existencias updates a different counter -- confirm.
                    producto.existencias -= cantidad_a_consumir
                    producto.add_existencias(-cantidad_a_consumir)
                    producto.syncUpdate()
                    self.logger.warning("%sCONSUMO LÍNEA GEOTEXTILES -> PARTE %d -> Consumiendo manualmente %f %s de %s (ID %d). Existencias: %f."
                        % (self.usuario and self.usuario.usuario + ": " or "",
                           self.objeto.id,
                           cantidad_a_consumir,
                           producto.unidad,
                           producto.descripcion,
                           producto.id,
                           producto.existencias))
                    # Unify consumptions.
                    self.objeto.unificar_consumos()
                    actualizar_albaran_interno_con_tubos(self.objeto)
                    # Delete zero-quantity consumptions.
                    for c in self.objeto.consumos:
                        if round(c.cantidad, 3) == 0:
                            # Things as small as needles are discounted in
                            # such tiny amounts that after several inserts
                            # and deletes a consumption can be left at
                            # 0.0000...1 and must be removed.
                            try:
                                c.destroySelf()
                            except Exception, msg:
                                self.logger.error("%sConsumo ID %d no se pudo eliminar. Excepción: %s"
                                    % (self.usuario and self.usuario.usuario + ": " or "",
                                       c.id,
                                       msg))
                    self.rellenar_tabla_consumos()
                    # Find or create the internal delivery note of consumptions.
                    buscar_o_crear_albaran_interno(self.objeto)
                    actualizar_albaran_interno_con_tubos(self.objeto)
def add_desecho(self, boton):
"""
Crea un registro de consumo de material desechado y
actualiza la tabla.
"""
producto = self.buscar_producto_compra(defecto = "",
titulo_defecto = "BUSCAR PRODUCTO A DESECHAR")
if producto != None:
cantidad = utils.dialogo_entrada(titulo = "INTRODUZCA CANTIDAD",
texto = "Teclee la cantidad que se desechará de %s:"%(
producto.descripcion),
padre = self.wids['ventana'])
if cantidad != None:
try:
cantidad = utils._float(cantidad)
except ValueError:
utils.dialogo_info(titulo = "ERROR EN FORMATO",
texto = "El texto tecleado %s no es un número válido." % (cantidad),
padre = self.wids['ventana'])
else:
observaciones = utils.dialogo_entrada(titulo = "OBSERVACIONES",
texto = "Teclee, si lo desea, el motivo por el cual la cantidad desechada de %s se considera defectuosa:" % (producto.descripcion),
padre = self.wids['ventana'])
if observaciones != None:
try:
desecho = pclases.DescuentoDeMaterial.desechar(producto, cantidad, self.objeto, observaciones)
except AssertionError, msg:
self.logger.error("%spartes_de_fabricacion_rollos::add_desecho -> AssertionError: %s" % (self.usuario and self.usuario.usuario + ": " or "", msg))
if desecho.cantidad != cantidad:
utils.dialogo_info(titulo = "EXISTENCIAS INSUFICIENTES",
texto = "La cantidad de %s en almacén era inferior a la cantidad tecleada (%s).\nSe ha descontado %s en su lugar." % (desecho.productoCompra.descripcion, utils.float2str(cantidad), utils.float2str(desecho.cantidad)),
padre = self.wids['ventana'])
self.objeto.unificar_desechos()
self.rellenar_tabla_desechos()
def drop_desecho(self, boton):
"""
Cancela el desecho seleccionado.
"""
model, paths = self.wids['tv_desecho'].get_selection().get_selected_rows()
if paths != None and paths != []:
for path in paths:
idddm = model[path][-1]
ddm = pclases.DescuentoDeMaterial.get(idddm)
try:
ddm.anular()
except AssertionError, msg:
self.logger.error("%spartes_de_fabricacion_rollos::drop_desecho -> AssertionError: %s" % (self.usuario and self.usuario.usuario + ": " or "", msg))
utils.dialogo_info(titulo = "ERROR",
texto = "Ocurrió un error anulando un descuento de material.\nPulse «Aceptar» para continuar.\n\n\n\nInformación de depuración:\n\n%s" % (msg),
padre = self.wids['ventana'])
self.rellenar_tabla_desechos()
self.objeto.unificar_desechos()
def descontar_material_adicional(ventana_parte, articulo, restar = True):
    """
    Discount the additional material corresponding to the article as
    dictated by the product's formulation.
    If "restar" is True the material is discounted; if False the quantity
    is added back (e.g. when a roll is removed from the report).
    Stock may be left negative if needed, although the user is warned of
    the incident.
    """
    producto = articulo.productoVenta
    # CAUTION: the consumption must be named "plastico", exactly that, no
    # accents or anything. Not ideal, but at least the number of special
    # cases has been reduced.
    for consumoAdicional in producto.consumosAdicionales:
        if (not consumoAdicional.productoCompra
            or consumoAdicional.productoCompra.obsoleto):
            # There may be consumptions no longer in use that simply had
            # their product detached so they don't count.
            continue
        if ("plastico" in consumoAdicional.nombre.lower()
            and ventana_parte.plastico != None
            and ventana_parte.plastico != consumoAdicional.productoCompra):
            # The wrapping plastic chosen in the window overrides the one
            # declared in the formulation.
            consumido = consumoAdicional.consumir(articulo,
                                    cancelar = not restar,
                                    productoCompra = ventana_parte.plastico)
        else:
            consumido = consumoAdicional.consumir(articulo,
                                    cancelar = not restar)
        txtlog = "PARTE ID %s (%s, %s-%s): "\
                 "Consumiendo %s de %s para %s %s. Existencias: %s" % (
            ventana_parte.objeto.id,
            utils.str_fecha(ventana_parte.objeto.fecha),
            utils.str_hora_corta(ventana_parte.objeto.horainicio),
            utils.str_hora_corta(ventana_parte.objeto.horafin),
            utils.float2str(consumido),
            consumoAdicional.productoCompra
                and consumoAdicional.productoCompra.descripcion
                or "... ¡NADA! ",
            articulo.es_rollo() and "el rollo" or "la bolsa",
            articulo.codigo,
            consumoAdicional.productoCompra and
                utils.float2str(consumoAdicional.productoCompra.existencias)
                or "-")
        # Fall back to stdout when the window has no logger.
        try:
            ventana_parte.logger.warning(txtlog)
        except AttributeError:
            print txtlog
    ventana_parte.objeto.unificar_consumos()
    actualizar_albaran_interno_con_tubos(ventana_parte.objeto)
def _calcular_peso_densidad(peso, producto):
"""
Calcula el peso y la "densidad" del artículo en base al
peso y producto recibido.
Si el peso es None, usa los datos por defecto del producto.
Si no lo es, devuelve el mismo peso recibido y la "densidad"
que le corresponde.
"""
if peso == None:
try:
peso = ((producto.camposEspecificosRollo.gramos * producto.camposEspecificosRollo.ancho * producto.camposEspecificosRollo.metrosLineales)/1000) + producto.camposEspecificosRollo.pesoEmbalaje
except TypeError: # Lo ha dado por tener pesoEmbalaje = None y no poder sumar NoneType a float.
txterror = "partes_de_fabricacion_rollo.py::_calcular_peso_densidad -> El producto tiene campos de camposEspecificosRollo a None."
print txterror
if ventana_parte != None:
ventana_parte.logger.error(txterror)
peso = 0
# Por defecto se crea con los datos fetén.
densidad = producto.camposEspecificosRollo.gramos
else:
pesosin = (peso - producto.camposEspecificosRollo.pesoEmbalaje) * 1000
try:
dens = pesosin / (producto.camposEspecificosRollo.metros_cuadrados)
except ZeroDivisionError:
dens = 0
densidad = dens
return peso, densidad
def crear_articulo(numrollo,
                   partida,
                   producto,
                   parte,
                   peso = None,
                   objeto_ventana_parte = None,
                   defectuoso = False):
    """
    Creates a roll article with the received roll number, belonging to
    batch «partida», of the indicated product, and with an article linked
    to production report «parte».  If peso is None, the default weight
    and density corresponding to the product are calculated for it.
    objeto_ventana_parte is the production-report window object, used
    for the parent of dialogs (and for logging if needed).
    If "defectuoso" is True, a defective roll is created instead of a
    "normal" one; in that case the user is asked for the reason and the
    roll length through modal dialogs.
    CAREFUL: THE MATERIAL USED IN MANUFACTURING IS NOT DISCOUNTED HERE.
    Only the article is created.
    """
    peso, densidad = _calcular_peso_densidad(peso, producto)
    if not defectuoso:
        codigo = 'R%d' % (numrollo) # NOTE: change here if in the end the
                                    # code differs from the roll number.
        rollo = pclases.Rollo(partida = partida,
                              codigo = codigo,
                              numrollo = numrollo,
                              peso = peso,
                              densidad = densidad,
                              muestra = False,
                              rollob = False)
        rollod = None
    else:
        codigo = 'X%d' % (numrollo) # NOTE: change here if in the end the code differs from the roll number.
        rollo = rollod = None
        observaciones = utils.dialogo_entrada(titulo = "OBSERVACIONES",
            texto = "Introduzca el motivo por el cual el rollo se considera defectuoso:",
            valor_por_defecto = "Defectuoso: longitud insuficiente.",
            padre = objeto_ventana_parte.wids['ventana'])
        if observaciones != None:
            largo = utils.dialogo_entrada(titulo = "LARGO",
                texto = "Introduzca la longitud en metros del rollo defectuoso:",
                padre = objeto_ventana_parte.wids['ventana'])
            if largo != None and largo.strip() != "":
                try:
                    largo = utils._float(largo)
                except ValueError:
                    utils.dialogo_info(titulo = "ERROR",
                        texto = "El texto %s no es un número válido." % (largo),
                        padre = objeto_ventana_parte.wids['ventana'])
                else:
                    ancho = producto.camposEspecificosRollo.ancho # Keeps the width of the product that was intended to be manufactured.
                    pesoEmbalaje = producto.camposEspecificosRollo.pesoEmbalaje # Keeps the packaging weight of the original product.
                    try:
                        densidad = ((peso - pesoEmbalaje) * 1000) / (largo * ancho)
                    except ZeroDivisionError:
                        # Zero length or width: density cannot be derived.
                        densidad = 0.0
                    try:
                        rollod = pclases.RolloDefectuoso(partida = partida,
                                                         numrollo = numrollo,
                                                         codigo = codigo,
                                                         observaciones = observaciones,
                                                         peso = peso,
                                                         densidad = densidad,
                                                         metrosLineales = largo,
                                                         ancho = ancho,
                                                         pesoEmbalaje = pesoEmbalaje)
                    except Exception, msg:
                        # Most likely a duplicated number; leave rollod = None
                        # so no article gets created below.
                        txt = "Rollo defectuoso %s no se pudo crear. Probablemente número duplicado. Mensaje de la excepción: %s" % (codigo, msg)
                        print txt
    if rollo != None or rollod != None:
        articulo = pclases.Articulo(bala = None,
                                    rollo = rollo,
                                    rolloDefectuoso = rollod,
                                    parteDeProduccion = parte,
                                    productoVenta = producto,
                                    albaranSalida = None,
                                    almacen = pclases.Almacen.get_almacen_principal())
    else:
        # Neither a normal nor a defective roll was created (dialog
        # cancelled or DB error): no article either.
        articulo = None
    return articulo
def build_ventana(padre):
    """
    Build a gtk.Window with the widgets used to control the automatic
    weighing.  Returns the window and the widgets the caller must wire
    up: weight label, roll-code entry, cancel button and the
    "CE marking" / "defective" checkboxes.
    """
    win = gtk.Window()
    win.set_title("LECTURA AUTOMÁTICA DE BÁSCULA")
    win.set_transient_for(padre)
    win.set_modal(True)
    vbox = gtk.VBox()
    win.add(vbox)
    fila_codigo = gtk.HBox()
    fila_codigo.add(gtk.Label("Código de rollo: "))
    entrada_codigo = gtk.Entry()
    # The code is shown for information only, not edited by hand.
    entrada_codigo.set_property("editable", False)
    entrada_codigo.set_property("has-frame", False)
    fila_codigo.add(entrada_codigo)
    boton_cancelar = gtk.Button(stock = gtk.STOCK_CANCEL)
    etiqueta_peso = gtk.Label(
        '<big><span color="dark green">Esperando peso...</span></big>')
    etiqueta_peso.set_use_markup(True)
    etiqueta_peso.set_justify(gtk.JUSTIFY_CENTER)
    etiqueta_peso.set_property('xalign', 0.5)
    check_ce = gtk.CheckButton("_Marcado CE")
    check_ce.set_active(True)
    check_defectuoso = gtk.CheckButton("_Defectuoso")
    check_defectuoso.set_active(False)
    # Toggling "defective" flips the CE checkbox and the suggested code.
    check_defectuoso.connect("toggled", cambiar_marcado_ce, check_ce,
                             entrada_codigo)
    for widget in (fila_codigo, etiqueta_peso, check_ce,
                   check_defectuoso, boton_cancelar):
        vbox.add(widget)
    win.resize(365, 150)
    win.move(435, 130)
    return (win, etiqueta_peso, entrada_codigo, boton_cancelar,
            check_ce, check_defectuoso)
def cambiar_marcado_ce(ch_defectuoso, ch_marcado, e_numrollo):
    """
    Keeps the CE-marking checkbox opposite to the "defective" one: when
    "defective" is activated the CE checkbox is cleared; otherwise it is
    set.  The entry is then refreshed with the next available code of the
    matching kind (next defective roll, or next normal roll, plus one).
    """
    es_defectuoso = ch_defectuoso.get_active()
    ch_marcado.set_active(not es_defectuoso)
    if es_defectuoso:
        clase = pclases.RolloDefectuoso
        consulta = "SELECT ultimo_codigo_rollo_defectuoso_mas_uno();"
    else:
        clase = pclases.Rollo
        consulta = "SELECT ultimo_codigo_rollo_mas_uno();"
    proximo_codigo = clase._connection.queryOne(consulta)[0]
    e_numrollo.set_text(proximo_codigo)
def get_puerto_serie():
    """
    Returns a pyserial object with the corresponding port open, or None
    if it could not be opened.
    On Windows the scale should be on COM2, which is the first port
    tried; if that port does not exist or nothing is attached, COM3..15
    are tried and COM1 last.  On POSIX /dev/ttyS0 .. /dev/ttyS15 are
    tried in order.
    """
    try:
        import serial
    except ImportError:
        utils.dialogo_info(titulo = "ERROR IMPORTACIÓN",
                           texto = "Debe instalar el módulo pyserial.",
                           padre = None)
        return None
    import os
    if os.name == "posix":
        candidatos = ["/dev/ttyS%d" % n for n in range(16)]
    else:
        # COM2 first, then 3..15, COM1 as a last resort.
        candidatos = ["COM%d" % n for n in [2] + list(range(3, 16)) + [1]]
    com = None
    for puerto in candidatos:
        try:
            com = serial.Serial(puerto)
            break
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; opening a port raises ordinary
        # exceptions (serial.SerialException, OSError), so Exception is
        # wide enough.
        except Exception:
            com = None
    if com is not None:
        # Simple EPELSA protocol configuration.
        com.baudrate = 9600
        com.bytesize = 8
        com.parity = 'N'
        com.stopbits = 1
        # BUGFIX: the timeout was assigned twice (None, then 0.5); only
        # the effective value is kept.  timeout_add is blocking, so reads
        # must time out quickly.
        com.timeout = 0.5
    return com
def cerrar_ventana_bascula(boton, ventana, com, src_id):
    """
    Closes (destroys, rather) the weighing window and closes the serial
    port.  The periodic read callback is detached first so it does not
    fire against a destroyed window or a closed port.

    :param boton: widget that triggered the callback (unused).
    :param ventana: gtk window to destroy.
    :param com: open pyserial port to close.
    :param src_id: gobject source id of the timeout_add callback.
    """
    import gobject
    gobject.source_remove(src_id)
    ventana.destroy()
    com.close()
def imprimir_etiqueta(articulo, marcado_ce, ventana_parte, defectuoso = False):
    """
    Creates and opens a PDF with the article's label, including the CE
    marking logo when marcado_ce is True.
    If "defectuoso" is True, a defective-roll label is printed instead
    (the original docstring said False, but the code prints the
    defective label when the flag is set).
    Labels are only generated when the roll number goes beyond the last
    label already printed (ventana_parte.ultima_etiqueta), to avoid
    duplicates; on the first roll of a report, labels for the whole
    estimated batch are generated in advance.
    """
    if defectuoso:
        imprimir_etiqueta_de_rollo_defectuoso(articulo.rolloDefectuoso)
    else:
        if (articulo.rollo.numrollo > ventana_parte.ultima_etiqueta
            or ventana_parte.ultima_etiqueta == None):
            import informes
            rollos = []
            producto = articulo.productoVenta
            try:
                campos = producto.camposEspecificosRollo
                if not campos.modeloEtiqueta:
                    fetiqueta = None    # Standard Geotexan label.
                else:
                    fetiqueta = campos.modeloEtiqueta.get_func()
            except AttributeError, e:   # Not a roll.
                print "partes_de_fabricacion_rollos::imprimir_etiqueta "\
                      "-> AttributeError: No es un rollo.", e
                fetiqueta = None
                campos = None
            except ValueError, e:   # It has no label template.
                print "partes_de_fabricacion_rollos::imprimir_etiqueta "\
                      "-> ValueError: No es un rollo.", e
                fetiqueta = None
                campos = None
            if not campos:
                # Without the roll-specific fields no label can be built:
                # warn the user and bail out.
                try:
                    ventana_padre = ventana_parte.wids['ventana']
                except (AttributeError, KeyError):
                    ventana_padre = None
                utils.dialogo_info(titulo = "ERROR ETIQUETA",
                    texto = "Ocurrió un error al generar las etiquetas.\n"
                            "Intente crearlas manualmente usando el botón \n"
                            "correspondiente de la parte inferior de \n"
                            "la ventana.",
                    padre = ventana_padre)
                return
            partida = articulo.rollo.partida.codigo
            if ventana_parte.ultima_etiqueta == None:
                # First label of the report: estimate how many rolls the
                # whole batch will produce and print them all in advance.
                kilos_fibra = 5500.0 * (1 - articulo.parteDeProduccion.merma) # CAREFUL: hardcoded.
                kilos_por_rollo = (campos.metros_cuadrados * campos.gramos) / 1000.0
                rollos_parte = int(kilos_fibra / kilos_por_rollo)
                ultima = articulo.rollo.numrollo + rollos_parte # Send every roll of the batch to print, unless they
                # were already printed and these are loose extra rolls.
            else:
                ultima = articulo.rollo.numrollo + 1
            for numrollo in xrange(articulo.rollo.numrollo, ultima):
                elemento = {'descripcion': producto.nombre,
                            'densidad': str(campos.gramos),
                            'ancho': "%s m" % (campos.ancho),
                            'peso': "%s kg" % (
                                int((campos.metros_cuadrados
                                     * campos.gramos
                                     / 1000.0))), # THEORETICAL weight, without packaging.
                            'm2': "%s m²" % (campos.metros_cuadrados),
                            'mlin': "%s m" % (campos.metrosLineales),
                            'nrollo': str(numrollo),
                            'partida': partida,
                            'codigo': producto.codigo,
                            'codigo39': "R%d" % (numrollo), # CAREFUL: if the roll coding changes, change this too.
                            'defectuoso': False,
                            'idrollo': 0,
                            'objeto': None, # If not created yet, since defectuoso == False, geninformes won't need it.
                            'productoVenta': articulo.productoVenta,
                           } # CAREFUL: if the roll-code format changes, it must also be changed here.
                rollos.append(elemento)
            # informes.mandar_a_imprimir_con_ghostscript(geninformes.etiquetasRollosEtiquetadora(rollos, marcado_ce))
            ventana_parte.ultima_etiqueta = ultima
            informes.abrir_pdf(geninformes.etiquetasRollosEtiquetadora(rollos,
                                                                      marcado_ce,
                                                                      fetiqueta))
def recv_serial(com, ventana, l_peso, ventana_parte, ch_marcado, e_numrollo,
                ch_defectuoso, objeto_ventana_parte):
    """
    Periodic callback (registered with gobject.timeout_add) that polls
    the scale's serial port.  When a non-zero weight is received it
    creates the corresponding roll (normal or defective depending on the
    checkbox), discounts the additional material and prints the label.
    Always returns True so the timeout keeps firing.
    """
    #DEBUG: print "callback launched. reading..." # Just as I suspected, this is BLOCKING with timeout_add.
    c = com.readline(eol = '\r')
    #DEBUG: print "read: ", c
    if c.strip() != '':
        # Parse the received text as a weight.
        try:
            peso = float(c)
        except Exception, msg:
            print "partes_de_fabricacion_rollos -> recv_serial", msg
            peso = 0
        if peso == 0:
            return True # When the scale is switched off and on it sends 0.  So if the weight is 0, no roll is created.
        l_peso.set_text(
            '<b><big><span color="dark green">%s</span></big></b>'
            % (utils.float2str(peso)))
        l_peso.set_use_markup(True)
        #DEBUG: print "Received weight: %f" % (peso)
        try:
            codigo_rollo_a_crear = get_proximo_codigo_a_crear(e_numrollo)
            partida = ventana_parte.get_partida()
            if partida != None:
                defectuoso = ch_defectuoso.get_active()
                numrollo = int(codigo_rollo_a_crear.upper().replace("R", "").replace("X", "")) # Whether normal or defective,
                # this should leave an integer corresponding to the roll number.
                articulo = crear_articulo(numrollo, partida, ventana_parte.producto, ventana_parte.objeto, peso = peso, objeto_ventana_parte = objeto_ventana_parte, defectuoso = defectuoso)
                if articulo != None:
                    descontar_material_adicional(ventana_parte, articulo)
                    imprimir_etiqueta(articulo, ch_marcado.get_active(),
                                      ventana_parte, defectuoso)
            else:
                utils.dialogo_info(titulo = "SIN PARTIDA",
                    texto = "Cancele y seleccione una partida "
                            "antes de introducir la producción.",
                    padre = ventana)
        except (psycopg_ProgrammingError, ValueError, AttributeError), msg:
            # DB or parsing failure: tell the user to weigh again and keep
            # the callback alive.
            txterror = "partes_de_fabricacion_rollos::recv_serial -> %s" % (msg)
            print txterror
            utils.dialogo_info(titulo = 'ROLLO NO CREADO',
                texto = 'El rollo no se pudo crear. Vuelva a pesarlo.\n\n\nSi el error persiste, tal vez esta información pueda ser útil:\n\n%s' % (txterror),
                padre = ventana)
            return True
        ventana_parte.actualizar_ventana()
        # Show the roll just created on the "last weighing" line.
        l_peso.set_text('<big><span color="dark green">Última pesada (%s): %s</span></big>' % (codigo_rollo_a_crear, l_peso.get_text()))
        # The variable now holds the next roll based on the last one created (since the checkbox stays
        # unchanged until the user toggles it, it will be of the same kind as the one just created).
        codigo_rollo_a_crear = get_proximo_codigo_a_crear(e_numrollo)
        e_numrollo.set_text("%s" % (codigo_rollo_a_crear))
        l_peso.set_use_markup(True)
    return True
def get_proximo_codigo_a_crear(e_numrollo):
    """
    From the entry widget, returns the next roll code to create.
    If the entry held a normal roll code (Rxxxx) a normal roll code is
    returned; if it held a defective roll code, a defective roll code
    (Xyyy) is returned.
    In any case the returned code is guaranteed to be really available:
    if other rolls were created while the previous code was on screen,
    the next free one is returned even if it does not match what was
    displayed (should only happen with two computers creating rolls).
    """
    codigo_actual = e_numrollo.get_text()
    if codigo_actual.startswith("X"):
        conexion = pclases.RolloDefectuoso._connection
        sql = "SELECT ultimo_codigo_rollo_defectuoso_mas_uno();"
    else:
        # Normal rolls, and also the fallback for unrecognized codes.
        conexion = pclases.Rollo._connection
        sql = "SELECT ultimo_codigo_rollo_mas_uno();"
    codigo_proximo_rollo = conexion.queryOne(sql)[0]
    if not codigo_actual.startswith(("R", "X")):
        print('partes_de_fabricacion_rollos::get_proximo_codigo_a_crear -> No se pudo determinar el tipo de rollo a crear. Creo uno "normal": %s.' % (codigo_proximo_rollo))
    return codigo_proximo_rollo
def crear_ventana_pesaje(ventana_parte, padre = None, rollo = None, objeto_ventana_parte = None):
    """
    Creates a weighing window that polls the scale over the serial port.
    Needs python-serial.
    NOTE(review): the original docstring said "COM1" is used on
    MS-Windows, but get_puerto_serie actually tries COM2 first; on
    GNU/Linux "/dev/ttyS0".."/dev/ttyS15" are tried in order.
    If ``rollo`` is given, its code is preloaded in the entry; otherwise
    the next free roll code is fetched from the database.
    """
    import gobject
    com = get_puerto_serie()
    # DEBUG: print com
    if com != None:
        (ventana,
         l_peso,
         e_numrollo,
         b_cancelar,
         ch_marcado,
         ch_defectuoso) = build_ventana(padre)
        # On WIN32 pyserial has no file descriptor, so poll with a timer
        # instead of watching the fd. :(
        # src_id = gobject.io_add_watch(com.fd, gobject.IO_IN | gobject.IO_HUP, recv_serial, com, ventana)
        src_id = gobject.timeout_add(1500, recv_serial, com, ventana, l_peso,
                                     ventana_parte, ch_marcado, e_numrollo,
                                     ch_defectuoso, objeto_ventana_parte)
        b_cancelar.connect("clicked", cerrar_ventana_bascula, ventana, com,
                           src_id)
        ventana.connect("destroy", cerrar_ventana_bascula, ventana, com, src_id)
        if rollo == None:
            # Preload the entry with the next free roll code from the DB.
            ultimo_mas_uno = pclases.Rollo._connection.queryOne(
                """SELECT ultimo_codigo_rollo_mas_uno();""")
            proximo_codrollo = ultimo_mas_uno[0]
        else:
            proximo_codrollo = rollo.codigo
        e_numrollo.set_text("%s" % (proximo_codrollo))
        ventana.show_all()
def actualizar_albaran_interno_con_tubos(pdp):
    """
    CWT: only manual consumptions appear on the internal delivery note;
    however, since tubes were traditionally consumed manually and are now
    automatic, tubes are special-cased so that they show up on the
    manual-consumptions delivery note even though they are automatic...
    WITH EVERYTHING THAT ENTAILS.
    1.- Check whether the production report has tube consumptions.
    2.- If so, check whether the report already has an internal delivery
        note.  If not, create it.
    3.- Remove from the delivery note any tube lines it has.
    4.- Create the new lines according to the report, so that what the
        internal delivery note shows -regarding tubes- matches the report.
    """
    # Aggregate tube consumptions per purchase product.
    cons_tubos = {}
    for c in pdp.consumos:
        if c.productoCompra.es_nucleo_carton():
            try:
                cons_tubos[c.productoCompra] += c.cantidad
            except KeyError:
                cons_tubos[c.productoCompra] = c.cantidad
    if not pdp.albaranInterno:
        buscar_o_crear_albaran_interno(pdp)
        actualizar_albaran_interno_con_tubos(pdp)
        # BUGFIX: the recursive call above has already synchronized the
        # delivery note; falling through here (as the old code did) would
        # delete and re-create the very same lines a second time.
        return
    # Drop the note's existing tube lines so they can be rebuilt.
    for ldv in pdp.albaranInterno.lineasDeVenta:
        if ldv.productoCompra in cons_tubos:
            try:
                ldv.destroySelf()
            except Exception as msg:
                # Is the LDV related to an order or something like that?
                # Could not delete it; just detach it from the note.
                ldv.albaranSalida = None
                print("partes_de_fabricacion_rollos::No se pudo eliminar LDV ID %d de albarán interno %s. Elimino relación entre ellos." % (ldv.id, pdp.albaranInterno.numalbaran))
    # Re-create one line per tube product, mirroring the report.
    for producto in cons_tubos:
        ldv = pclases.LineaDeVenta(productoCompra = producto,
                                   cantidad = cons_tubos[producto],
                                   precio = producto.precioDefecto,
                                   albaranSalida = pdp.albaranInterno,
                                   pedidoVenta = None,
                                   facturaVenta = None,
                                   productoVenta = None)
if __name__ == "__main__":
    # Ad-hoc manual test: open the window as the restricted "geotextil"
    # user (read/execute permissions only).
    p = PartesDeFabricacionRollos(permisos = "rx", usuario = pclases.Usuario.select(pclases.Usuario.q.usuario == "geotextil")[0])
    #p = PartesDeFabricacionRollos(permisos = "rxw", usuario = pclases.Usuario.select(pclases.Usuario.q.usuario == "admin")[0])
    #if len(sys.argv) > 1 and sys.argv[1] == "-b":
    #    crear_ventana_pesaje(None) # This will crash; no longer useful for unit tests.
    #    gtk.main()
    #else:
    #    p = PartesDeFabricacionRollos(permisos = "rxw")
| pacoqueen/bbinn | formularios/partes_de_fabricacion_rollos.py | Python | gpl-2.0 | 189,620 |
#!/usr/bin/env python3
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a simple script to run a java "binary".
This creates a script that sets up the java command line for running a java
jar. This includes correctly setting the classpath and the main class.
"""
import optparse
import os
import sys
from util import build_utils
# The java command must be executed in the current directory because there may
# be user-supplied paths in the args. The script receives the classpath relative
# to the directory that the script is written in and then, when run, must
# recalculate the paths relative to the current directory.
# Template for the generated wrapper script.  BUGFIX: the --print-classpath
# help text read "Prints the classpass"; corrected to "classpath".
script_template = """\
#!/usr/bin/env python
#
# This file was generated by build/android/gyp/create_java_binary_script.py
import argparse
import os
import sys
self_dir = os.path.dirname(__file__)
classpath = [{classpath}]
extra_program_args = {extra_program_args}
java_path = {java_path}
if os.getcwd() != self_dir:
  offset = os.path.relpath(self_dir, os.getcwd())
  fix_path = lambda p: os.path.normpath(os.path.join(offset, p))
  classpath = [fix_path(p) for p in classpath]
  java_path = fix_path(java_path)
java_cmd = [java_path]
# This is a simple argparser for jvm, jar, and classpath arguments.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--jar-args')
parser.add_argument('--jvm-args')
parser.add_argument('--classpath')
# Test_runner parses the classpath for sharding junit tests.
parser.add_argument('--print-classpath', action='store_true',
                    help='Prints the classpath. Used by test_runner.')
known_args, unknown_args = parser.parse_known_args(sys.argv[1:])
if known_args.print_classpath:
  sys.stdout.write(':'.join(classpath))
  sys.exit(0)
if known_args.jvm_args:
  jvm_arguments = known_args.jvm_args.strip('"').split()
  java_cmd.extend(jvm_arguments)
if known_args.jar_args:
  jar_arguments = known_args.jar_args.strip('"').split()
  if unknown_args:
    raise Exception('There are unknown arguments')
else:
  jar_arguments = unknown_args
if known_args.classpath:
  classpath += [known_args.classpath]
{extra_flags}
java_cmd.extend(
    ['-classpath', ':'.join(classpath), '-enableassertions', \"{main_class}\"])
java_cmd.extend(extra_program_args)
java_cmd.extend(jar_arguments)
os.execvp(java_cmd[0], java_cmd)
"""
def main(argv):
  """Generates an executable wrapper script that runs a java jar.

  Parses the build options, converts the classpath and java binary to
  paths relative to the output script's directory, renders the template
  and marks the result executable.
  """
  argv = build_utils.ExpandFileArgs(argv)
  parser = optparse.OptionParser()
  parser.add_option('--output', help='Output path for executable script.')
  parser.add_option('--main-class',
      help='Name of the java class with the "main" entry point.')
  parser.add_option('--classpath', action='append', default=[],
      help='Classpath for running the jar.')
  parser.add_option('--noverify', action='store_true',
      help='JVM flag: noverify.')
  parser.add_option('--tiered-stop-at-level-one',
      action='store_true',
      help='JVM flag: -XX:TieredStopAtLevel=1.')
  options, extra_program_args = parser.parse_args(argv)

  # Lines of python injected into the template to add optional JVM flags.
  flag_lines = []
  if options.noverify:
    flag_lines.append('java_cmd.append("-noverify")')
  if options.tiered_stop_at_level_one:
    flag_lines.append('java_cmd.append("-XX:TieredStopAtLevel=1")')

  full_classpath = []
  for cp_arg in options.classpath:
    full_classpath.extend(build_utils.ParseGnList(cp_arg))

  # The generated script recomputes paths relative to its own directory.
  run_dir = os.path.dirname(options.output)
  rel_classpath = [os.path.relpath(p, run_dir) for p in full_classpath]
  java_path = os.path.relpath(
      os.path.join(build_utils.JAVA_HOME, 'bin', 'java'), run_dir)

  with build_utils.AtomicOutput(options.output, mode='w') as script:
    script.write(
        script_template.format(classpath=('"%s"' % '", "'.join(rel_classpath)),
                               java_path=repr(java_path),
                               main_class=options.main_class,
                               extra_program_args=repr(extra_program_args),
                               extra_flags='\n'.join(flag_lines)))
  # rwxr-x---: executable by owner and group.
  os.chmod(options.output, 0o750)
if __name__ == '__main__':
  # main() returns None on success, so sys.exit(None) exits with status 0.
  sys.exit(main(sys.argv[1:]))
| youtube/cobalt | build/android/gyp/create_java_binary_script.py | Python | bsd-3-clause | 4,156 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This script is a simple wrapper which prefixes each i3status line with custom
# information. It is a python reimplementation of:
# http://code.stapelberg.de/git/i3status/tree/contrib/wrapper.pl
#
# To use it, ensure your ~/.i3status.conf contains this line:
# output_format = "i3bar"
# in the 'general' section.
# Then, in your ~/.i3/config, use:
# status_command i3status | ~/i3status/contrib/wrapper.py
# In the 'bar' section.
#
# In its current version it will display the cpu frequency governor, but you
# are free to change it to display whatever you like, see the comment in the
# source code below.
#
# © 2012 Valentin Haenel <valentin.haenel@gmx.de>
#
# This program is free software. It comes without any warranty, to the extent
# permitted by applicable law. You can redistribute it and/or modify it under
# the terms of the Do What The Fuck You Want To Public License (WTFPL), Version
# 2, as published by Sam Hocevar. See http://sam.zoy.org/wtfpl/COPYING for more
# details.
import sys
import subprocess
import json
import re
def print_line(message):
    """ Non-buffered printing to stdout. """
    sys.stdout.write('{}\n'.format(message))
    sys.stdout.flush()
def read_line():
    """ Interrupted respecting reader for stdin. """
    # Read one line, stripping surrounding whitespace; exit cleanly on
    # ctrl-c instead of spewing a traceback.
    try:
        line = sys.stdin.readline().strip()
    except KeyboardInterrupt:
        sys.exit()
    # i3status signals the end of its output with EOF or an empty line.
    if not line:
        sys.exit(3)
    return line
def cmus():
    """Return a status string from cmus.

    Result is 'M: <artist> - <title>' (falling back to the file name when
    no title tag exists), 'M: inactive' when cmus answers but reports
    nothing playable, and '' when cmus-remote is unavailable.
    """
    musicInfo = 'M: '
    fileName = ''
    artist = ''
    title = ''
    try:
        # BUGFIX: the old code wrapped check_output() in str(), which on
        # Python 3 yields the bytes *repr* ("b'...\\n...'"); splitting that
        # on '\n' never worked.  Decode the bytes instead.
        rawInfo = subprocess.check_output(['cmus-remote', '-Q']).decode()
    except (OSError, subprocess.CalledProcessError):
        # cmus-remote missing, or cmus not running: no music info.
        rawInfo = ''
    if rawInfo:
        for line in rawInfo.split('\n'):
            fileGroup = re.search(r'^file (.*?)$', line)
            if fileGroup:
                location = fileGroup.group(1)
                # Keep just the base name, without directories or extension.
                fileName = re.search(r'^/(?:.*?/)*(.*)\..*$', location).group(1)
            elif line.startswith('tag title'):
                title = line[10:]
            elif line.startswith('tag artist'):
                artist = line[11:]
        # Organise into the final string.
        if title or fileName:
            if artist:
                musicInfo += artist + ' - '
            musicInfo += title if title else fileName
        else:
            musicInfo += 'inactive'
    else:
        musicInfo = ''
    return musicInfo
def mpd():
    """Return a status string from mpd (via mpc).

    Result is 'M: <artist> - <title>' (falling back to the file name),
    'M: Inactive' when mpd answers but nothing is playing, and '' when
    mpc fails or is not installed.
    """
    musicString = 'M: '
    info = {'file': '', 'artist': '', 'title': ''}
    # Get data via mpc.
    try:
        for attribute in info.keys():
            info[attribute] = subprocess.check_output(['mpc', '-f', '%{}%'.format(attribute), 'current']).decode().strip()
    # BUGFIX: only CalledProcessError was caught, so a missing mpc binary
    # crashed the whole status bar with FileNotFoundError; OSError covers
    # that case too.
    except (OSError, subprocess.CalledProcessError):
        return ''
    # Organise into the final string.
    if info['file']:
        fileRe = re.search(r'^(?:.*?/)*(.*)\..*$', info['file']) #grab filename from relative filepath
        if fileRe:
            info['file'] = fileRe.group(1)
        if info['artist']:
            musicString += info['artist'] + ' - '
        musicString += info['title'] if info['title'] else info['file']
    else:
        musicString += 'Inactive'
    return musicString
def stackTodo():
    """Return 'ST: <item>' for the current stacked todo item, '' if none.

    Runs the user's stackTodo.py helper in quiet mode and formats its
    output for the status bar.
    """
    scriptPath = '/home/rubic/Scripts/noteKeeping/stackTodo.py'
    item = subprocess.check_output([scriptPath, 'quiet']).decode().strip()
    return "ST: {}".format(item) if item else ""
if __name__ == '__main__':
    # Skip the first line which contains the version header.
    print_line(read_line())
    # The second line contains the start of the infinite array.
    print_line(read_line())
    # From here on, every line from i3status is one JSON array of blocks;
    # decorate it and echo it back.
    while True:
        line, prefix = read_line(), ''
        # ignore comma at start of lines
        if line.startswith(','):
            line, prefix = line[1:], ','
        j = json.loads(line)
        # insert information into the start of the json, but could be anywhere
        musicString = mpd()
        # add the music block to the json structure only when non-empty
        if musicString:
            j.insert(0, {'full_text' : musicString, 'name' : 'music'})
        todoString = stackTodo()
        if todoString:
            j.insert(0, {'full_text' : todoString, 'name' : 'todo'})
        # and echo back new encoded json (re-adding the leading comma)
        print_line(prefix+json.dumps(j))
| rukai/dotfiles | .i3/i3bar/wrapper.py | Python | gpl-3.0 | 4,450 |
#! /usr/bin/env python
"""
This programs performs a geometry optimization of the cation of a closed-shell
molecule with cp2k using generic settings: DFT/PBE.
Note that is mandatory to define a cell_parameter, and a xyz structure.
If you have a restart file, a basis set and you can also define
it in the command line.
It assumes that the basis and pot files are in $HOME/cp2k_basis
folder in your home, which can be changed)
It assumes a DZVP by default, which can be also changed
It is always advised to submit the script using a JOB Manager like Slurm
"""
from qmflows import (cp2k, run, templates)
from scm.plams import Molecule
import argparse
from os.path import join
import os
def main(file_xyz, cell, restart, basis, basis_folder):
    """
    Run a CP2K geometry optimization of the cation (charge +1,
    unrestricted Kohn-Sham, multiplicity 2) of the molecule in
    ``file_xyz`` and print the resulting energy.

    :param file_xyz: path to the xyz geometry file.
    :param cell: cell parameters for the periodic box.
    :param restart: path to a wavefunction restart file ('' for none).
    :param basis: basis-set name (e.g. DZVP-MOLOPT-SR-GTH).
    :param basis_folder: folder holding BASIS_MOLOPT and GTH_POTENTIALS.
    """
    # Define which systems need to be calculated
    system = Molecule(file_xyz)
    # Set path for basis set
    basisCP2K = join(basis_folder, "BASIS_MOLOPT")
    potCP2K = join(basis_folder, "GTH_POTENTIALS")
    # Settings specifics
    # NOTE(review): this mutates the shared ``templates.geometry`` object
    # in place rather than a copy — presumably fine for a one-shot script,
    # but verify before calling main() more than once per process.
    s = templates.geometry
    s.basis = basis
    s.potential = "GTH-PBE"
    s.cell_parameters = cell
    s.specific.cp2k.force_eval.dft.basis_set_file_name = basisCP2K
    s.specific.cp2k.force_eval.dft.potential_file_name = potCP2K
    # Cation settings: open shell, net charge +1, doublet.
    s.specific.cp2k.force_eval.dft.uks = ''
    s.specific.cp2k.force_eval.dft.charge = '1'
    s.specific.cp2k.force_eval.dft.multiplicity = '2'
    s.specific.cp2k.force_eval.dft.wfn_restart_file_name = f'{restart}'
    # =======================
    # Compute OPT files with CP2k
    # =======================
    result = run(cp2k(s, system))
    # ======================
    # Output the results
    # ======================
    print(result.energy)
def read_cmd_line(parser):
    """
    Parse Command line options.

    Returns the parsed values in the fixed order expected by main():
    xyz, cell, restart, basis, bas_fold.
    """
    args = parser.parse_args()
    return [getattr(args, name)
            for name in ('xyz', 'cell', 'restart', 'basis', 'bas_fold')]
# ============<>===============
if __name__ == "__main__":
msg = "plot_decho -xyz <path/to/xyz> -cell <cell_size>\
-restart <path/to/restart_file_name>\
-basis <nameOfBasisSet>\
-bas_fold <path/to/basis_set>"
home = os.path.expanduser('~')
parser = argparse.ArgumentParser(description=msg)
parser.add_argument(
'-xyz', required=True, help='path to the xyz file')
parser.add_argument(
'-cell', required=True, help='Size of the cell')
parser.add_argument(
'-restart', type=str, default='', help='path to restart file name')
parser.add_argument(
'-basis', type=str, default='DZVP-MOLOPT-SR-GTH', help='Basis-set name')
parser.add_argument(
'-bas_fold', type=str, default=join(home, 'cp2k_basis'),
help='Location of basis set files')
main(*read_cmd_line(parser))
| felipeZ/nonAdiabaticCoupling | scripts/qmflows/opt_cation_cp2k.py | Python | mit | 2,765 |
#!/usr/bin/env python
import os
from skimage._build import cython
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
    """
    Build the numpy.distutils configuration for skimage.feature:
    regenerates C/C++ sources from the Cython .pyx files, then registers
    one extension module per generated source.

    NOTE(review): relies on numpy.distutils, which is deprecated and
    removed from recent NumPy releases — appropriate only for the legacy
    build this file targets; confirm against the pinned NumPy version.
    """
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    config = Configuration('feature', parent_package, top_path)
    config.add_data_dir('tests')
    # Cythonize each .pyx next to this file (the helper skips files whose
    # generated output is already up to date).
    cython(['corner_cy.pyx'], working_path=base_path)
    cython(['censure_cy.pyx'], working_path=base_path)
    cython(['orb_cy.pyx'], working_path=base_path)
    cython(['brief_cy.pyx'], working_path=base_path)
    cython(['_texture.pyx'], working_path=base_path)
    cython(['_hessian_det_appx.pyx'], working_path=base_path)
    cython(['_hoghistogram.pyx'], working_path=base_path)
    cython(['_haar.pyx'], working_path=base_path)
    config.add_extension('corner_cy', sources=['corner_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('censure_cy', sources=['censure_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('orb_cy', sources=['orb_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('brief_cy', sources=['brief_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_texture', sources=['_texture.c'],
                         include_dirs=[get_numpy_include_dirs(), '../_shared'])
    config.add_extension('_hessian_det_appx', sources=['_hessian_det_appx.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_hoghistogram', sources=['_hoghistogram.c'],
                         include_dirs=[get_numpy_include_dirs(), '../_shared'])
    # _haar is the only C++ extension in this subpackage.
    config.add_extension('_haar', sources=['_haar.cpp'],
                         include_dirs=[get_numpy_include_dirs(), '../_shared'],
                         language="c++")
    return config
if __name__ == '__main__':
    # Standalone build entry point: hand the configuration built above to
    # numpy.distutils' setup().
    from numpy.distutils.core import setup
    setup(maintainer='scikit-image Developers',
          author='scikit-image Developers',
          maintainer_email='scikit-image@python.org',
          description='Features',
          url='https://github.com/scikit-image/scikit-image',
          license='SciPy License (BSD Style)',
          **(configuration(top_path='').todict())
          )
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/feature/setup.py | Python | gpl-3.0 | 2,360 |
import sys
from ariba import versions
def run(options):
    """Print the gathered version report, one line per entry.

    ``options`` is accepted for task-interface uniformity but unused;
    errors from missing external programs are not raised here.
    """
    extern_progs, report_lines = versions.get_all_versions(raise_error=False)
    print('\n'.join(report_lines))
| martinghunt/ariba | ariba/tasks/version.py | Python | gpl-3.0 | 170 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
MetaMaker is a basic tool for simulating metagenomic datasets. It downloads a
number of genomes, splits them into reads and creates a fastq output file
simulating a sequencing run.
"""
import os
import sys
import time
import json
import numpy
import random
import logging
import threading
import curses.ascii
from Bio import Entrez, SeqIO
# Please add your own e-mail address here!
# It makes the people at Entrez super happy!
Entrez.email = "MetaMaker@slu.se"
class MetaMaker( threading.Thread ):
"""
Viral Metagenomic dataset simulator.
This module can be used to simulate metagenomic datasets from sequencing profiles,
as well as create sequencing profiles from sequence data.
"""
    def __init__(self, outfile = "output", num_genomes = 10, log = None, log_level = logging.INFO, profile_dir = "profiles"):
        """
        Reads arguments and sets default settings.

        :param outfile: basename (or full filename) for the .fastq/.key
            output files.
        :param num_genomes: number of genomes to put in the dataset.
        :param log: optional external logger; a new one with a stream
            handler is created when omitted.
        :param log_level: logging level applied to the logger.
        :param profile_dir: directory holding sequencing profiles.
        """
        threading.Thread.__init__(self)
        self.num_genomes = num_genomes
        # Output filenames: add the proper extension only when missing.
        self.outfile = outfile if outfile.endswith("fastq") else "%s.fastq" % outfile
        self.keyfile = outfile if outfile.endswith("key") else "%s.key" % outfile
        # Simulation defaults; callers tweak these attributes before start().
        self.taxa = 'viruses'
        self.reads = 1000
        self.read_length = 200
        self.length_var = 0
        self.quality_mean = [25]
        self.quality_var = [10]
        self.distribution = 'uniform'
        self.progress = False
        self.profile_dir = profile_dir
        self.matepair = True
        self.insert_size = 500
        self.log = log if log else logging.getLogger( __name__ )
        self.log.setLevel( log_level )
        if not log:
            # No external logger supplied: attach our own stream handler
            # with a compact timestamped format.
            self.log_handler = logging.StreamHandler()
            self.log_handler.setLevel( log_level )
            self.log_handler.setFormatter( logging.Formatter( '%(asctime)s %(levelname)s: %(message)s', "%H:%M:%S" ) )
            self.log.addHandler(self.log_handler)
        self.quality_cache = []
        self.variance_cache = []
        self._progress = 0
        # Event used to request a cooperative stop of the worker thread.
        self._stop = threading.Event()
        self.running = False
def _get_tax_id(self, data):
"""
Attempts to get taxonomic id from NCBI, given some data.
"""
organism = data['Organism_Name']
for retries in xrange(5):
try:
search = Entrez.read(Entrez.esearch('taxonomy', organism))
data = Entrez.read(Entrez.esummary(db='taxonomy',
id=search['IdList'][0]))
if data[0]['ScientificName'] != organism:
raise Exception("Something went wrong in the search!")
return data[0]['TaxId']
except Exception as e:
pass
return None
def _list_ncbi(self, max = 10000):
    """
    Lists (searches) NCBI entries for the specified taxa.

    :param max: maximum number of ids to request from NCBI.
        NOTE(review): this parameter shadows the built-in max() inside
        this method; kept as-is to preserve the keyword interface.
    :returns: the 'IdList' result of the Entrez genome search.
    """
    self.log.info('Getting list of %s from NCBI' % self.taxa)
    term = "%s[Organism]" % self.taxa
    handle = Entrez.esearch("genome", term = term, retmax = max)
    results = Entrez.read(handle)
    self.log.info(' + Found %i %s' % (len(results['IdList']), self.taxa))
    return results['IdList']
def _list(self):
    """
    Wrapper function in case more sources are added.

    :returns: a fresh list of candidate genome ids (currently NCBI only).
    """
    return list(self._list_ncbi())
def _make_dataset(self):
    """
    Creates the metadata for the project.

    Randomly draws self.num_genomes genomes from the candidate id list,
    resolves a taxonomy id and a nucleotide id for each, and assigns a
    read count per genome according to self.distribution.

    :returns: list of per-genome dicts (genome_id, def, organism,
        project, nuc_id, tax_id, reads).
    :raises Exception: when the candidate list runs out of genomes.
    """
    dataset = []
    # NOTE(review): on Python 2 this is integer division; the per-genome
    # read count is truncated.
    avg_reads = self.reads/self.num_genomes
    if self.distribution.lower() == 'exponential':
        # Base of the exponential series so that n**(num_genomes-1) == reads.
        n = self.reads**(1.0/(self.num_genomes-1))
    ids = self._list()
    last = 0
    i = 0
    self.log.info("Making dataset")
    while i < self.num_genomes:
        if self._stop.isSet():
            break
        if ids:
            # Draw a random candidate and consume it from the pool.
            genome_id = random.choice(ids)
            ids.remove(genome_id)
        else:
            raise Exception('Not enough genomes.')
        # Skip candidates that are already part of the dataset.
        new = True
        for prev in dataset:
            if prev['genome_id'] == genome_id:
                new = False
                break
        if not new:
            continue
        summary = Entrez.read(Entrez.esummary(db="genome", id=genome_id))[0]
        self.log.debug(" + Trying: %s" % summary['Organism_Name'])
        # Get a taxonomy id if we're printing a key-file
        tax_id = self._get_tax_id(summary)
        if not tax_id:
            self.log.debug(" - Failed: no tax id")
            continue
        # Make sure we have a nucleotide id to download the data later.
        nuc_id = None
        search_term = "%s[Organism] complete genome" % summary['Organism_Name']
        try:
            project_data = Entrez.read(Entrez.esearch("nucleotide", search_term))
            nuc_id = project_data['IdList'][0]
        except Exception as e:
            self.log.debug(" - Failed: no nuc id")
            continue
        self.log.info(" * Added %s to dataset" % summary['Organism_Name'])
        data = {'genome_id':genome_id, 'def':summary['DefLine'],
                'organism':summary['Organism_Name'],
                'project':summary['ProjectID'],
                'nuc_id':nuc_id,
                'tax_id':tax_id}
        if self.distribution.lower() == 'uniform':
            data['reads'] = avg_reads
        elif self.distribution.lower() == 'exponential':
            # i-th term of the exponential series, at least one read.
            data['reads'] = max(1, int(round(n**i - last)))
            last += data['reads']
        else:
            self.log.warning("WARNING: couldn't understand distribution '%s', Defaulting to: Uniform" % \
                             self.distribution)
            data['reads'] = avg_reads
        dataset += [data]
        i += 1
    return dataset
def _make_read(self, seq):
"""
Extracts a single, or mate-paired read from a sequence, and returns the
read sequence as well as the position metadata.
"""
length = int(self.read_length)
stdev = numpy.sqrt(self.length_var)
read_length = length + int(numpy.random.normal(0, stdev)) if stdev else length
if self.matepair:
mate_length = length + int(numpy.random.normal(0, stdev)) if stdev else length
min_length = max(read_length, mate_length, self.insert_size)
else:
min_length = read_length
start = random.randint(0, max(0, len(seq)-min_length))
read_pos = (start, start + read_length)
read_seq = seq[read_pos[0]:read_pos[1]]
read_qual = self._make_quality(read_seq)
output = [(read_seq, read_pos, read_qual)]
if self.matepair:
mate_start = start + min_length - mate_length
mate_pos = (mate_start, mate_start+mate_length)
mate_seq = seq[mate_pos[0]:mate_pos[1]]
mate_qual = self._make_quality(mate_seq)
output += [(mate_seq, mate_pos, mate_qual)]
return output
def _make_quality(self, seq):
"""
Simulates read quality from an error function.
Qualities are in Sanger Fastq format (Phred+33), i.e. quality is
represented by an integer from 0 to 93, represented by the ascii
characters 33-126.
Errors are represented as 10^-0.0 (random base) to 10^-9.3 (super
accurate).
ref: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2847217/?tool=pubmed
This might be re-written in the future using Biopythons QualityIO,
http://www.biopython.org/DIST/docs/api/Bio.SeqIO.QualityIO-module.html
"""
output = ""
for i, q in enumerate(seq):
if len(self.quality_cache) <= i:
f = numpy.poly1d(self.quality_mean)
self.quality_cache += [f(len(self.quality_cache))]
if len(self.variance_cache) <= i:
v = numpy.poly1d(self.quality_var)
self.variance_cache += [v(len(self.variance_cache))]
quality = self.quality_cache[i]
var = numpy.random.normal(0, numpy.sqrt(self.variance_cache[i]))
if not numpy.isnan(var):
quality += var
quality = min(93, max(int(quality), 0))
output += "%c" % (33+quality)
return output
def _write_csv(self, dataset, separator = ','):
"""
Writes a csv file
"""
self.log.info('Creating Key file')
header = ['Genome ID', 'Tax ID', 'Definition', 'Organism', 'No. Reads']
with open(self.keyfile, 'w') as key:
key.write( "%s\n" % (separator.join(header)) )
for i in dataset:
data = [i['genome_id'], i['tax_id'], i['def'],
i['organism'], i['reads']]
key.write( "%s\n" % (separator.join(map(str,data))) )
def load_profile(self, profile_name):
    """
    Loads the run values from a MetaMaker profile into the system.

    Unknown profile names are ignored with a warning.

    :param profile_name: key of a profile returned by get_profiles().
    """
    profiles = self.get_profiles(None, self.profile_dir)
    if profile_name in profiles:
        profile = profiles[profile_name]
        # Copy the profile values over the current settings.
        self.reads = profile['default_reads']
        self.read_length = profile['read_length_mean']
        self.length_var = profile['read_length_var']
        self.quality_mean = profile['quality_mean']
        self.quality_var = profile['quality_var']
        self.log.info("Using profile '%s'" % profile_name)
        self.log.info(" + Number of reads: %.1e" % self.reads)
        self.log.info(" + Read length : %i±%i Bp" % (self.read_length, numpy.sqrt(self.length_var),))
    else:
        self.log.warning("Unknown profile '%s', ignoring." % profile_name)
def progress(self):
    """
    Returns the progress of the current action.

    NOTE(review): __init__ assigns `self.progress = False`, which shadows
    this method on instances, so `inst.progress()` raises ('bool' is not
    callable). Kept as-is to avoid changing the public interface.
    """
    return self._progress
@staticmethod
def get_profiles(return_format = None, profile_dir = 'profiles'):
"""
Returns a list of allowed sequencing profiles.
"""
profiles = {}
try:
for profile in os.listdir(profile_dir):
if profile.split('.')[-1].lower() == 'json' and profile[0] != '.':
data = json.load(open("%s/%s" % (profile_dir, profile)))
profiles[data['key']] = data
except:
return profiles
if return_format == "human":
keys = profiles.keys()
if len(profiles) < 2:
return keys[0]
return ", ".join(keys[:-1]) + " or " + keys[-1]
if return_format == "keys":
return profiles.keys()
return profiles
@staticmethod
def parse_profile(infiles, output = None, profile_dir = 'profiles'):
    """
    Creates a sequencing profile (json file) from one or more fastq files.

    Accumulates read-length and per-position quality statistics over all
    reads, fits least-squares polynomials to the quality mean/variance,
    and writes the profile to '<profile_dir>/<output>.json'.

    :param infiles: list of fastq file names to profile.
    :param output: profile key / output basename; defaults to the first
        input file's basename.
    :param profile_dir: directory the profile is written to.
    """
    min_length = 1e12
    max_length = 0
    length_mean = 0
    length_var = 0
    min_qual = None
    max_qual = None
    qual_mean = None
    qual_var = 0
    count = []
    for infile in infiles:
        for i, record in enumerate(SeqIO.parse(infile, 'fastq')):
            qual = numpy.array(record.letter_annotations['phred_quality'],
                               float)
            # Accumulate sum and sum-of-squares for mean/variance.
            length_mean += len(record.seq)
            length_var += len(record.seq)**2
            if len(record.seq) < min_length:
                min_length = len(record.seq)
            if len(record.seq) > max_length:
                max_length = len(record.seq)
            # BUGFIX: `is None` instead of `== None` — once qual_mean is
            # a numpy array, `== None` is an elementwise comparison and
            # truth-testing the result raises with modern numpy.
            if qual_mean is None:
                qual_mean = numpy.array(qual)
                max_qual = qual
                min_qual = qual
                qual_var = qual**2
            else:
                for p, q in enumerate(qual):
                    if p >= len(qual_mean):
                        # First read reaching this position: extend arrays.
                        qual_mean = numpy.append(qual_mean, q)
                        max_qual = numpy.append(max_qual, q)
                        min_qual = numpy.append(min_qual, q)
                        qual_var = numpy.append(qual_var, q**2)
                    else:
                        qual_mean[p] += q
                        qual_var[p] += q**2
                        if q > max_qual[p]:
                            max_qual[p] = q
                        if q < min_qual[p]:
                            min_qual[p] = q
            # counter to keep track of how many values are stored for each
            # nucleotide position
            for p, q in enumerate(qual):
                if p >= len(count):
                    count = numpy.append(count, 1.0)
                else:
                    count[p] += 1.0
    # convenience variables
    tot = float(count[0])  # total number of reads
    mean_reads = round(tot / len(infiles))
    length_mean = length_mean / tot
    length_var = length_var / tot - length_mean**2
    qual_mean = qual_mean / count
    qual_var = qual_var / count - qual_mean**2
    if not output:
        output = infiles[0].split('/')[-1].split('.')[0]
    # least squares approximation coefficients
    m = numpy.polyfit(range(1, len(qual_mean)+1), qual_mean, 4)
    v = numpy.polyfit(range(1, len(qual_var )+1), qual_var, 2)
    # Save profile
    profile = {"key": output,
               "default_reads": mean_reads,
               "read_length_mean": length_mean,
               "read_length_var": length_var,
               "quality_mean": list(m),
               "quality_var": list(v),
               }
    if not output.endswith(".json"):
        output = "%s.json" % output
    with open("%s/%s" % (profile_dir, output), 'w') as out:
        out.write(json.dumps(profile, indent=True))
def run(self):
    """
    Starts the job of creating a metagenomic sample set.

    Builds the dataset metadata, optionally writes the key file, then
    downloads each genome from NCBI and writes simulated reads (with
    simulated errors) to the fastq output file(s). Runs on the worker
    thread; honours self._stop for cooperative cancellation.
    """
    try:
        if self.running:
            self.log.error('Already running MetaMaker, can\'t start again.')
            # NOTE(review): no `return` here — a second run() proceeds
            # anyway despite the error message.
        self.log.info("Running MetaMaker")
        self.running = True
        if self.matepair:
            # Mate-pairs are written to two files: <base>.1/2.fastq.
            base = ".".join(self.outfile.split('.')[:-1])
            self.log.info('output: %s.1.fastq & %s.2.fastq, key-file: %s' %
                          (base, base, self.keyfile) )
        else:
            self.log.info('output: %s, key-file: %s' %
                          (self.outfile, self.keyfile) )
        dataset = self._make_dataset()
        # Print debug information about the dataset
        self.log.debug('DATASET:')
        tot_reads = 0
        for i in dataset:
            self.log.debug("%i\t%s" % (i['reads'], i['def']))
            tot_reads += i['reads']
        self.log.debug("TOTAL READS: %i" % tot_reads)
        # Create the key file
        if self.keyfile:
            self._write_csv(dataset)
        # Start creating the fastq output file.
        if self.matepair:
            base = ".".join(self.outfile.split('.')[:-1])
            out = open("%s.1.fastq" % base, 'w')
            mate = open("%s.2.fastq" % base, 'w')
        else:
            out = open(self.outfile, 'w')
        for metadata in dataset:
            if self._stop.isSet():
                break
            self._progress = 0.0
            self.log.info("* Parsing %s" % metadata['def'])
            self.log.info(" * Downloading")
            # Up to five download attempts per genome.
            for tries in xrange(5):
                if self._stop.isSet():
                    break
                try:
                    data = Entrez.efetch(db="nucleotide", id=metadata['nuc_id'],
                                         rettype="gb", retmode="text")
                    break
                except Exception as e:
                    self.log.warning(e)
                    # NOTE(review): project_data is not defined in this
                    # scope (it is local to _make_dataset) — reaching this
                    # line raises NameError.
                    self.log.info(project_data)
                    self.log.info(" * Retrying")
                    pass
            # NOTE(review): if all five attempts fail (or stop was set),
            # `data` may be unbound below.
            self.log.info(" * Creating Reads" )
            for record in SeqIO.parse(data,"gb"):
                if self._stop.isSet():
                    break
                # TODO: make use of several records if present
                for i in xrange(int(metadata['reads'])):
                    if self._stop.isSet():
                        break
                    seqs = []
                    while not seqs:
                        if self._stop.isSet():
                            break
                        seqs = self._make_read(record.seq)
                    # apply quality to read(s)
                    first = True
                    for seq, pos, quality in seqs:
                        if self._stop.isSet():
                            break
                        # Introduce sequencing errors with probability
                        # derived from the Phred+33 quality character.
                        seq = list(seq)
                        for j, q in enumerate(quality):
                            if numpy.random.random() < (10**-((ord(q)-33)/10.0)):
                                seq[j] = 'actg'[numpy.random.randint(4)]
                        seq = "".join(seq)
                        header = "@%s|ref:%s-%i|pos:%i-%i" % (record.id,
                                                              metadata['genome_id'],
                                                              i, pos[0], pos[1])
                        if self.matepair:
                            # First read goes to .1.fastq, its mate to .2.fastq.
                            if first:
                                out.write("%s/1\n" % header)
                                out.write("%s\n" % seq)
                                out.write("+\n%s\n" % quality)
                            else:
                                mate.write("%s/2\n" % header)
                                mate.write("%s\n" % seq)
                                mate.write("+\n%s\n" % quality)
                        else:
                            out.write("%s\n" % header)
                            out.write("%s\n" % seq)
                            out.write("+\n%s\n" % quality)
                        first = False
                    self._progress = (i+1)/float(int(metadata['reads']))
                # Only the first record is used (see TODO above).
                break
        out.close()
        if self.matepair:
            mate.close()
        # -1 signals completion to progress consumers.
        self._progress = -1
        self.log.info("Finished. All went well!")
        if self.matepair:
            base = ".".join(self.outfile.split('.')[:-1])
            self.log.info("Results saved to %s.1.fastq & %s.2.fastq" %
                          (base, base))
        else:
            self.log.info("Results saved to %s" % self.outfile)
    except RuntimeError as e:
        # NOTE(review): RuntimeErrors are silently ignored.
        pass
    except Exception as e:
        self.log.error(e)
    self.running = False
def set(self, key, value):
"""
Sets a value in the settings.
"""
if getattr(self, key, None) != None:
if key == 'keyfile' and value and not value.endswith('.csv'):
value = "%s.csv" % value
setattr(self, key, value)
else:
raise Exception("Unknown key '%s'." % key)
def set_log(self, name, level=logging.INFO, handler = None):
"""
Sets up logging using the given log name and level.
"""
self.log = logging.getLogger( name )
if handler:
handler.setLevel( level )
self.log.addHandler( handler )
def stop(self):
self._stop.set()
if __name__ == '__main__':
    import argparse
    # Command line interface: defaults mirror the MetaMaker defaults.
    parser = argparse.ArgumentParser( description = __doc__,
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-c", "--create", help="Create new profile from file(s).", nargs="+", default=None)
    parser.add_argument("-d", "--distribution", help="Read distribution, 'uniform' or 'exponential'", default="uniform")
    parser.add_argument("-i", "--insert", help="Matepair insert size.", type=int, default=3000)
    parser.add_argument("-k", "--keyfile", help="key filename.", default=None)
    parser.add_argument("-l", "--length_var", help="Length variance.", default=0.0, type=float)
    parser.add_argument("-o", "--output", help="Output filename", default="output")
    parser.add_argument("-p", "--progress", default=False, action='store_true', help="Display progress information for long tasks.")
    parser.add_argument("-m", "--matepair", help="Generate matepairs.", action="store_true", default=False)
    # no_reads/read_length accept K/M/G magnitude suffixes (parsed below).
    parser.add_argument("-n", "--no_reads", help="Number of reads.", default="50M")
    parser.add_argument("-r", "--read_length", help="Read length", default="200")
    parser.add_argument("-s", "--no_species", help="Number of species.", default=10, type=int)
    parser.add_argument("-f", "--profile", default=None,
                        help=("Sequencing profile to use for read generation. Changes default for "
                              "reads, read_length and error_function. Valid options are %s") % \
                        MetaMaker.get_profiles('human'))
    parser.add_argument("-x", "--taxa", default="viruses", help=("Taxonomic identifier of the species to download."))
    funcs = parser.add_argument_group("quality function arguments", "Factors for the quality and variance functions")
    funcs.add_argument("-a", "--error_variance", nargs="+", type=float, default = [0],
                       help=("Factors for the error variance approximation equation.") )
    funcs.add_argument("-e", "--error_function", nargs="+", type=float, default = [25.0],
                       help="Factors for the error approximation equation.")
    # Repeatable -v/-q flags shift the log level (see computation below).
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help="Increase output Verbosity")
    parser.add_argument("-q", "--quiet", action = "count", default = 0, help="Decrease output Verbosity")
    args = parser.parse_args()
def _parse_magnitude(value):
    """Converts '50K'/'50M'/'50G' style strings to plain integers."""
    factors = {'k': 10 ** 3, 'm': 10 ** 6, 'g': 10 ** 9}
    text = str(value)
    if text and text[-1].lower() in factors:
        return int(text[:-1]) * factors[text[-1].lower()]
    return int(text)

# Replaces the previous eval()/exec() string building, which executed
# generated code; getattr/setattr does the same safely.
for arg in ['no_reads', 'read_length']:
    setattr(args, arg, _parse_magnitude(getattr(args, arg)))
# Default level is WARNING (30); each -v/-q shifts it by 10, floored at 10.
level = 50-(2+args.verbose-args.quiet)*10
level = 10 if level < 10 else level
log = logging.getLogger( "MetaMaker" )
log.setLevel( level )
formatter = logging.Formatter( ('%(asctime)s | %(name)s '
                                '%(levelname)s: %(message)s') )
console_handler = logging.StreamHandler()
console_handler.setLevel( level )
console_handler.setFormatter(formatter)
log.addHandler(console_handler)
app = MetaMaker( args.output, args.no_species )
if args.create:
    # Profile creation mode: build a profile from the given fastq files.
    # (parse_profile is a @staticmethod; calling it on the instance works.)
    app.parse_profile(args.create, args.output)
else:
    # Simulation mode: apply profile first, then CLI overrides.
    if args.profile:
        app.load_profile( args.profile )
    app.set('keyfile', args.keyfile)
    app.set('taxa', args.taxa)
    app.set('reads', args.no_reads)
    app.set('read_length', args.read_length)
    app.set('length_var', args.length_var)
    app.set('quality_mean', args.error_function)
    # NOTE(review): args.error_variance is parsed but never applied here
    # (quality_var keeps its default/profile value).
    app.set('distribution', args.distribution)
    app.set('matepair', args.matepair)
    app.set('insert_size', args.insert)
    app.set('progress', args.progress)
app.run() | norling/metlab | metlab/metamaker.py | Python | gpl-3.0 | 24,644 |
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_spark_job
MODULE = 'kfp_component.google.dataproc._submit_spark_job'
# Patch submit_job inside the module under test so no real Dataproc request
# is made; the mock is injected into each test as `mock_submit_job`.
@mock.patch(MODULE + '.submit_job')
class TestSubmitSparkJob(unittest.TestCase):
    """Unit tests for the submit_spark_job GCP component."""

    def test_submit_spark_job_with_expected_payload(self, mock_submit_job):
        # Call the component with both a spark_job fragment and a generic
        # job fragment...
        submit_spark_job('mock-project', 'mock-region', 'mock-cluster',
            job_id_output_path='/tmp/kfp/output/dataproc/job_id.txt',
            main_jar_file_uri='gs://mock/jar/file.jar',
            args=['arg1', 'arg2'],
            spark_job={ 'jarFileUris': ['gs://other/jar/file.jar'] },
            job={ 'labels': {'key1': 'value1'}})
        # ...and verify they are merged into the expected request payload.
        # NOTE(review): the positional 30 is presumably the wait interval —
        # confirm against the submit_job signature.
        mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
            {
                'sparkJob': {
                    'mainJarFileUri': 'gs://mock/jar/file.jar',
                    'args': ['arg1', 'arg2'],
                    'jarFileUris': ['gs://other/jar/file.jar']
                },
                'labels': {
                    'key1': 'value1'
                }
            }, 30, job_id_output_path='/tmp/kfp/output/dataproc/job_id.txt') | kubeflow/pipelines | components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_spark_job.py | Python | apache-2.0 | 1,734 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds two nullable float fields to NormalizedPhotoSet and links
    PhotoCluster to a NormalizedPhotoSet via a one-to-one relation."""

    dependencies = [
        ('photoplaces_web', '0003_photocluster_normalized_centers_dirty'),
    ]

    operations = [
        migrations.AddField(
            model_name='normalizedphotoset',
            name='hour_mean_natural',
            field=models.FloatField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='normalizedphotoset',
            name='month_mean_natural',
            field=models.FloatField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='photocluster',
            name='normalized_set',
            # NOTE(review): no on_delete argument — valid only on
            # Django < 2.0, where it defaults to CASCADE.
            field=models.OneToOneField(related_name='+', null=True, blank=True, to='photoplaces_web.NormalizedPhotoSet'),
            preserve_default=True,
        ),
    ]
| joonamo/photoplaces | photoplaces/photoplaces_web/migrations/0004_auto_20141105_1236.py | Python | mit | 984 |
import tempest_parser.manager.structs as structs
import os
import time
from copy import deepcopy
pkg_dir = os.path.dirname(__file__)
pkg_dir = os.path.join(pkg_dir, os.pardir)
pkg_dir = os.path.normpath(pkg_dir)
class TestsManager:
def __init__(self):
    """
    Initializes the empty test structure and the name of the synthetic
    'required' execution that carries the full expected test list.
    """
    # structure
    self.tests_list = deepcopy(structs.tests_template)
    self.required_execution_name = "required"
def add_required(self, all_tests_filepath, path=None, use_raw_names=False):
    """
    Loads the 'required' test set and registers it as an execution.

    :param all_tests_filepath: filename (under the package 'res' folder)
        holding the full list of tests.
    :param path: optional file or folder with test lists to use instead.
    :param use_raw_names: passed to the test-name parser; keeps
        option/uuid suffixes attached to the test names.
    """
    # on init we should either load the full set of tests
    # ... or load supplied ones
    _tests_list_filename = os.path.join(pkg_dir, "res", all_tests_filepath)
    # The list file's creation time doubles as the 'execution date'.
    _unixtime = os.path.getctime(_tests_list_filename)
    _date = time.strftime("%d/%m/%Y %H:%M", time.gmtime(_unixtime))
    if path is not None and not os.path.isfile(path):
        # if this is a folder, load files into sections
        self.tests_list["tests"] = self._load_from_folder(path)
    elif path is not None and os.path.isfile(path):
        # if file, load contents to just one section
        self.tests_list["tests"] = self._all_tests_file_preload(path)
    elif path is None:
        # if there is no tests supplied, use save all tests set
        # self.all_tests_list = read_file_as_lines(path)
        self.tests_list["tests"] = self._all_tests_file_preload(
            _tests_list_filename,
            use_raw_names=use_raw_names
        )
    self.add_execution(
        dict(
            execution_name=self.required_execution_name,
            execution_date=_date,
            summary=dict(time="0s")
        ),
        unixtime=_unixtime
    )
def _load_from_folder(self, folder):
_tests = {}
_folder_content = os.listdir(folder)
for _file in _folder_content:
_tests_in_file = {}
# check extension
if _file.endswith(".list"):
_tests_in_file = self._all_tests_file_preload(
os.path.join(
folder,
_file
)
)
_tests = dict(_tests.items() + _tests_in_file.items())
return _tests
# In case we'll need to list all of the tests in tempest
# and mark which ones was executed, we have list of all tests
# It produced by ./tempest run --list-tests >all_tests_tag_<N>.list
def _all_tests_file_preload(self, resource_file, use_raw_names=False):
    """
    Loads a test-list file into a {class_name: [test items]} dict.

    Every test receives a 'required' result entry ("R") so that
    never-executed tests stay visible in reports.

    :param resource_file: path to the test list file.
    :param use_raw_names: keep option/uuid suffixes in the test names.
    """
    _tests = {}
    # load all tests file
    with open(resource_file) as tests_file:
        for line in tests_file:
            _class_name, _test_name, _uuid, _test_options, _tags = \
                self.split_test_name(
                    line.replace("\n", ""),
                    raw_names=use_raw_names
                )
            # NOTE(review): the per-line item is kept on self, which makes
            # this non-reentrant; harmless for sequential use.
            self._test_item = deepcopy(structs.template_test_item)
            self._test_item["test_name"] = _test_name
            self._test_item["uuid"] = _uuid
            self._test_item["results"][
                self.required_execution_name] = dict(result="R", time='0s')
            self._test_item["test_options"] = _test_options
            self._test_item["tags"] = _tags
            if _class_name not in _tests:
                _tests[_class_name] = []
            _tests[_class_name].append(self._test_item)
    return _tests
@staticmethod
def split_test_name(full_test_name, raw_names=False):
def _dig_guid(raw_trailing):
_all_items = raw_trailing.split(']')[0].split(",")
__guid = ""
__tags = []
for _tag in _all_items:
if _tag.startswith("id-"):
__guid = _tag
else:
__tags.append(_tag)
return __guid, _tags
def _dig_options(raw_options):
__options = raw_options.split(']')[1:]
if len(__options) >= 2:
if len(__options[1]) > 0:
return __options[1]
return "".join(__options)
_first_name = full_test_name.split('.', 1)[0]
_class = ""
_test = ""
_guid = ""
_tags = []
_options = ""
if full_test_name.startswith("setUpClass") or \
full_test_name.startswith("tearDownClass"):
_class = full_test_name.split("(")[1][:-1]
elif _first_name.startswith("unittest2"):
# parse unittest fail
_name = full_test_name.split(".", 3)[3].rsplit(".", 1)
_tmp = _name[1]
_class = _name[0]
_test = _tmp.split('[')[0]
if ']' in _tmp:
_guid = _dig_guid(_tmp)
_options = _dig_options(_tmp)
elif _first_name.startswith("tempest") or \
_first_name.startswith("cvp_checks") or \
_first_name.endswith("_tempest_plugin") or \
_first_name.endswith("_tempest_tests"):
_class = full_test_name.rsplit(".", 1)[0]
_raw_test = full_test_name.rsplit(".", 1)[1]
if not raw_names:
_test = _raw_test.split('[')[0]
if '[' in _raw_test:
_trailing = _raw_test.split('[')[1]
_guid, _tags = _dig_guid(_trailing)
_options = _dig_options(_trailing)
else:
_test = _raw_test
return _class, _test, _guid, _options, _tags
@staticmethod
def split_test_name_from_speed(full_test_name):
_class = full_test_name.rsplit(".", 2)[0]
_test = full_test_name.rsplit(".", 2)[1].split('[')[0]
_tmp = full_test_name.split(" ")[0].rsplit(".", 1)[1].split(']')
_options = ""
if _tmp.__len__() >= 2:
if _tmp[1].__len__() > 0:
_options = _tmp[1]
return (
_class,
_test,
_options
)
def test_name_lookup(self, class_name, test_name, uuid, test_options):
_index = -1
_tests = self.tests_list["tests"]
if class_name in _tests:
for _test_index in range(0, _tests[class_name].__len__()):
_indexed_test = _tests[class_name][_test_index]
if _indexed_test["test_name"] == test_name \
and _indexed_test["test_options"] == test_options:
if uuid == '' or _indexed_test["uuid"] == '':
_index = _test_index
break
elif _indexed_test["uuid"] == uuid:
_index = _test_index
break
return _index
def test_name_lookup_bare(self, class_name, test_name):
_index = -1
_tests = self.tests_list["tests"]
if class_name in _tests:
for _test_index in range(0, _tests[class_name].__len__()):
if _tests[class_name][_test_index]["test_name"] == test_name:
_index = _test_index
break
return _index
def partial_class_name_lookup(self, class_name_short, test_name,
uuid=None, test_options=None):
_list = []
_class_names = self.tests_list["tests"].keys()
for _class_name in _class_names:
if _class_name.endswith(class_name_short):
_index = self.test_name_lookup(_class_name, test_name,
uuid, test_options)
if _index > -1:
_full_class_name = _class_name
_list.append(_full_class_name)
if _list.__len__() > 0:
return _list[0]
else:
return None
def add_execution(self, execution, unixtime=None):
# time = float(_execution["summary"]["time"][:-1])
_date = execution["execution_date"]
_name = execution["execution_name"]
self.tests_list["executions"][_name] = [_date, unixtime]
def mark_slowest_test_in_execution_by_name(self, execution_name,
class_name, test_name,
uuid=None,
test_options=None):
_index = self.test_name_lookup(class_name, test_name, uuid,
test_options)
if _index > -1:
# mark slowest tests
self.tests_list["tests"][class_name][_index]["results"][
execution_name]["slowest"] = True
else:
print("""
WARNING: Parsed slowest test not found in list: {0}, {1}, {2}
""".format(execution_name, class_name, test_name))
def add_fail_data_for_test(self, execution_name, class_name, test_name,
                           test_options, trace, message,
                           class_name_short=False, uuid=None):
    """
    Attaches a failure trace/message to an already-recorded test result.

    :param class_name_short: when True, class_name is only the trailing
        part of the full class name and is resolved by suffix lookup.
    """
    if class_name == "setUpClass" or class_name == "tearDownClass":
        # if this is a setUpClass situation,
        # mark all tests with this result
        # (here `test_name` actually carries the class name)
        _tests = self.tests_list["tests"]
        if test_name in _tests:
            for _test_index in range(0, _tests[test_name].__len__()):
                _tests[test_name][_test_index]["results"][execution_name][
                    "trace"] = trace
                _tests[test_name][_test_index]["results"][execution_name][
                    "message"] = message
                # NOTE(review): this break stops after the first test, so
                # only one test receives the data despite the comment above.
                break
    else:
        # lookup test in the list
        if class_name_short:
            _full_class_name = self.partial_class_name_lookup(class_name,
                                                              test_name)
            if _full_class_name is None:
                # No suffix match: fall back to the short name itself.
                _full_class_name = class_name
        else:
            _full_class_name = class_name
        _index = self.test_name_lookup(
            _full_class_name,
            test_name,
            uuid,
            test_options
        )
        if _index > -1:
            # this matches one already in the list, copy
            self.tests_list["tests"][_full_class_name][_index]["results"][
                execution_name]["trace"] = trace
            self.tests_list["tests"][_full_class_name][_index]["results"][
                execution_name]["message"] = message
        else:
            print("""
            WARNING: Test NOT found: {0}, {1}
            for message: {2}
            """.format(_full_class_name, test_name, message))
def add_result_for_test(self, execution_name, class_name, test_name, uuid,
                        test_options, result, running_time,
                        message='', trace='', tags=list(),
                        class_name_short=False, test_name_bare=False):
    """
    Records a test result for an execution, creating the test entry when
    it does not exist yet.

    NOTE(review): `tags=list()` is a mutable default argument; it is not
    mutated here, but the same list object is shared across calls.

    :param class_name_short: resolve class_name by suffix lookup.
    :param test_name_bare: look the test up by name only.
    """
    _result = deepcopy(structs.template_test_result)
    _result["result"] = result
    _result["time"] = running_time
    _result["message"] = message
    _result["trace"] = trace
    if class_name == "setUpClass" or class_name == "tearDownClass":
        # if this is a setUpClass situation,
        # mark all tests with this result
        # (here `test_name` actually carries the class name)
        _class_name = test_name
        _tests = self.tests_list["tests"]
        if test_name in _tests:
            for _test_index in range(0, len(_tests[_class_name])):
                _result["setup_fail"] = True
                _tests[_class_name][_test_index]["results"][
                    execution_name] = _result
                # NOTE(review): this break stops after the first test, so
                # only one test receives the result despite the comment.
                break
    else:
        # if this is a normal class and test name -> look it up
        # lookup test in the list
        if class_name_short:
            _full_class_name = self.partial_class_name_lookup(class_name,
                                                              test_name)
            if _full_class_name is None:
                _full_class_name = class_name
        else:
            _full_class_name = class_name
        if test_name_bare:
            _index = self.test_name_lookup_bare(
                _full_class_name,
                test_name
            )
        else:
            _index = self.test_name_lookup(
                _full_class_name,
                test_name,
                uuid,
                test_options
            )
        if _index > -1:
            # this matches one already in the list, copy
            _rs = self.tests_list["tests"][_full_class_name][_index][
                "results"]
            # if the execution is current, then
            # save message from existing execution
            if execution_name in _rs:
                # build old message
                _r = "{}: {}\n".format(
                    self.tests_list["tests"][_full_class_name][_index][
                        "results"][execution_name]["result"],
                    self.tests_list["tests"][_full_class_name][_index][
                        "results"][execution_name]["message"]
                )
                # cascade messages: old, new
                _result["message"] = "{}\n{}: {}\n".format(
                    _r,
                    _result["result"],
                    _result["message"]
                )
            self.tests_list["tests"][_full_class_name][_index]["results"][
                execution_name] = _result
        else:
            # the test is not there, add it
            _test_item = deepcopy(structs.template_test_item)
            _test_item["test_name"] = test_name
            _test_item["tags"] = tags
            _test_item["results"][execution_name] = _result
            if _full_class_name not in self.tests_list["tests"]:
                # there is no class name key, add it
                self.tests_list["tests"][_full_class_name] = []
            self.tests_list["tests"][_full_class_name].append(_test_item)
def get_tests_for_class(self, class_name):
if class_name in self.tests_list["tests"]:
return self.tests_list["tests"][class_name]
else:
return []
def get_tests_list(self):
    """Returns the whole internal test structure (not a copy)."""
    return self.tests_list
def is_class_has_errors(self, class_name):
if class_name in self.tests_list["tests"]:
for test in self.tests_list["tests"][class_name]:
_executions = test["results"].keys()
for _execution in _executions:
if test["results"][_execution]["result"] == "FAIL":
return True
else:
return False
def is_test_has_errors(self, class_name):
if class_name in self.tests_list["tests"]:
for test in self.tests_list["tests"][class_name]:
_executions = test["results"].keys()
for _execution in _executions:
if test["results"][_execution]["result"] == "FAIL":
return True
else:
return False
def get_executions(self):
return self.tests_list["executions"].keys()
def get_test_classes(self):
return self.tests_list["tests"].keys()
def get_time_for_class(self, class_name):
_time_str = ""
_executions = self.tests_list["executions"].keys()
if class_name in self.tests_list["tests"]:
for _execution in _executions:
running_time = 0
for test in self.tests_list["tests"][class_name]:
if _execution in test["results"]:
if test["results"][_execution]["time"].__len__() > 0:
running_time += float(
test["results"][_execution]["time"][:-1])
_time_str += "{0}s ".format(running_time)
return _time_str
def get_totals_as_string_for_class(self, class_name):
_totals_str = ""
_executions = self.tests_list["executions"].keys()
if class_name in self.tests_list["tests"]:
for _execution in _executions:
total = 0
fail = 0
for test in self.tests_list["tests"][class_name]:
if _execution in test["results"]:
if test["results"][_execution]["result"] == "FAIL":
fail += 1
total += 1
_totals_str += "{0}/{1}, ".format(total, fail)
return _totals_str
def get_summary_for_execution(self, execution_name):
# calculate summary
running_time = 0
total = 0
ok = 0
fail = 0
skip = 0
_classes = self.tests_list["tests"].keys()
for _class in _classes:
for test in self.tests_list["tests"][_class]:
if execution_name in test["results"]:
total += 1
if test["results"][execution_name]["time"].__len__() > 0:
running_time += float(
test["results"][execution_name]["time"][:-1])
if test["results"][execution_name]["result"] == "OK":
ok += 1
elif test["results"][execution_name]["result"] == "FAIL":
fail += 1
elif test["results"][execution_name]["result"] == "SKIP":
skip += 1
return running_time, total, ok, fail, skip
def print_summary_for_execution(self, _execution_name):
# throw a quick summary
running_time, total, ok, fail, skip = self.get_summary_for_execution(
_execution_name)
print(
"Tempest testrun {0}:"
" {1} executed: {2} passed, {3} failed, {4} skipped\n".format(
_execution_name,
total,
ok,
fail,
skip
)
)
| osavatieiev/tempest-parser | tempest_parser/manager/test_manager.py | Python | apache-2.0 | 18,270 |
import numpy as np
import pytest
from pandas import DataFrame
@pytest.fixture
def int_frame_const_col():
    """
    Fixture for DataFrame of ints which are constant per column

    Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3]
    """
    # One row of [1, 2, 3] (explicit int64 for platform stability),
    # repeated six times.
    row = np.arange(1, 4, dtype="int64")
    return DataFrame(np.tile(row, (6, 1)), columns=["A", "B", "C"])
| rs2/pandas | pandas/tests/apply/conftest.py | Python | bsd-3-clause | 399 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.integrator.FixPositions
**********************************
.. function:: espressopp.integrator.FixPositions(system, particleGroup, fixMask)
:param system:
:param particleGroup:
:param fixMask:
:type system:
:type particleGroup:
:type fixMask:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_FixPositions
class FixPositionsLocal(ExtensionLocal, integrator_FixPositions):
    """Worker-side wrapper around the C++ integrator_FixPositions extension."""

    def __init__(self, system, particleGroup, fixMask):
        # Only build the C++ object on ranks that belong to the active PMI
        # CPU group (or on every rank when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_FixPositions, system, particleGroup, fixMask)
# On the controller rank, expose a PMI proxy class; method calls and property
# access on it are forwarded to the FixPositionsLocal instances on the workers.
if pmi.isController :
    class FixPositions(Extension):
        __metaclass__ = pmi.Proxy  # Python 2 metaclass syntax (this project targets py2)
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.FixPositionsLocal',
            pmicall = ['setFixMask', 'getFixMask'],
            pmiproperty = [ 'particleGroup' ]
        )
| kkreis/espressopp | src/integrator/FixPositions.py | Python | gpl-3.0 | 1,965 |
#!/usr/bin/python
################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
# ----------
import sys
sys.path.append("../liveq-common")
# ----------
import time
import logging
from agent.io.jobmanagers import JobManagers
from agent.config import Config
from liveq.exceptions import ConfigException
from liveq.reporting.postmortem import PostMortem
from liveq import handleSIGINT, exit
# Prepare runtime configuration
runtimeConfig = { }
# Load configuration
try:
    Config.fromFile( "config/agent.conf.local", runtimeConfig )
except ConfigException as e:
    print("ERROR Configuration exception: %s" % e)
    exit(1)
# Hook sigint -> Shutdown
handleSIGINT()
# Setup post-mortem
PostMortem.addGlobalConfig("global", Config)
PostMortem.addGlobalInfo("version", "2.0")
# Prepare post-mortem
# NOTE(review): scratch/test code follows. This is Python 2 (bare `print`
# statements below), and the hard-coded Windows path suggests this was a
# one-off local experiment.
from subprocess import Popen, PIPE
pm = PostMortem()
p = Popen(["C:\\windows\\system32\\help.exe"], stdout=PIPE)
pm.addProcess("C:\\windows\\system32\\help.exe", p, stdout=True)
time.sleep(2)
pm.complete()
print pm.sections
a = str(pm.sections)
print pm.toBuffer()
b = pm.toBuffer()
print "dump=%i, compress=%i" % (len(a),len(b))
# EXIT
# NOTE(review): everything below this exit(0) is unreachable dead code,
# presumably disabled intentionally while testing the post-mortem path.
exit(0)
# Banner
logging.info("Starting agent tests %s" % Config.UUID)
# Login to the server
jobmanagers = JobManagers( Config.SERVER_CHANNEL )
def hsFunction(channel):
    """Handshake callback: advertise this agent's capabilities to a job manager."""
    logging.info("Sending handshake to %s" % channel.name)
    channel.send('handshake', {
        'version': 2,
        'slots': 0,
        'free_slots': 0,
        'group': 'debug'
    })
jobmanagers.handshakeFn(hsFunction)
# Pick JIDs
while True:
    jobmanagers.process(0.5)
    print "--- Agent: %s" % jobmanagers.jid()
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
# make public folders
from __future__ import unicode_literals
import os
def make():
    """make public folder symlinks if missing"""
    # Parent directories are listed before their children, so a plain mkdir
    # (rather than makedirs) is sufficient.
    folders = ["public", "public/js", "public/css", "public/files", "public/backups"]
    for folder in folders:
        if not os.path.exists(folder):
            os.mkdir(folder)
    os.chdir("public")
    # (link name, target) pairs; a link is only created when the target exists
    # and the name is not already taken.
    links = [
        ("app", "../app/public"),
        ("lib", "../lib/public"),
        ("web.py", "../lib/public/html/web.py"),
        ("server.py", "../lib/public/html/server.py"),
        ("blank.html", "../lib/public/html/blank.html"),
        ("unsupported.html", "../lib/public/html/unsupported.html"),
        ("sitemap.xml", "../lib/public/html/sitemap.xml"),
        ("rss.xml", "../lib/public/html/rss.xml"),
    ]
    for name, target in links:
        if not os.path.exists(name) and os.path.exists(target):
            os.symlink(target, name)
    os.chdir('..')
| rohitw1991/latestadbwnf | webnotes/install_lib/setup_public_folder.py | Python | mit | 916 |
from __future__ import annotations
from typing import NamedTuple, Union
from redbot.core import commands
class CommandConverter(NamedTuple):
    """Argument converter that resolves a command name to the loaded command."""

    com: commands.Command

    @classmethod
    async def convert(cls, ctx: commands.Context, arg: str):
        """Look *arg* up on the bot; raise BadArgument when no such command exists."""
        command = ctx.bot.get_command(arg)
        if not command:
            raise commands.BadArgument('Command "{arg}" not found.'.format(arg=arg))
        return cls(command)
class CogOrCOmmand(NamedTuple):
    """Argument converter resolving *arg* to either a command or a cog.

    ``stype`` is ``"command"`` or ``"cog"``; ``obj`` is the qualified command
    name or the cog's class name, respectively.
    """

    stype: str
    obj: str

    @classmethod
    async def convert(cls, ctx: commands.Context, arg: str):
        """Try commands first, then cogs; raise BadArgument when neither matches."""
        command = ctx.bot.get_command(arg)
        if command:
            return cls("command", command.qualified_name)
        cog = ctx.bot.get_cog(arg)
        if cog:
            return cls("cog", cog.__class__.__name__)
        raise commands.BadArgument('Cog or Command "{arg}" not found.'.format(arg=arg))
class TrinaryBool(NamedTuple):
    """Argument converter for a three-state flag.

    ``allow`` -> True, ``deny`` -> False, ``clear`` -> None (case-insensitive).
    """

    state: Union[bool, None]

    @classmethod
    async def convert(cls, ctx: commands.Context, arg: str):
        """Convert *arg* into a TrinaryBool.

        :raises commands.BadArgument: if *arg* is not one of the three keywords.
        """
        try:
            ret = {"allow": True, "deny": False, "clear": None}[arg.lower()]
        except KeyError:
            # The KeyError is expected control flow, not an error worth
            # chaining into the user-facing message; suppress its context.
            raise commands.BadArgument(
                "Was expecting one of `allow`, `deny`, or `clear`, got {arg}".format(
                    arg=arg
                )
            ) from None
        else:
            return cls(ret)
| mikeshardmind/SinbadCogs | channelredirect/converters.py | Python | mit | 1,459 |
""" Utilities """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Logging
# =======
import logging
import os, os.path
from colorlog import ColoredFormatter
# Console handler that prints every record (DEBUG and up) with ANSI colors.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = ColoredFormatter(
    "%(log_color)s[%(asctime)s] %(message)s",
    # datefmt='%H:%M:%S.%f',
    datefmt=None,
    reset=True,
    # Per-level colors; 'INFOV' is the custom verbose-info level registered below.
    log_colors={
        'DEBUG': 'cyan',
        'INFO': 'white,bold',
        'INFOV': 'cyan,bold',
        'WARNING': 'yellow',
        'ERROR': 'red,bold',
        'CRITICAL': 'red,bg_white',
    },
    secondary_log_colors={},
    style='%'
)
ch.setFormatter(formatter)
# Project-wide logger; configured once here and imported elsewhere.
log = logging.getLogger('AVH')
log.setLevel(logging.DEBUG)
log.handlers = [] # No duplicated handlers
log.propagate = False # workaround for duplicated logs in ipython
log.addHandler(ch)
# Custom "verbose info" level sitting just above INFO.
_INFOV_LEVEL = logging.INFO + 1
logging.addLevelName(_INFOV_LEVEL, 'INFOV')
def _infov(self, msg, *args, **kwargs):
    """Log *msg* at the custom INFOV level (INFO + 1)."""
    self.log(_INFOV_LEVEL, msg, *args, **kwargs)
# Attach as a method so any Logger instance can call .infov(...).
logging.Logger.infov = _infov
| yuanchima/Activation-Visualization-Histogram | util.py | Python | mit | 1,077 |
import logging
import signal
import subprocess
import errno
import select
logger = logging.getLogger('BitBake.Process')
def subprocess_setup():
    """Restore default SIGPIPE handling in the child process.

    Python installs a SIGPIPE ignore handler by default, which is usually not
    what non-Python subprocesses expect; reset it to SIG_DFL before exec.
    """
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
class CmdError(RuntimeError):
    """Base error for failures while launching an external command."""
    def __init__(self, command, msg=None):
        self.command = command
        self.msg = msg
    def __str__(self):
        # 'command' may be an argv list or a plain string.
        # (basestring: this module targets Python 2.)
        if not isinstance(self.command, basestring):
            cmd = subprocess.list2cmdline(self.command)
        else:
            cmd = self.command
        msg = "Execution of '%s' failed" % cmd
        if self.msg:
            msg += ': %s' % self.msg
        return msg
class NotFoundError(CmdError):
    """Raised when the requested command does not exist on the system."""
    def __str__(self):
        return "%s: command not found" % CmdError.__str__(self)
class ExecutionError(CmdError):
    """Raised when a command exits with a nonzero status.

    Carries the exit code and any captured stdout/stderr for diagnostics.
    """
    def __init__(self, command, exitcode, stdout = None, stderr = None):
        CmdError.__init__(self, command)
        self.exitcode = exitcode
        self.stdout = stdout
        self.stderr = stderr
    def __str__(self):
        # Build the detail suffix from stderr first, then stdout.
        detail = ""
        if self.stderr:
            detail += self.stderr
        if self.stdout:
            detail += self.stdout
        if detail:
            detail = ":\n" + detail
        base = CmdError.__str__(self)
        return base + " with exit code %s" % self.exitcode + detail
class Popen(subprocess.Popen):
    """subprocess.Popen preconfigured with bitbake-friendly defaults.

    Pipes are attached to stdin/stdout, stderr is merged into stdout, file
    descriptors are closed in the child, and SIGPIPE is reset via
    subprocess_setup. Any keyword argument overrides its default.
    """
    defaults = {
        "close_fds": True,
        "preexec_fn": subprocess_setup,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.STDOUT,
        "stdin": subprocess.PIPE,
        "shell": False,
    }
    def __init__(self, *args, **kwargs):
        # Start from the class defaults, then let caller kwargs win.
        merged = dict(self.defaults)
        merged.update(kwargs)
        subprocess.Popen.__init__(self, *args, **merged)
def _logged_communicate(pipe, log, input):
    """Pump a child process's output, teeing every chunk into *log*.

    Returns (stdout, stderr) as concatenated strings once the child exits.
    NOTE(review): uses Python 2 'except OSError, e' syntax, and relies on
    'bb.utils' although 'bb' is not imported in this module -- presumably
    injected by the bitbake runtime; confirm.
    """
    # Feed any input up front, then close stdin so the child sees EOF.
    if pipe.stdin:
        if input is not None:
            pipe.stdin.write(input)
        pipe.stdin.close()
    outdata, errdata = [], []
    rin = []
    # Switch the pipes to non-blocking so read() never stalls the loop.
    if pipe.stdout is not None:
        bb.utils.nonblockingfd(pipe.stdout.fileno())
        rin.append(pipe.stdout)
    if pipe.stderr is not None:
        bb.utils.nonblockingfd(pipe.stderr.fileno())
        rin.append(pipe.stderr)
    try:
        while pipe.poll() is None:
            rlist = rin
            try:
                r,w,e = select.select (rlist, [], [])
            except OSError, e:
                # EINTR only means select() was interrupted by a signal; retry.
                if e.errno != errno.EINTR:
                    raise
            if pipe.stdout in r:
                data = pipe.stdout.read()
                if data is not None:
                    outdata.append(data)
                    log.write(data)
            if pipe.stderr in r:
                data = pipe.stderr.read()
                if data is not None:
                    errdata.append(data)
                    log.write(data)
    finally:
        # Make sure everything teed so far actually hits the log file.
        log.flush()
    return ''.join(outdata), ''.join(errdata)
def run(cmd, input=None, log=None, **options):
    """Convenience function to run a command and return its output, raising an
    exception when the command fails.

    :param cmd: argv list, or a shell command string (run with shell=True)
    :param input: optional data written to the child's stdin
    :param log: optional file-like object; output is teed into it as it arrives
    :raises NotFoundError: if the executable does not exist (ENOENT)
    :raises CmdError: for any other launch failure
    :raises ExecutionError: if the command exits with a nonzero status
    """
    if isinstance(cmd, basestring) and not "shell" in options:
        options["shell"] = True
    try:
        pipe = Popen(cmd, **options)
    except OSError as exc:
        # errno 2 == ENOENT: the executable itself was not found.
        if exc.errno == 2:
            raise NotFoundError(cmd)
        else:
            raise CmdError(cmd, exc)
    if log:
        stdout, stderr = _logged_communicate(pipe, log, input)
    else:
        stdout, stderr = pipe.communicate(input)
    if pipe.returncode != 0:
        raise ExecutionError(cmd, pipe.returncode, stdout, stderr)
    return stdout, stderr
| sentient-energy/emsw-bitbake-mirror | lib/bb/process.py | Python | gpl-2.0 | 3,692 |
#-*- coding:utf-8 -*-
#-----------------------------
#时间:2017,7,22
#版本:1.0
#开发者:fory
#----------------------------
import os
from scapy.all import *
os.system("reset")
sum = 0
time_how = int(raw_input("DNS服务器循环几遍?"))
x=raw_input("attack target-IP>")
zxw = 0
while zxw <= time_how:
#DNS服务器列表
dns_server_list = ["127.0.0.1","114.114.114.114","114.114.115.115","223.5.5.5","233.6.6.6","112.124.47","114.215.126.16","101,226.4.6","123.125.81.6","208.67.222.222","208.67.220.220","8.8.8.8","8.8.4.4"]
#重置变量,防止出错
dns_server=""
zxw = zxw + 1
for dns_server in dns_server_list:
a = IP(dst=dns_server,src=x)
b = UDP()
c = DNS(id=1,rd=1,qdcount=1)
c.qd = DNSQR(qname="www.baidu.com")
p = a/b/c
i = 0
while i < 10 :
send(p)
sum = sum + 1
print "第",sum,"次攻击已完成"
i = i + 1
print u"attack over!"
time.sleep(2)
| whoisk/test | dns-d.py | Python | gpl-2.0 | 903 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
from streaming_client import BATCH_SIZE
class StreamingHashServerProtocol(WebSocketServerProtocol):
    """
    Streaming WebSockets server that computes a running SHA-256 for data
    received. It will respond every BATCH_SIZE bytes with the digest
    up to that point. It can receive messages of unlimited number of frames
    and frames of unlimited length (actually, up to 2^63, which is the
    WebSockets protocol imposed limit on frame size). Digest is reset upon
    new message.
    """

    def onMessageBegin(self, isBinary):
        # Reset all per-message state: digest, batch counter and byte counters.
        WebSocketServerProtocol.onMessageBegin(self, isBinary)
        self.sha256 = hashlib.sha256()
        self.count = 0           # number of batch digests sent so far
        self.received = 0        # total bytes digested in this message
        self.next = BATCH_SIZE   # byte offset at which the next digest is due

    def onMessageFrameBegin(self, length):
        WebSocketServerProtocol.onMessageFrameBegin(self, length)

    def onMessageFrameData(self, payload):
        """Feed frame data into the running digest, sending one digest per
        completed BATCH_SIZE boundary.

        Bug fix: a single payload chunk may cross SEVERAL batch boundaries.
        The original code emitted only one digest and advanced ``self.next``
        by a single BATCH_SIZE in that case, desynchronizing every later
        digest. Walk the payload boundary by boundary instead.
        """
        pos = 0
        end = len(payload)
        while pos < end:
            # Consume up to the next batch boundary (or to the payload end).
            take = min(self.next - self.received, end - pos)
            self.sha256.update(payload[pos:pos + take])
            self.received += take
            pos += take
            if self.received == self.next:
                # Batch complete: report the digest of all data so far.
                digest = self.sha256.hexdigest()
                self.sendMessage(digest.encode('utf8'))
                print("Sent digest for batch {} : {}".format(self.count, digest))
                self.next += BATCH_SIZE
                self.count += 1

    def onMessageFrameEnd(self):
        pass

    def onMessageEnd(self):
        pass
if __name__ == '__main__':
    # Listen on localhost:9000; a protocol instance is created per connection.
    factory = WebSocketServerFactory(u"ws://127.0.0.1:9000")
    factory.protocol = StreamingHashServerProtocol
    listenWS(factory)
    reactor.run()
| nucular/AutobahnPython | examples/twisted/websocket/streaming/streaming_server.py | Python | mit | 3,522 |
# Base classes for the graphical user interface.
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
import inspect, os, sys, time, site, signal
import meh.ui.gui
from contextlib import contextmanager
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
gi.require_version("AnacondaWidgets", "3.0")
gi.require_version("Keybinder", "3.0")
gi.require_version("GdkPixbuf", "2.0")
gi.require_version("GLib", "2.0")
gi.require_version("GObject", "2.0")
from gi.repository import Gdk, Gtk, AnacondaWidgets, Keybinder, GdkPixbuf, GLib, GObject
from pyanaconda.i18n import _, C_
from pyanaconda.constants import IPMI_ABORTED
from pyanaconda import product, iutil, constants
from pyanaconda import threads
from pyanaconda.ui import UserInterface, common
from pyanaconda.ui.gui.utils import gtk_action_wait, gtk_call_once, unbusyCursor
from pyanaconda import ihelp
import os.path
import logging
# Module-wide logger shared by all GUI classes in this file.
log = logging.getLogger("anaconda")
__all__ = ["GraphicalUserInterface", "QuitDialog"]
# Screenshot bookkeeping: running file index and rate-limit timestamp.
_screenshotIndex = 0
_last_screenshot_timestamp = 0
SCREENSHOT_DELAY = 1  # in seconds
ANACONDA_WINDOW_GROUP = Gtk.WindowGroup()
# Stylesheet priorities to use for product-specific stylesheets.
# installclass stylesheets should be higher than our base stylesheet, and
# stylesheets from updates.img and product.img should be higher than that. All
# levels should be lower than GTK_STYLE_PROVIDER_PRIORITY_USER.
STYLE_PROVIDER_PRIORITY_INSTALLCLASS = Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION + 15
STYLE_PROVIDER_PRIORITY_UPDATES = Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION + 20
# Sanity check of the ordering above (note: stripped when run with -O).
assert STYLE_PROVIDER_PRIORITY_UPDATES < Gtk.STYLE_PROVIDER_PRIORITY_USER
class GUIObject(common.UIObject):
    """This is the base class from which all other GUI classes are derived. It
    thus contains only attributes and methods that are common to everything
    else. It should not be directly instantiated.

    Class attributes:

    builderObjects   -- A list of UI object names that should be extracted from
                        uiFile and exposed for this class to use. If this list
                        is empty, all objects will be exposed.
                        Only the following kinds of objects need to be exported:
                        (1) Top-level objects (like GtkDialogs) that are directly
                        used in Python.
                        (2) Top-level objects that are not directly used in
                        Python, but are used by another object somewhere down
                        in the hierarchy. This includes things like a custom
                        GtkImage used by a button that is part of an exported
                        dialog, and a GtkListStore that is the model of a
                        Gtk*View that is part of an exported object.
    mainWidgetName   -- The name of the top-level widget this object
                        object implements. This will be the widget searched
                        for in uiFile by the window property.
    focusWidgetName  -- The name of the widget to focus when the object is entered,
                        or None.
    uiFile           -- The location of an XML file that describes the layout
                        of widgets shown by this object. UI files are
                        searched for relative to the same directory as this
                        object's module.
    translationDomain-- The gettext translation domain for the given GUIObject
                        subclass. By default the "anaconda" translation domain
                        is used, but external applications, such as Initial Setup,
                        that use GUI elements (Hubs & Spokes) from Anaconda
                        can override the translation domain with their own,
                        so that their subclasses are properly translated.
    helpFile         -- The location of the yelp-compatible help file for the
                        given GUI object. The default value of "" indicates
                        that the object has not specific help file assigned
                        and the default help file should be used.
    """
    builderObjects = []
    mainWidgetName = None
    # Since many of the builder files do not define top-level widgets, the usual
    # {get,can,is,has}_{focus,default} properties don't work real good. Define the
    # widget to be focused in python, instead.
    focusWidgetName = None
    uiFile = ""
    helpFile = None
    translationDomain = "anaconda"
    handles_autostep = False
    def __init__(self, data):
        """Create a new UIObject instance, including loading its uiFile and
        all UI-related objects.

        Instance attributes:

        data     -- An instance of a pykickstart Handler object. The Hub
                    never directly uses this instance. Instead, it passes
                    it down into Spokes when they are created and applied.
                    The Hub simply stores this instance so it doesn't need
                    to be passed by the user.
        skipTo   -- If this attribute is set to something other than None,
                    it must be the name of a class (as a string). Then,
                    the interface will skip to the first instance of that
                    class in the action list instead of going on to
                    whatever the next action is normally.
                    Note that actions may only skip ahead, never backwards.
                    Also, standalone spokes may not skip to an individual
                    spoke off a hub. They can only skip to the hub
                    itself.
        """
        common.UIObject.__init__(self, data)
        if self.__class__ is GUIObject:
            raise TypeError("GUIObject is an abstract class")
        self.skipTo = None
        self.applyOnSkip = False
        self.builder = Gtk.Builder()
        self.builder.set_translation_domain(self.translationDomain)
        self._window = None
        # An empty builderObjects list means "expose everything in the file".
        if self.builderObjects:
            self.builder.add_objects_from_file(self._findUIFile(), self.builderObjects)
        else:
            self.builder.add_from_file(self._findUIFile())
        self.builder.connect_signals(self)
        # Keybinder from GI needs to be initialized before use
        Keybinder.init()
        Keybinder.bind("<Shift>Print", self._handlePrntScreen, [])
        self._automaticEntry = False
        self._autostepRunning = False
        self._autostepDone = False
        self._autostepDoneCallback = None
        # this indicates if the screen is the last spoke to be processed for a hub
        self.lastAutostepSpoke = False
    def _findUIFile(self):
        """Locate uiFile by searching the UIPATH directories (or a default
        set of update/install locations) and finally the directory of the
        subclass's own module; raises IOError when not found.
        """
        path = os.environ.get("UIPATH", "./:/tmp/updates/:/tmp/updates/ui/:/usr/share/anaconda/ui/")
        dirs = path.split(":")
        # append the directory where this UIObject is defined
        dirs.append(os.path.dirname(inspect.getfile(self.__class__)))
        for d in dirs:
            testPath = os.path.join(d, self.uiFile)
            if os.path.isfile(testPath) and os.access(testPath, os.R_OK):
                return testPath
        raise IOError("Could not load UI file '%s' for object '%s'" % (self.uiFile, self))
    def _handlePrntScreen(self, *args, **kwargs):
        """Keybinder callback for <Shift>Print: take a rate-limited screenshot."""
        global _last_screenshot_timestamp
        # as a single press of the assigned key generates
        # multiple callbacks, we need to skip additional
        # callbacks for some time once a screenshot is taken
        if (time.time() - _last_screenshot_timestamp) >= SCREENSHOT_DELAY:
            self.take_screenshot()
            # start counting from the time the screenshot operation is done
            _last_screenshot_timestamp = time.time()
    def take_screenshot(self, name=None):
        """Take a screenshot of the whole screen (works even with multiple displays)

        :param name: optional name for the screenshot that will be appended to the filename,
                     after the standard prefix & screenshot number
        :type name: str or NoneType
        """
        global _screenshotIndex
        # Make sure the screenshot directory exists.
        iutil.mkdirChain(constants.SCREENSHOTS_DIRECTORY)
        if name is None:
            screenshot_filename = "screenshot-%04d.png" % _screenshotIndex
        else:
            screenshot_filename = "screenshot-%04d-%s.png" % (_screenshotIndex, name)
        fn = os.path.join(constants.SCREENSHOTS_DIRECTORY, screenshot_filename)
        root_window = self.main_window.get_window()
        pixbuf = Gdk.pixbuf_get_from_window(root_window, 0, 0,
                                            root_window.get_width(),
                                            root_window.get_height())
        pixbuf.savev(fn, 'png', [], [])
        log.info("%s taken", screenshot_filename)
        _screenshotIndex += 1
    @property
    def automaticEntry(self):
        """Report if the given GUIObject has been displayed under automatic control

        This is needed for example for installations with an incomplete kickstart,
        as we need to differentiate the automatic screenshot pass from the user
        entering a spoke to manually configure things. We also need to skip applying
        changes if the spoke is entered automatically.
        """
        return self._automaticEntry
    @automaticEntry.setter
    def automaticEntry(self, value):
        self._automaticEntry = value
    @property
    def autostepRunning(self):
        """Report if the GUIObject is currently running autostep"""
        return self._autostepRunning
    @autostepRunning.setter
    def autostepRunning(self, value):
        self._autostepRunning = value
    @property
    def autostepDone(self):
        """Report if autostep for this GUIObject has been finished"""
        return self._autostepDone
    @autostepDone.setter
    def autostepDone(self, value):
        self._autostepDone = value
    @property
    def autostepDoneCallback(self):
        """A callback to be run once autostep has been finished"""
        return self._autostepDoneCallback
    @autostepDoneCallback.setter
    def autostepDoneCallback(self, callback):
        self._autostepDoneCallback = callback
    def autostep(self):
        """Autostep through this graphical object and through
        any graphical objects managed by it (such as through spokes for a hub)
        """
        # report that autostep is running to prevent another from starting
        self.autostepRunning = True
        # take a screenshot of the current graphical object
        if self.data.autostep.autoscreenshot:
            # as autostep is triggered just before leaving a screen,
            # we can safely take a screenshot of the "parent" object at once
            # without using idle_add
            self.take_screenshot(self.__class__.__name__)
        self._doAutostep()
        # done
        self.autostepRunning = False
        self.autostepDone = True
        self._doPostAutostep()
        # run the autostep-done callback (if any)
        # pylint: disable=not-callable
        if self.autostepDoneCallback:
            self.autostepDoneCallback(self)
    def _doPostAutostep(self):
        """To be overridden by the given GUIObject sub-class with custom code
        that brings the GUI from the autostepping mode back to the normal mode.
        This usually means to "click" the continue button or its equivalent.
        """
        pass
    def _doAutostep(self):
        """To be overridden by the given GUIObject sub-class with customized
        autostepping code - if needed
        (this is for example used to step through spokes in a hub)
        """
        pass
    @property
    def window(self):
        """Return the object out of the GtkBuilder representation
        previously loaded by the load method.
        """
        # This will raise an AttributeError if the subclass failed to set a
        # mainWidgetName attribute, which is exactly what I want.
        if not self._window:
            self._window = self.builder.get_object(self.mainWidgetName)
        return self._window
    @property
    def main_window(self):
        """Return the top-level window containing this GUIObject."""
        return self.window.get_toplevel()
    def clear_info(self):
        """Clear any info bar from the bottom of the screen."""
        self.window.clear_info()
    def set_error(self, msg):
        """Display an info bar along the bottom of the screen with the provided
        message. This method is used to display critical errors anaconda
        may not be able to do anything about, but that the user may. A
        suitable background color and icon will be displayed.
        """
        self.window.set_error(msg)
    def set_info(self, msg):
        """Display an info bar along the bottom of the screen with the provided
        message. This method is used to display informational text -
        non-critical warnings during partitioning, for instance. The user
        should investigate these messages but doesn't have to. A suitable
        background color and icon will be displayed.
        """
        self.window.set_info(msg)
    def set_warning(self, msg):
        """Display an info bar along the bottom of the screen with the provided
        message. This method is used to display errors the user needs to
        attend to in order to continue installation. This is the bulk of
        messages. A suitable background color and icon will be displayed.
        """
        self.window.set_warning(msg)
class QuitDialog(GUIObject):
    """Confirmation dialog shown when the user asks to quit the installer."""
    builderObjects = ["quitDialog"]
    mainWidgetName = "quitDialog"
    uiFile = "main.glade"
    # Subclasses may override this with a translatable confirmation message.
    MESSAGE = ""
    def run(self):
        """Show the dialog and return its Gtk response code."""
        if self.MESSAGE:
            label = self.builder.get_object("quit_message")
            label.set_label(_(self.MESSAGE))
        return self.window.run()
class ErrorDialog(GUIObject):
    """Modal dialog used to present an error message to the user."""
    builderObjects = ["errorDialog", "errorTextBuffer"]
    mainWidgetName = "errorDialog"
    uiFile = "main.glade"
    # pylint: disable=arguments-differ
    def refresh(self, msg):
        """Replace the dialog's text buffer contents with *msg*."""
        text_buffer = self.builder.get_object("errorTextBuffer")
        text_buffer.set_text(msg, -1)
    def run(self):
        """Show the dialog and return its Gtk response code."""
        return self.window.run()
class MainWindow(Gtk.Window):
"""This is a top-level, full size window containing the Anaconda screens."""
    def __init__(self, fullscreen):
        """Create a new anaconda main window.

          :param bool fullscreen: if True, fullscreen the window, if false maximize
        """
        Gtk.Window.__init__(self)
        # Hide the titlebar when maximized if the window manager allows it.
        # This makes anaconda look full-screenish but without covering parts
        # needed to interact with the window manager, like the GNOME top bar.
        self.set_hide_titlebar_when_maximized(True)
        # The Anaconda and Initial Setup windows might sometimes get decorated with
        # a titlebar which contains the __init__.py header text by default.
        # As all Anaconda and Initial Setup usually have a very distinct title text
        # inside the window, the titlebar text is redundant and should be disabled.
        self.set_title("")
        # Treat an attempt to close the window the same as hitting quit
        self.connect("delete-event", self._on_delete_event)
        # Create a black, 50% opacity pixel that will be scaled to fit the lightbox overlay
        # The confusing list of unnamed parameters is:
        # bytes, colorspace (there is no other colorspace), has-alpha,
        # bits-per-sample (has to be 8), width, height,
        # rowstride (bytes between row starts, but we only have one row)
        self._transparent_base = GdkPixbuf.Pixbuf.new_from_bytes(GLib.Bytes.new([0, 0, 0, 127]),
                                                                 GdkPixbuf.Colorspace.RGB, True, 8, 1, 1, 1)
        # Contain everything in an overlay so the window can be overlayed with the transparency
        # for the lightbox effect
        self._overlay = Gtk.Overlay()
        self._overlay_img = None
        self._overlay.connect("get-child-position", self._on_overlay_get_child_position)
        # Refcount of nested lightbox requests; see lightbox_on/lightbox_off.
        self._overlay_depth = 0
        # Create a stack and a list of what's been added to the stack
        # Double the stack transition duration since the default 200ms is too
        # quick to get the point across
        self._stack = Gtk.Stack(transition_duration=400)
        self._stack_contents = set()
        # Create an accel group for the F12 accelerators added after window transitions
        self._accel_group = Gtk.AccelGroup()
        self.add_accel_group(self._accel_group)
        # Make the window big
        if fullscreen:
            self.fullscreen()
        else:
            self.maximize()
        self._overlay.add(self._stack)
        self.add(self._overlay)
        self.show_all()
        self._current_action = None
        # Help button mnemonics handling
        self._mnemonic_signal = None
        # we have a sensible initial value, just in case
        self._saved_help_button_label = _("Help!")
    def _on_delete_event(self, widget, event, user_data=None):
        """Window-manager close request: treat it exactly like hitting Quit."""
        # Use the quit-clicked signal on the the current standalone, even if the
        # standalone is not currently displayed.
        if self.current_action:
            self.current_action.window.emit("quit-clicked")
        # Stop the window from being closed here
        return True
    def _on_overlay_get_child_position(self, overlay_container, overlayed_widget, allocation, user_data=None):
        """Keep the lightbox image scaled to cover the whole overlay area."""
        overlay_allocation = overlay_container.get_allocation()
        # Scale the overlayed image's pixbuf to the size of the GtkOverlay
        overlayed_widget.set_from_pixbuf(self._transparent_base.scale_simple(
            overlay_allocation.width, overlay_allocation.height, GdkPixbuf.InterpType.NEAREST))
        # Return False to indicate that the child allocation is not yet set
        return False
    def _on_mnemonics_visible_changed(self, window, property_type, obj):
        """Toggle the "(F1)" hint on the help button when mnemonics are shown."""
        # mnemonics display has been activated or deactivated,
        # add or remove the F1 mnemonics display from the help button
        help_button = obj.window.get_help_button()
        if window.props.mnemonics_visible:
            # save current label
            old_label = help_button.get_label()
            self._saved_help_button_label = old_label
            # add the (F1) "mnemonics" to the help button
            help_button.set_label("%s (F1)" % old_label)
        else:
            # restore the old label
            help_button.set_label(self._saved_help_button_label)
    @property
    def current_action(self):
        """The standalone action (e.g. a hub) currently in control, or None."""
        return self._current_action
    def _setVisibleChild(self, child):
        """Make *child* (a GUIObject) the screen shown by the stack.

        Handles moving the F12/F1 accelerators from the outgoing screen to the
        incoming one, lazily adding the child to the stack, and focusing the
        child's configured focus widget.
        """
        # Remove the F12 accelerator from the old window
        old_screen = self._stack.get_visible_child()
        if old_screen:
            old_screen.remove_accelerator(self._accel_group, Gdk.KEY_F12, 0)
            old_screen.remove_accelerator(self._accel_group, Gdk.KEY_F1, 0)
            old_screen.remove_accelerator(self._accel_group, Gdk.KEY_F1, Gdk.ModifierType.MOD1_MASK)
        # Check if the widget is already on the stack
        if child not in self._stack_contents:
            self._stack.add(child.window)
            self._stack_contents.add(child)
            child.window.show_all()
        # It would be handy for F12 to continue to work like it did in the old
        # UI, by skipping you to the next screen or sending you back to the hub
        if isinstance(child.window, AnacondaWidgets.BaseStandalone):
            child.window.add_accelerator("continue-clicked", self._accel_group,
                                         Gdk.KEY_F12, 0, 0)
        elif isinstance(child.window, AnacondaWidgets.SpokeWindow):
            child.window.add_accelerator("button-clicked", self._accel_group,
                                         Gdk.KEY_F12, 0, 0)
        # Configure the help button
        child.window.add_accelerator("help-button-clicked", self._accel_group,
                                     Gdk.KEY_F1, 0, 0)
        child.window.add_accelerator("help-button-clicked", self._accel_group,
                                     Gdk.KEY_F1, Gdk.ModifierType.MOD1_MASK, 0)
        # Connect to mnemonics-visible to add the (F1) mnemonic to the button label
        if self._mnemonic_signal:
            self.disconnect(self._mnemonic_signal)
        self._mnemonic_signal = self.connect("notify::mnemonics-visible", self._on_mnemonics_visible_changed, child)
        self._stack.set_visible_child(child.window)
        if child.focusWidgetName:
            child.builder.get_object(child.focusWidgetName).grab_focus()
    def setCurrentAction(self, standalone):
        """Set the current standalone widget.

           This changes the currently displayed screen and, if the standalone
           is a hub, sets the hub as the screen to which spokes will return.

           :param AnacondaWidgets.BaseStandalone standalone: the new standalone action
        """
        # Slide the old hub/standalone off of the new one
        self._stack.set_transition_type(Gtk.StackTransitionType.UNDER_LEFT)
        self._current_action = standalone
        self._setVisibleChild(standalone)
def enterSpoke(self, spoke):
    """Enter a spoke.

    The spoke will be displayed as the current screen, but the current-action
    to which the spoke will return will not be changed.

    :param AnacondaWidgets.SpokeWindow spoke: a spoke to enter
    """
    # Slide up, as if the spoke is under the hub
    self._stack.set_transition_type(Gtk.StackTransitionType.UNDER_UP)
    self._setVisibleChild(spoke)

    # autostep through the spoke if required
    if spoke.automaticEntry:
        # we need to use idle_add here to give GTK time to render the spoke
        gtk_call_once(self._autostep_spoke, spoke)
def _autostep_spoke(self, spoke):
    """Step through a spoke and make a screenshot if required.

    If this is the last spoke to be autostepped on a hub return to
    the hub so that we can proceed to the next one.
    """
    # it might be possible that autostep is specified, but autoscreenshot isn't
    if spoke.data.autostep.autoscreenshot:
        spoke.take_screenshot(spoke.__class__.__name__)

    # give the spoke a chance to run custom work once autostepping is done
    if spoke.autostepDoneCallback:
        spoke.autostepDoneCallback(spoke)

    # if this is the last spoke then return to hub
    if spoke.lastAutostepSpoke:
        self.returnToHub()
def returnToHub(self):
    """Exit a spoke and return to a hub (the current action)."""
    # Slide back down over the spoke
    self._stack.set_transition_type(Gtk.StackTransitionType.OVER_DOWN)
    self._setVisibleChild(self._current_action)
def lightbox_on(self):
    """Dim the main window behind a dialog.

    Calls nest: a depth counter is kept so the overlay image is only
    created once and removed when the matching lightbox_off() is reached.
    """
    self._overlay_depth += 1
    if not self._overlay_img:
        # Add an overlay image that will be filled and scaled in get-child-position
        self._overlay_img = Gtk.Image()
        self._overlay_img.show_all()
        self._overlay.add_overlay(self._overlay_img)
def lightbox_off(self):
    """Undo one lightbox_on(); drop the overlay at depth zero.

    The overlay image is only destroyed when every nested lightbox_on()
    has been matched by a lightbox_off().
    """
    self._overlay_depth -= 1
    if self._overlay_depth == 0 and self._overlay_img:
        # Remove the overlay image
        self._overlay_img.destroy()
        self._overlay_img = None
@contextmanager
def enlightbox(self, dialog):
    """Display a dialog in a lightbox over the main window.

    :param GtkDialog: the dialog to display

    The lightbox is guaranteed to be removed again even if the body of the
    ``with`` statement raises (the original version skipped lightbox_off()
    on an exception, leaking the overlay and leaving the UI dimmed).
    """
    self.lightbox_on()

    # Set the dialog as transient for ourself
    ANACONDA_WINDOW_GROUP.add_window(dialog)
    dialog.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
    dialog.set_transient_for(self)

    try:
        yield
    finally:
        # Always undo the lightbox, even when the with-body raised.
        self.lightbox_off()
class GraphicalUserInterface(UserInterface):
    """This is the standard GTK+ interface we try to steer everything to using.

    It is suitable for use both directly and via VNC.
    """
    def __init__(self, storage, payload, instclass,
                 distributionText=product.distributionText, isFinal=product.isFinal,
                 quitDialog=QuitDialog, gui_lock=None, fullscreen=False):
        """Create the GUI: main window, exception handling and quit dialog.

        :param gui_lock: lock guarding against two Gtk main loops at once
                         (may be None; see NOTE in run())
        :param fullscreen: whether the main window starts fullscreen
        """
        UserInterface.__init__(self, storage, payload, instclass)

        self._actions = []           # ordered action classes still to be shown
        self._currentAction = None   # instantiated action currently on screen
        self._ui = None
        self._gui_lock = gui_lock

        self.data = None

        self.mainWindow = MainWindow(fullscreen=fullscreen)

        self._distributionText = distributionText
        self._isFinal = isFinal
        self._quitDialog = quitDialog
        # python-meh needs the lightbox to display exception dialogs over us
        self._mehInterface = GraphicalExceptionHandlingIface(
                                    self.mainWindow.lightbox_on)

        ANACONDA_WINDOW_GROUP.add_window(self.mainWindow)

    # NOTE: the statements below execute at class-definition time and build
    # the class-level module search paths for categories, spokes and hubs.
    basemask = "pyanaconda.ui"
    basepath = os.path.dirname(__file__)
    updatepath = "/tmp/updates/pyanaconda/ui"
    sitepackages = [os.path.join(dir, "pyanaconda", "ui")
                    for dir in site.getsitepackages()]
    pathlist = set([updatepath, basepath] + sitepackages)

    _categories = []
    _spokes = []
    _hubs = []

    # as list comprehension can't reference class level variables in Python 3 we
    # need to use a for cycle (http://bugs.python.org/issue21161)
    for path in pathlist:
        _categories.append((basemask + ".categories.%s", os.path.join(path, "categories")))
        _spokes.append((basemask + ".gui.spokes.%s", os.path.join(path, "gui/spokes")))
        _hubs.append((basemask + ".gui.hubs.%s", os.path.join(path, "gui/hubs")))

    # presumably UserInterface.paths implements "+" as a merge of these
    # per-kind search path lists -- TODO confirm against base class
    paths = UserInterface.paths + {
        "categories": _categories,
        "spokes": _spokes,
        "hubs": _hubs,
    }

    def _widgetScale(self):
        """Apply a Gdk window scale of 2 on monitors detected as HiDPI."""
        # First, check if the GDK_SCALE environment variable is already set. If so,
        # leave it alone.
        if "GDK_SCALE" in os.environ:
            log.debug("GDK_SCALE already set to %s, not scaling", os.environ["GDK_SCALE"])
            return

        # Next, check if a scaling factor is already being applied via XSETTINGS,
        # such as by gnome-settings-daemon
        display = Gdk.Display.get_default()
        screen = display.get_default_screen()
        val = GObject.Value()
        val.init(GObject.TYPE_INT)
        if screen.get_setting("gdk-window-scaling-factor", val):
            log.debug("Window scale set to %s by XSETTINGS, not scaling", val.get_int())
            return

        # Get the primary monitor dimensions in pixels and mm from Gdk
        primary = screen.get_primary_monitor()
        monitor_geometry = screen.get_monitor_geometry(primary)
        monitor_scale = screen.get_monitor_scale_factor(primary)
        monitor_width_mm = screen.get_monitor_width_mm(primary)
        monitor_height_mm = screen.get_monitor_height_mm(primary)

        # Sometimes gdk returns 0 for physical widths and heights
        if monitor_height_mm == 0 or monitor_width_mm == 0:
            return

        # Check if this monitor is high DPI, using heuristics from gnome-settings-dpi.
        # If the monitor has a height >= 1200 pixels and a resolution > 192 dpi in both
        # x and y directions, apply a scaling factor of 2 so that anaconda isn't all tiny
        monitor_width_px = monitor_geometry.width * monitor_scale
        monitor_height_px = monitor_geometry.height * monitor_scale
        monitor_dpi_x = monitor_width_px / (monitor_width_mm / 25.4)
        monitor_dpi_y = monitor_height_px / (monitor_height_mm / 25.4)

        log.debug("Detected primary monitor: %dx%d %ddpix %ddpiy", monitor_width_px,
                monitor_height_px, monitor_dpi_x, monitor_dpi_y)
        if monitor_height_px >= 1200 and monitor_dpi_x > 192 and monitor_dpi_y > 192:
            display.set_window_scale(2)
            # Export the scale so that Gtk programs launched by anaconda are also scaled
            iutil.setenv("GDK_SCALE", "2")

    def _convertSignals(self):
        """Re-register python signal handlers through GLib so they fire promptly."""
        # What tends to happen when we receive a signal is that the signal will
        # be received by the python interpreter's C handler, python will do
        # what it needs to do to set the python handler we registered to run,
        # the C handler returns, and then nothing happens because Gtk is
        # holding the global interpreter lock. The signal then gets delivered
        # to our python code when you move the mouse or something. We can get
        # around this by doing signals the GLib way. The conversion assumes
        # that none of our signal handlers care about the frame parameter,
        # which is generally true.
        #
        # After the unix_signal_add call, signal.getsignal will tell a half
        # truth: the method returned will still be called, by way of
        # _signal_converter, but GLib will have replaced the actual signal
        # handler for that signal.

        # Convert everything except SIGCHLD, because that's a different can of worms

        def _signal_converter(user_data):
            (handler, signum) = user_data
            handler(signum, None)

        for signum in (s for s in range(1, signal.NSIG) if s != signal.SIGCHLD):
            handler = signal.getsignal(signum)
            if handler and handler not in (signal.SIG_DFL, signal.SIG_IGN):
                # NB: if you are looking at the glib documentation you are in for
                # some surprises because gobject-introspection is a minefield.
                # g_unix_signal_add_full comes out as GLib.unix_signal_add, and
                # g_unix_signal_add doesn't come out at all.
                GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signum,
                        _signal_converter, (handler, signum))

    @property
    def tty_num(self):
        # The GUI runs on virtual terminal 6.
        return 6

    @property
    def meh_interface(self):
        # Exception-handling interface handed to python-meh.
        return self._mehInterface

    def _list_hubs(self):
        """Return a list of Hub classes to be imported to this interface"""
        from pyanaconda.ui.gui.hubs.summary import SummaryHub
        from pyanaconda.ui.gui.hubs.progress import ProgressHub
        return [SummaryHub, ProgressHub]

    def _is_standalone(self, obj):
        """Is the spoke passed as obj standalone?"""
        from pyanaconda.ui.gui.spokes import StandaloneSpoke
        return isinstance(obj, StandaloneSpoke)

    def setup(self, data):
        """Collect the ordered action classes and store the kickstart data."""
        self._actions = self.getActionClasses(self._list_hubs())
        self.data = data

    def getActionClasses(self, hubs):
        """Grab all relevant standalone spokes, add them to the passed
        list of hubs and order the list according to the
        relationships between hubs and standalones."""
        from pyanaconda.ui.gui.spokes import StandaloneSpoke

        # First, grab a list of all the standalone spokes.
        standalones = self._collectActionClasses(self.paths["spokes"], StandaloneSpoke)

        # Second, order them according to their relationship
        return self._orderActionClasses(standalones, hubs)

    def _instantiateAction(self, actionClass):
        """Create an instance of actionClass wired into the GUI signals.

        Returns None when the action should be skipped (a standalone spoke
        already completed by kickstart).
        """
        # Instantiate an action on-demand, passing the arguments defining our
        # spoke API and setting up continue/quit signal handlers.
        obj = actionClass(self.data, self.storage, self.payload, self.instclass)

        # set spoke search paths in Hubs
        if hasattr(obj, "set_path"):
            obj.set_path("spokes", self.paths["spokes"])
            obj.set_path("categories", self.paths["categories"])

        # If we are doing a kickstart install, some standalone spokes
        # could already be filled out.  In that case, we do not want
        # to display them.
        if self._is_standalone(obj) and obj.completed:
            # del only drops the local reference; the instance is collected
            del(obj)
            return None

        # Use connect_after so classes can add actions before we change screens
        obj.window.connect_after("continue-clicked", self._on_continue_clicked)
        obj.window.connect_after("help-button-clicked", self._on_help_clicked, obj)
        obj.window.connect_after("quit-clicked", self._on_quit_clicked)

        return obj

    def run(self):
        """Initialize Gtk, show the first action and enter the Gtk main loop."""
        (success, args) = Gtk.init_check(None)
        if not success:
            raise RuntimeError("Failed to initialize Gtk")

        # Check if the GUI lock has already been taken
        if self._gui_lock and not self._gui_lock.acquire(False):
            # Gtk main loop running. That means python-meh caught exception
            # and runs its main loop. Do not crash Gtk by running another one
            # from a different thread and just wait until python-meh is
            # finished, then quit.
            unbusyCursor()
            log.error("Unhandled exception caught, waiting for python-meh to "\
                    "exit")
            threads.threadMgr.wait_for_error_threads()
            sys.exit(1)

        try:
            # Apply a widget-scale to hidpi monitors
            self._widgetScale()

            # Find the first instantiable action; drop completed ones.
            while not self._currentAction:
                self._currentAction = self._instantiateAction(self._actions[0])
                if not self._currentAction:
                    self._actions.pop(0)

                if not self._actions:
                    return

            self._currentAction.initialize()
            self._currentAction.entry_logger()
            self._currentAction.refresh()

            self._currentAction.window.set_beta(not self._isFinal)
            self._currentAction.window.set_property("distribution", self._distributionText().upper())

            # Set some program-wide settings.
            settings = Gtk.Settings.get_default()
            settings.set_property("gtk-font-name", "Cantarell")
            settings.set_property("gtk-icon-theme-name", "gnome")

            # Apply the application stylesheet
            provider = Gtk.CssProvider()
            provider.load_from_path("/usr/share/anaconda/anaconda-gtk.css")
            Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), provider,
                    Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)

            # Apply the installclass stylesheet
            if self.instclass.stylesheet:
                provider = Gtk.CssProvider()
                provider.load_from_path(self.instclass.stylesheet)
                Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), provider,
                        STYLE_PROVIDER_PRIORITY_INSTALLCLASS)

            # Look for updates to the stylesheet and apply them at a higher priority
            for updates_dir in ("updates", "product"):
                updates_css = "/run/install/%s/anaconda-gtk.css" % updates_dir
                if os.path.exists(updates_css):
                    provider = Gtk.CssProvider()
                    provider.load_from_path(updates_css)
                    Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), provider,
                            STYLE_PROVIDER_PRIORITY_UPDATES)

            self.mainWindow.setCurrentAction(self._currentAction)

            # Do this at the last possible minute.
            unbusyCursor()
        # If anything went wrong before we start the Gtk main loop, release
        # the gui lock and re-raise the exception so that meh can take over
        except Exception:
            # NOTE(review): this assumes gui_lock was provided; if
            # self._gui_lock is None this raises AttributeError -- confirm.
            self._gui_lock.release()
            raise

        Gtk.main()

    ###
    ### MESSAGE HANDLING METHODS
    ###
    @gtk_action_wait
    def showError(self, message):
        """Show a fatal error dialog and exit the installer."""
        dlg = ErrorDialog(None)

        with self.mainWindow.enlightbox(dlg.window):
            dlg.refresh(message)
            dlg.run()
            dlg.window.destroy()

        # the dialog has the only button -- "Exit installer", so just do so
        sys.exit(1)

    @gtk_action_wait
    def showDetailedError(self, message, details, buttons=None):
        """Show an error dialog with expandable details; return the button rc."""
        from pyanaconda.ui.gui.spokes.lib.detailederror import DetailedErrorDialog
        buttons = buttons or [C_("GUI|Detailed Error Dialog", "_Quit")]
        dlg = DetailedErrorDialog(None, buttons=buttons, label=message)

        with self.mainWindow.enlightbox(dlg.window):
            dlg.refresh(details)
            rc = dlg.run()
            dlg.window.destroy()
            return rc

    @gtk_action_wait
    def showYesNoQuestion(self, message):
        """Display a modal yes/no question; return True for Yes."""
        dlg = Gtk.MessageDialog(flags=Gtk.DialogFlags.MODAL,
                                message_type=Gtk.MessageType.QUESTION,
                                buttons=Gtk.ButtonsType.NONE,
                                message_format=message)
        dlg.set_decorated(False)
        dlg.add_buttons(C_("GUI|Yes No Dialog", "_No"), 0,
                        C_("GUI|Yes No Dialog", "_Yes"), 1)
        dlg.set_default_response(1)

        with self.mainWindow.enlightbox(dlg):
            rc = dlg.run()
            dlg.destroy()

        return bool(rc)

    ###
    ### SIGNAL HANDLING METHODS
    ###
    def _on_continue_clicked(self, win, user_data=None):
        """Advance to the next action (or quit on the last screen)."""
        # Autostep needs to be triggered just before switching to the next screen
        # (or before quiting the installation if there are no more screens) to be consistent
        # in both fully automatic kickstart installation and for installation with an incomplete
        # kickstart. Therefore we basically "hook" the continue-clicked signal, start autostepping
        # and ignore any other continue-clicked signals until autostep is done.
        # Once autostep finishes, it emits the appropriate continue-clicked signal itself,
        # switching to the next screen (if any).
        if self.data.autostep.seen and self._currentAction.handles_autostep:
            if self._currentAction.autostepRunning:
                log.debug("ignoring the continue-clicked signal - autostep is running")
                return
            elif not self._currentAction.autostepDone:
                self._currentAction.autostep()
                return

        # ignore stale clicks: window must allow continuing and be current
        if not win.get_may_continue() or win != self._currentAction.window:
            return

        # If we're on the last screen, clicking Continue quits.
        if len(self._actions) == 1:
            # save the screenshots to the installed system before killing Anaconda
            # (the kickstart post scripts run to early, so we need to copy the screenshots now)
            iutil.save_screenshots()
            Gtk.main_quit()
            return

        nextAction = None
        ndx = 0

        # If the current action wants us to jump to an arbitrary point ahead,
        # look for where that is now.
        if self._currentAction.skipTo:
            found = False
            for ndx in range(1, len(self._actions)):
                if self._actions[ndx].__class__.__name__ == self._currentAction.skipTo:
                    found = True
                    break

            # If we found the point in question, compose a new actions list
            # consisting of the current action, the one to jump to, and all
            # the ones after.  That means the rest of the code below doesn't
            # have to change.
            if found:
                self._actions = [self._actions[0]] + self._actions[ndx:]

        # _instantiateAction returns None for actions that should not be
        # displayed (because they're already completed, for instance) so skip
        # them here.
        while not nextAction:
            nextAction = self._instantiateAction(self._actions[1])
            if not nextAction:
                self._actions.pop(1)

            if not self._actions:
                sys.exit(0)
                return

        nextAction.initialize()
        nextAction.window.set_beta(self._currentAction.window.get_beta())
        nextAction.window.set_property("distribution", self._distributionText().upper())

        # non-showable actions are processed immediately without display
        if not nextAction.showable:
            self._currentAction.window.hide()
            self._actions.pop(0)
            self._on_continue_clicked(nextAction)
            return

        self._currentAction.exit_logger()
        nextAction.entry_logger()

        nextAction.refresh()

        # Do this last.  Setting up curAction could take a while, and we want
        # to leave something on the screen while we work.
        self.mainWindow.setCurrentAction(nextAction)
        self._currentAction = nextAction
        self._actions.pop(0)

    def _on_help_clicked(self, window, obj):
        # the help button has been clicked, start the yelp viewer with
        # content for the current screen
        ihelp.start_yelp(ihelp.get_help_path(obj.helpFile, self.instclass))

    def _on_quit_clicked(self, win, userData=None):
        """Show the quit dialog; on confirmation log, report to IPMI and exit."""
        if not win.get_quit_button():
            return

        dialog = self._quitDialog(None)
        with self.mainWindow.enlightbox(dialog.window):
            rc = dialog.run()
            dialog.window.destroy()

        if rc == 1:
            self._currentAction.exit_logger()
            iutil.ipmi_report(IPMI_ABORTED)
            sys.exit(0)
class GraphicalExceptionHandlingIface(meh.ui.gui.GraphicalIntf):
    """
    Class inheriting from python-meh's GraphicalIntf and overriding methods
    that need some modification in Anaconda.
    """
    def __init__(self, lightbox_func):
        """
        :param lightbox_func: a function that creates lightbox for a given
                              window
        :type lightbox_func: None -> None
        """
        meh.ui.gui.GraphicalIntf.__init__(self)

        self._lightbox_func = lightbox_func

    def mainExceptionWindow(self, text, exn_file, *args, **kwargs):
        """Create meh's exception window, undecorated and over a lightbox."""
        meh_intf = meh.ui.gui.GraphicalIntf()
        exc_window = meh_intf.mainExceptionWindow(text, exn_file)
        exc_window.main_window.set_decorated(False)

        # dim the installer window behind the exception dialog
        self._lightbox_func()

        ANACONDA_WINDOW_GROUP.add_window(exc_window.main_window)

        # the busy cursor may be set
        unbusyCursor()

        return exc_window
| kparal/anaconda | pyanaconda/ui/gui/__init__.py | Python | gpl-2.0 | 43,055 |
import urllib
class RedirectUriError(Exception):
    """Authorization request failed because the redirect_uri is missing,
    invalid, or does not match the one registered for the client."""
    error = 'Redirect URI Error'
    description = 'The request fails due to a missing, invalid, or mismatching redirection URI (redirect_uri).'
class ClientIdError(Exception):
    """Authorization request failed because client_id is missing or unknown."""
    error = 'Client ID Error'
    description = 'The client identifier (client_id) is missing or invalid.'
class AuthorizeError(Exception):
    """Error raised during an OAuth2/OpenID Connect authorization request.

    Carries the error code, a human-readable description and enough context
    (redirect URI, grant type) to build the error redirect for the client.
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6749#section-4.1.2.1
        'invalid_request': 'The request is otherwise malformed',

        'unauthorized_client': 'The client is not authorized to request an '
                               'authorization code using this method',

        'access_denied': 'The resource owner or authorization server denied '
                         'the request',

        'unsupported_response_type': 'The authorization server does not '
                                     'support obtaining an authorization code '
                                     'using this method',

        'invalid_scope': 'The requested scope is invalid, unknown, or '
                         'malformed',

        'server_error': 'The authorization server encountered an error',

        'temporarily_unavailable': 'The authorization server is currently '
                                   'unable to handle the request due to a '
                                   'temporary overloading or maintenance of '
                                   'the server',

        # OpenID errors.
        # http://openid.net/specs/openid-connect-core-1_0.html#AuthError
        'interaction_required': 'The Authorization Server requires End-User '
                                'interaction of some form to proceed',

        'login_required': 'The Authorization Server requires End-User '
                          'authentication',

        'account_selection_required': 'The End-User is required to select a '
                                      'session at the Authorization Server',

        # Bug fix: the original concatenation was missing a space and
        # produced "End-Userconsent".
        'consent_required': 'The Authorization Server requires End-User '
                            'consent',

        'invalid_request_uri': 'The request_uri in the Authorization Request '
                               'returns an error or contains invalid data',

        'invalid_request_object': 'The request parameter contains an invalid '
                                  'Request Object',

        'request_not_supported': 'The provider does not support use of the '
                                 'request parameter',

        'request_uri_not_supported': 'The provider does not support use of the '
                                     'request_uri parameter',

        'registration_not_supported': 'The provider does not support use of '
                                      'the registration parameter',
    }

    def __init__(self, redirect_uri, error, grant_type):
        self.error = error
        # Default to '' for unknown codes so create_uri() never tries to
        # url-quote None (which raised TypeError in the original).
        self.description = self._errors.get(error, '')
        self.redirect_uri = redirect_uri
        self.grant_type = grant_type

    def create_uri(self, redirect_uri, state):
        """Build the redirect URI carrying the error back to the client.

        Implicit-grant errors go in the fragment, others in the query string.
        """
        # Bug fix: urllib.quote only exists on Python 2; import the right
        # function for whichever interpreter is running.
        try:
            from urllib.parse import quote  # Python 3
        except ImportError:
            from urllib import quote  # Python 2
        description = quote(self.description)

        # See:
        # http://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthError
        hash_or_question = '#' if self.grant_type == 'implicit' else '?'

        uri = '{0}{1}error={2}&error_description={3}'.format(
            redirect_uri,
            hash_or_question,
            self.error,
            description)

        # Add state if present.
        uri = uri + ('&state={0}'.format(state) if state else '')

        return uri

    @property
    def response(self):
        # Placeholder kept for API compatibility; callers use create_uri().
        pass
class TokenError(Exception):
    """Error raised from the OAuth2 token endpoint.

    The supported error codes and their meanings come from
    https://tools.ietf.org/html/rfc6749#section-5.2
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6749#section-5.2
        'invalid_request': 'The request is otherwise malformed',

        'invalid_client': 'Client authentication failed (e.g., unknown client, '
                          'no client authentication included, or unsupported '
                          'authentication method)',

        'invalid_grant': 'The provided authorization grant or refresh token is '
                         'invalid, expired, revoked, does not match the '
                         'redirection URI used in the authorization request, '
                         'or was issued to another client',

        'unauthorized_client': 'The authenticated client is not authorized to '
                               'use this authorization grant type',

        'unsupported_grant_type': 'The authorization grant type is not '
                                  'supported by the authorization server',

        'invalid_scope': 'The requested scope is invalid, unknown, malformed, '
                         'or exceeds the scope granted by the resource owner',
    }

    def __init__(self, error):
        self.error = error
        self.description = self._errors.get(error)

    def create_dict(self):
        """Return the error as a dict ready for JSON serialization."""
        return {
            'error': self.error,
            'error_description': self.description,
        }
class UserInfoError(Exception):
    """Error raised from the userinfo endpoint.

    Maps each bearer-token error code (RFC 6750, section 3.1) to its
    description and the HTTP status to respond with.  Unknown codes get
    empty description and status.
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6750#section-3.1
        'invalid_request': (
            'The request is otherwise malformed', 400
        ),
        'invalid_token': (
            'The access token provided is expired, revoked, malformed, '
            'or invalid for other reasons', 401
        ),
        'insufficient_scope': (
            'The request requires higher privileges than provided by '
            'the access token', 403
        ),
    }

    def __init__(self, code):
        self.code = code
        # Tuple-unpack description and HTTP status in one step.
        self.description, self.status = self._errors.get(code, ('', ''))
| django-py/django-openid-provider | oidc_provider/lib/errors.py | Python | mit | 5,837 |
"""
kicad_mod.py
Copyright 2015 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Helper functions for generating KiCAD footprint files.
"""
from __future__ import print_function, division
# Drawing defaults (millimetres) for the footprint generators below.
# CTYD = courtyard outline, SILK = silkscreen, FAB = fabrication layer.
CTYD_GAP = 0.25
CTYD_GRID = 0.05
CTYD_WIDTH = 0.01
SILK_WIDTH = 0.15
FAB_WIDTH = 0.01
# Default text size/stroke for footprint fields; FONT_HALFHEIGHT is used
# when offsetting labels by half a character height.
FONT_SIZE = (1.0, 1.0)
FONT_THICKNESS = 0.15
FONT_HALFHEIGHT = 0.7
def fp_line(start, end, layer, width):
    """Build the s-expression list for a KiCad ``fp_line`` graphic element."""
    sx, sy = start[0], start[1]
    ex, ey = end[0], end[1]
    return ["fp_line",
            ["start", sx, sy],
            ["end", ex, ey],
            ["layer", layer],
            ["width", width]]
def fp_arc(start, end, angle, layer, width):
    """Build the s-expression list for a KiCad ``fp_arc`` graphic element."""
    expr = ["fp_arc"]
    expr.append(["start", start[0], start[1]])
    expr.append(["end", end[0], end[1]])
    expr.append(["angle", angle])
    expr.append(["layer", layer])
    expr.append(["width", width])
    return expr
def fp_circle(centre, end, layer, width):
    """Build the s-expression list for a KiCad ``fp_circle`` element.

    Note the emitted key is the US spelling "center" even though the
    parameter uses "centre".
    """
    cx, cy = centre[0], centre[1]
    return ["fp_circle",
            ["center", cx, cy],
            ["end", end[0], end[1]],
            ["layer", layer],
            ["width", width]]
def fp_text(texttype, text, at, layer, size, thickness):
    """Build the s-expression list for a KiCad ``fp_text`` element.

    ``at`` may carry two or three values (x, y[, rotation]); it is passed
    through verbatim.
    """
    font = ["font",
            ["size", size[0], size[1]],
            ["thickness", thickness]]
    return ["fp_text", texttype, text,
            ["at"] + list(at),
            ["layer", layer],
            ["effects", font]]
def pad(num, padtype, shape, at, size, layers, drill=None, offset=None,
        m_mask=None, m_paste=None):
    """Build the s-expression list for a KiCad ``pad`` element.

    ``drill`` may be a bare diameter or a sequence for oval drills;
    ``offset`` shifts the drill relative to the pad centre; ``m_mask`` and
    ``m_paste`` set per-pad solder mask/paste margins.
    """
    result = ["pad", num, padtype, shape,
              ["at", at[0], at[1]],
              ["size"] + list(size),
              ["layers"] + list(layers)]

    if drill is not None or offset is not None:
        drill_expr = ["drill"]
        if drill is not None:
            if isinstance(drill, (float, int)):
                drill_expr.append(drill)
            else:
                drill_expr += drill
        if offset is not None:
            drill_expr.append(["offset"] + offset)
        result.append(drill_expr)

    if m_mask is not None:
        result.append(["solder_mask_margin", m_mask])
    if m_paste is not None:
        result.append(["solder_paste_margin", m_paste])

    return result
def draw_square(width, height, centre, layer, thickness):
    """Draw a square of (`width`, `height`) centered on `centre`.

    Returns the four corners (nw, ne, se, sw) followed by the list of four
    fp_line s-expressions forming the outline.
    """
    def _line(a, b):
        # Inlined fp_line construction so the outline is self-contained.
        return ["fp_line",
                ["start", a[0], a[1]],
                ["end", b[0], b[1]],
                ["layer", layer],
                ["width", thickness]]

    ne = (width/2 + centre[0], -height/2 + centre[1])
    nw = (-width/2 + centre[0], -height/2 + centre[1])
    se = (width/2 + centre[0], height/2 + centre[1])
    sw = (-width/2 + centre[0], height/2 + centre[1])
    out = [_line(a, b) for a, b in ((nw, ne), (ne, se), (se, sw), (sw, nw))]
    return nw, ne, se, sw, out
def model(path, offset, scale, rotate):
    """Build the s-expression attaching a 3D model to a footprint."""
    def _xyz(v):
        return ["xyz", v[0], v[1], v[2]]

    return ["model", path,
            ["at", _xyz(offset)],
            ["scale", _xyz(scale)],
            ["rotate", _xyz(rotate)]]
| JMW95/agg-kicad | scripts/kicad_mod.py | Python | mit | 2,908 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
DOCUMENTATION = '''
---
module: homebrew_tap
author: '"Daniel Jaouen (@danieljaouen)" <dcj24@cornell.edu>'
short_description: Tap a Homebrew repository.
description:
- Tap external Homebrew repositories.
version_added: "1.6"
options:
tap:
description:
- The repository to tap.
required: true
state:
description:
- state of the repository.
choices: [ 'present', 'absent' ]
required: false
default: 'present'
requirements: [ homebrew ]
'''
EXAMPLES = '''
homebrew_tap: tap=homebrew/dupes state=present
homebrew_tap: tap=homebrew/dupes state=absent
homebrew_tap: tap=homebrew/dupes,homebrew/science state=present
'''
def a_valid_tap(tap):
    '''Returns True if the tap is valid.'''
    # "user/repo", where the repo may carry an optional "homebrew-" prefix
    return re.match(r'^([\w-]+)/(homebrew-)?([\w-]+)$', tap)
def already_tapped(module, brew_path, tap):
    '''Returns True if already tapped.'''
    # `brew tap` with no arguments prints one tap name per line.
    rc, out, err = module.run_command([brew_path, 'tap'])
    current_taps = set(line.strip().lower() for line in out.split('\n') if line)
    return tap.lower() in current_taps
def add_tap(module, brew_path, tap):
    '''Adds a single tap.

    Returns a (failed, changed, msg) tuple.  Honors Ansible check mode by
    exiting early (changed=True) before actually tapping.
    '''
    failed, changed, msg = False, False, ''

    if not a_valid_tap(tap):
        failed = True
        msg = 'not a valid tap: %s' % tap

    elif not already_tapped(module, brew_path, tap):
        if module.check_mode:
            module.exit_json(changed=True)

        rc, out, err = module.run_command([
            brew_path,
            'tap',
            tap,
        ])
        # verify the tap actually took effect instead of trusting rc
        if already_tapped(module, brew_path, tap):
            changed = True
            msg = 'successfully tapped: %s' % tap
        else:
            failed = True
            msg = 'failed to tap: %s' % tap

    else:
        msg = 'already tapped: %s' % tap

    return (failed, changed, msg)
def add_taps(module, brew_path, taps):
    '''Adds one or more taps.

    Returns a (failed, changed, msg) tuple summarising how many taps were
    added and how many were already present.
    '''
    # Bug fix: `changed` was previously only assigned inside the loop, so an
    # empty `taps` list raised NameError at the return statement.
    failed, changed, unchanged, added, msg = False, False, 0, 0, ''

    for tap in taps:
        (failed, changed, msg) = add_tap(module, brew_path, tap)
        if failed:
            break
        if changed:
            added += 1
        else:
            unchanged += 1

    if failed:
        # keep the per-tap error message from add_tap in the summary
        msg = 'added: %d, unchanged: %d, error: ' + msg
        msg = msg % (added, unchanged)
    elif added:
        changed = True
        msg = 'added: %d, unchanged: %d' % (added, unchanged)
    else:
        msg = 'added: %d, unchanged: %d' % (added, unchanged)

    return (failed, changed, msg)
def remove_tap(module, brew_path, tap):
    '''Removes a single tap.

    Returns a (failed, changed, msg) tuple.  Honors Ansible check mode by
    exiting early (changed=True) before actually untapping.
    '''
    failed, changed, msg = False, False, ''

    if not a_valid_tap(tap):
        failed = True
        msg = 'not a valid tap: %s' % tap

    elif already_tapped(module, brew_path, tap):
        if module.check_mode:
            module.exit_json(changed=True)

        rc, out, err = module.run_command([
            brew_path,
            'untap',
            tap,
        ])
        # verify the untap actually took effect instead of trusting rc
        if not already_tapped(module, brew_path, tap):
            changed = True
            msg = 'successfully untapped: %s' % tap
        else:
            failed = True
            msg = 'failed to untap: %s' % tap

    else:
        msg = 'already untapped: %s' % tap

    return (failed, changed, msg)
def remove_taps(module, brew_path, taps):
    '''Removes one or more taps.

    Returns a (failed, changed, msg) tuple summarising how many taps were
    removed and how many were already absent.
    '''
    # Bug fix: `changed` was previously only assigned inside the loop, so an
    # empty `taps` list raised NameError at the return statement.
    failed, changed, unchanged, removed, msg = False, False, 0, 0, ''

    for tap in taps:
        (failed, changed, msg) = remove_tap(module, brew_path, tap)
        if failed:
            break
        if changed:
            removed += 1
        else:
            unchanged += 1

    if failed:
        # keep the per-tap error message from remove_tap in the summary
        msg = 'removed: %d, unchanged: %d, error: ' + msg
        msg = msg % (removed, unchanged)
    elif removed:
        changed = True
        msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
    else:
        msg = 'removed: %d, unchanged: %d' % (removed, unchanged)

    return (failed, changed, msg)
def main():
    """Ansible module entry point: add or remove the requested Homebrew taps."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['tap'], required=True),
            state=dict(default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True,
    )

    brew_path = module.get_bin_path(
        'brew',
        required=True,
        opt_dirs=['/usr/local/bin']
    )

    # multiple taps may be given as a comma-separated list
    taps = module.params['name'].split(',')

    if module.params['state'] == 'present':
        failed, changed, msg = add_taps(module, brew_path, taps)

        if failed:
            module.fail_json(msg=msg)
        else:
            module.exit_json(changed=changed, msg=msg)

    elif module.params['state'] == 'absent':
        failed, changed, msg = remove_taps(module, brew_path, taps)

        if failed:
            module.fail_json(msg=msg)
        else:
            module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gaqzi/ansible-modules-extras | packaging/os/homebrew_tap.py | Python | gpl-3.0 | 5,800 |
import sys, re, os
import numpy as np
import pandas as pd
from glob import glob
from nipype.utils import filemanip
from nipype.utils.filemanip import (fname_presuffix, copyfile, split_filename)
import utils
import nipype_ext as npe
def spm_dartel_make(gm, wm, template_dir, template_nme):
    """ run dartel to make template and flowfields
    template_dir will be location of saved templates
    template_nme will be used to name template and
    flow fields"""
    startdir = os.getcwd()
    os.chdir(template_dir)
    try:
        dartel = npe.DARTEL(matlab_cmd = 'matlab-spm8')
        dartel.inputs.image_files = [gm, wm]
        dartel.inputs.template_prefix = template_nme
        dartel_out = dartel.run()
    finally:
        # Bug fix: always restore the original working directory, even when
        # dartel.run() raises -- otherwise the process stayed chdir'd into
        # template_dir for the rest of the script.
        os.chdir(startdir)
    return dartel_out
def move_flowfields(inflowfields):
    """Move DARTEL flow-field files into a sibling 'dartel' directory.

    Bug fix: the original body called make_dir, copy_file and remove_files,
    none of which are defined or imported in this module (only nipype's
    split_filename/copyfile are), so every call raised NameError.  The same
    move is now done with os/shutil directly.

    Returns the list of new flow-field paths.
    """
    import shutil

    flowfields = []
    for ff in inflowfields:
        pth, nme, ext = split_filename(ff)
        subdir, _ = os.path.split(pth)
        # place the 'dartel' directory alongside the file's parent directory,
        # mirroring the original make_dir(subdir, dirname='dartel') intent
        darteldir = os.path.join(subdir, 'dartel')
        if not os.path.isdir(darteldir):
            os.makedirs(darteldir)
        newff = os.path.join(darteldir, nme + ext)
        shutil.copyfile(ff, newff)
        os.remove(ff)
        flowfields.append(newff)
    return flowfields
def write_dartel_log(templates, flowfields):
    """Write a .log file next to the first template, listing the template
    files and the flow fields used to build it.  Returns the log path."""
    pth, nme, _ = split_filename(templates[0])
    logfile = os.path.join(pth, nme + '.log')
    with open(logfile, 'w+') as fid:
        fid.writelines(t + '\n' for t in templates)
        fid.write('\n')
        fid.writelines(ff + '\n' for ff in flowfields)
    return logfile
if __name__ == '__main__':
    ### Change items here ##############################################
    # get structurals
    datadir = '/home/jagust/graph/data/mri1.5/tr220'
    anatstr = 'B*/raw/B*_anat.nii.gz'
    # NOTE(review): get_files_old_only is not defined or imported in this
    # module, so this line raises NameError as written -- confirm the
    # intended helper (utils.get_files?).
    anatomicals = get_files_old_only(datadir, anatstr)
    ####################################################################

    # run dartel on cohort
    gms = utils.get_files(datadir, 'B*/despike_ants_realign/coreg_masks/aparcaseg.nii.gz')
    wms = utils.get_files(datadir, 'B*/despike_ants_realign/coreg_masks/B*_WM_mask.nii.gz')
    gms.sort()
    wms.sort()
    files = []
    pth, nme, ext = filemanip.split_filename(gms[0])
    datestr = utils.make_datestr()
    tmplt_nme = 'dartel_%s'%(datestr)
    templatedir = '/home/jagust/graph/data/mri1.5/tr220/template'
    dout = spm_dartel_make(gms, wms, templatedir, tmplt_nme)
    #template = get_files_old_only(datadir,'B*/anat/vbm8/%s*'%(tmplt_nme))
    templatedir, exists = utils.make_dir(datadir,'template')
    # NOTE(review): `template` is only assigned in the commented-out line
    # above, so the next two statements raise NameError as written.
    newtemplate = utils.copy_files(template, templatedir)
    utils.remove_files(template)
    #flowfieldstmp = utils.get_files(datadir,'*/anat/vbm8/*%s*'%(tmplt_nme))
    # NOTE(review): `flowfieldstmp` likewise only comes from the
    # commented-out line above.
    flowfields = move_flowfields(flowfieldstmp)
    dartellog = write_dartel_log(newtemplate, flowfields)
| klarnemann/jagust_rsfmri | rsfmri/template.py | Python | mit | 2,757 |
__all__ = ['polygon_clip', 'polygon_area']
import numpy as np
def polygon_clip(rp, cp, r0, c0, r1, c1):
    """Clip a polygon to the given bounding box.

    Parameters
    ----------
    rp, cp : (N,) ndarray of double
        Row and column coordinates of the polygon.
    (r0, c0), (r1, c1) : double
        Top-left and bottom-right coordinates of the bounding box.

    Returns
    -------
    r_clipped, c_clipped : (M,) ndarray of double
        Coordinates of clipped polygon.

    Notes
    -----
    This makes use of Sutherland-Hodgman clipping as implemented in
    AGG 2.4 and exposed in Matplotlib.
    """
    # imported lazily so matplotlib is only required when clipping is used
    from matplotlib import path, transforms
    poly = path.Path(np.vstack((rp, cp)).T, closed=True)
    clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])
    poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]

    # This should be fixed in matplotlib >1.5
    # (clip_to_bbox can duplicate the closing vertex; drop the duplicate)
    if np.all(poly_clipped[-1] == poly_clipped[-2]):
        poly_clipped = poly_clipped[:-1]

    return poly_clipped[:, 0], poly_clipped[:, 1]
def polygon_area(pr, pc):
    """Compute the area of a polygon via the shoelace formula.

    Parameters
    ----------
    pr, pc : (N,) array of float
        Polygon row and column coordinates (closed: last vertex equals
        the first).

    Returns
    -------
    a : float
        Area of the polygon.
    """
    rows = np.asarray(pr)
    cols = np.asarray(pc)
    # Signed cross-products of consecutive vertex pairs; the absolute
    # value makes the result independent of winding direction.
    cross = cols[:-1] * rows[1:] - cols[1:] * rows[:-1]
    return 0.5 * abs(cross.sum())
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/_shared/_geometry.py | Python | gpl-3.0 | 1,409 |
# FROMS
from models import Player
from graph import get_graph_from_edges, draw_graph, get_full_cycles_from_graph,\
full_cycle_to_edges, get_one_full_cycle, convert_full_cycle_to_graph,\
get_one_full_cycle_from_graph, get_hamiltonian_path_from_graph,\
is_there_definitely_no_hamiltonian_cycle, hamilton
import networkx as nx
import time
import random
from random import shuffle
# Constants
# Gender strings -- assumed to match the values stored on Player.gender and
# Player.gender_pref (TODO confirm against the models module).
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_NONBINARY = "Non-binary"
GENDER_NOPREF = "No preference"
# When True, each connected component (and the final cycle) is rendered via
# draw_graph() during arrangement.
DISPLAY_GRAPH = True
# Changing this value changes how much we care about the houses of players being the same
# If 1 - we don't care, and house de-conflicting is ignored. 0 means we won't allow any players of the same house to be matched.
RELAX_SAME_HOUSE_REQUIREMENT_PERCENTAGE = 0.00
# Same semantics as above, but for the players' faculty instead of house.
RELAX_SAME_FACULTY_REQUIREMENT_PERCENTAGE = 0.00
def get_house_from_player(player):
    """Map a player's floor number to their house name.

    Floors 3-21 belong to one of six houses; any other floor is invalid.

    :param player: object with an integer ``floor`` attribute
    :returns: house name string
    :raises ValueError: if the floor is outside every known house range
    """
    if player.floor == 3:
        return "prof"
    elif 4 <= player.floor <= 7:
        return "shan"
    elif 8 <= player.floor <= 11:
        return "ora"
    elif 12 <= player.floor <= 14:
        return "gaja"
    elif 15 <= player.floor <= 18:
        return "tancho"
    elif 19 <= player.floor <= 21:
        return "ponya"
    else:
        # BUG FIX: player.floor is an int; the original concatenated it
        # directly into the message, raising TypeError instead of the
        # intended ValueError.
        raise ValueError('Floor provided (' + str(player.floor) +
                         ') for player ' + str(player) + ' is invalid!')
def is_gender_pref_respected(player_being_checked, other_player):
if player_being_checked.gender_pref == GENDER_NOPREF:
# If they have no preference, always respected
print "Nopref"
return True
else:
# Otherwise check if the other_player gender is what is wanted
gender_pref_respected = player_being_checked.gender_pref == other_player.gender
return gender_pref_respected
def are_gender_prefs_respected(angel_player, mortal_player):
    """True only when the gender-preference check passes in both directions."""
    forward_ok = is_gender_pref_respected(angel_player, mortal_player)
    # Short-circuit preserved: the reverse check only runs when the
    # forward one succeeds (the helper prints as a side effect).
    return forward_ok and is_gender_pref_respected(mortal_player, angel_player)
def is_there_edge_between_players(angel_player, mortal_player):
'''
Checks if two players are valid as an angel-mortal pair i.e. an "edge"
exists between them. If we are enforcing a heterogenous gender mix for these
players - check if they are of the same gender and return False (no edge)
between them
'''
print "Checking %s and %s" % (angel_player, mortal_player)
# Check if gender choice is respected
gender_pref_is_respected = are_gender_prefs_respected(
angel_player, mortal_player)
# Check house and faculty are not the same
random_relax_fac_requirement = random.random() < RELAX_SAME_FACULTY_REQUIREMENT_PERCENTAGE
if random_relax_fac_requirement:
players_are_from_same_faculty = False
else:
players_are_from_same_faculty = angel_player.faculty == mortal_player.faculty
# Relax house requirement
random_relax_house_requirement = random.random() < RELAX_SAME_HOUSE_REQUIREMENT_PERCENTAGE
if random_relax_house_requirement:
players_are_from_same_house = False
else:
players_are_from_same_house = get_house_from_player(
angel_player) == get_house_from_player(mortal_player)
valid_pairing = not (players_are_from_same_faculty) and gender_pref_is_respected and (not players_are_from_same_house)# Remove same-house reqr --> #or players_are_from_same_house) and
if players_are_from_same_faculty:
print "players from same fac\n"
#ignore this requirement
if players_are_from_same_house:
print "players from same house\n"
if not gender_pref_is_respected:
print "gender pref not respected"
print "\n"
return valid_pairing
def get_player_edges_from_player_list(player_list):
    """Build the directed edge list of all valid angel->mortal pairings.

    Every ordered pair of distinct players is tested; the pair ordering
    and the validity checks are delegated to is_there_edge_between_players.
    """
    edges = []
    for angel in player_list:
        for mortal in player_list:
            if mortal == angel:
                continue
            if is_there_edge_between_players(angel, mortal):
                edges.append((angel, mortal))
    return edges
def angel_mortal_arrange(player_list):
    '''
    Depending on the gender preferences to follow, run the edge-finding
    algorithm, generate a graph and find a Hamiltonian circuit.

    Returns a list with one player chain (cycle) per strongly connected
    component for which a Hamiltonian cycle was found.
    '''
    print "Arranging player list: %s" % player_list
    # Convert the list of players into a list of valid edges
    player_edges = get_player_edges_from_player_list(player_list)
    # Generate the overall graph from all edges
    overall_graph = get_graph_from_edges(player_edges)
    print "Number of nodes in overall graph: " + str(overall_graph.number_of_nodes())
    # Find all connected components and find cycles for all
    graphs = list(nx.strongly_connected_component_subgraphs(overall_graph))
    print "\nConnected components detected: %s" % len(graphs)
    print "Printing original player list: "
    for player in player_list:
        print player
    print "\n\n"
    print "Player list size: " + str(len(player_list))
    list_of_player_chains = []
    #for G in graphs:
    #    draw_graph(G)
    # Process each strongly connected component independently; a chain can
    # only exist inside one component.
    for G in graphs:
        print "Printing players in current graph:"
        for graph_player in G.nodes():
            print graph_player
        # Draw this intermediate graph
        print "Number of nodes in graph: " + str(G.number_of_nodes())
        if DISPLAY_GRAPH:
            draw_graph(G)
        # Find out if there is DEFINITELY no hamiltonian cycle
        # NOTE(review): the variable name is inverted -- this flag is True
        # when a cycle is definitely ABSENT (see helper name).
        is_there_full_cycle = is_there_definitely_no_hamiltonian_cycle(G)
        print "Is there DEFINITELY no full cycle? - %s" % is_there_full_cycle
        # Sleep for a few seconds
        time.sleep(2)
        '''
        # Output all cycles that encompass all nodes (valid pairings)
        full_cycles = get_full_cycles_from_graph(G)
        # Pick any full cycle to draw, or draw nothing if there are no full cycles
        full_cycle = get_one_full_cycle(full_cycles)
        '''
        # hamilton() searches for a Hamiltonian cycle; returns None on failure.
        full_cycle = hamilton(G) #get_one_full_cycle_from_graph(G)
        #full_cycle = get_hamiltonian_path_from_graph(G)
        # Draw the full cycle if it exists
        if full_cycle is not None:
            G_with_full_cycle = convert_full_cycle_to_graph(full_cycle)
            draw_graph(G_with_full_cycle)
            list_of_player_chains.append(full_cycle)
        else:
            print "There is no full cycle - sorry! This means that the current set of players cannot form a perfect chain given the arrange requirements"
    return list_of_player_chains
| frizensami/archangel | arrange.py | Python | mit | 6,656 |
from django import forms
from django.forms.forms import BoundField
from .helpers import LMIForAllClient
from .fields import MultiCharField
class FieldSet(object):
    """
    Groups a subset of a form's fields under a common legend.

    Taken from stackoverflow.com/questions/10366745/django-form-field-grouping
    Iterating over a FieldSet yields BoundField objects for each named field.
    """

    def __init__(self, form, fields, legend='', cls=None):
        self.form = form
        self.legend = legend
        self.fields = fields
        self.cls = cls

    def __iter__(self):
        # Yield a BoundField for each configured field name, in order.
        for field_name in self.fields:
            yield BoundField(self.form, self.form.fields[field_name], field_name)
class NoColonForm(forms.Form):
    """Form whose labels carry no suffix (suppresses Django's default ':')."""

    def __init__(self, *args, **kwargs):
        # An explicitly supplied label_suffix wins; otherwise force it empty.
        if 'label_suffix' not in kwargs:
            kwargs['label_suffix'] = ''
        super().__init__(*args, **kwargs)
class BaseLMIForm(NoColonForm):
    """NoColonForm that owns an LMIForAllClient for LMI-for-All lookups."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One API client per form instance; used by subclasses to run searches.
        self.lmi_client = LMIForAllClient()
class SectorForm(NoColonForm):
    """Asks the user for up to three free-text job-type keywords."""

    # Number of text inputs rendered for the sector question.
    SECTOR_INPUT_COUNT = 3

    sector = MultiCharField(
        count=SECTOR_INPUT_COUNT,
        require_all_fields=False,
        label="How would you describe the types of jobs you could do?",
        help_text=" eg customer services, security, data entry, driver",
        error_messages={'required': 'Enter at least one job role', },
    )
class JobDescriptionsForm(BaseLMIForm):
    """Form whose checkbox fields are job titles matched from LMI keyword
    searches.

    One FieldSet is created per keyword; each field is keyed by its SOC
    (Standard Occupational Classification) code.  Validation requires at
    least one job title to be selected.
    """

    def __init__(self, *args, **kwargs):
        # 'keywords' is our own kwarg; pop it before Django's Form sees it
        # (idiomatic replacement for the read-then-del of the original).
        keywords = kwargs.pop('keywords')
        super().__init__(*args, **kwargs)
        self.fieldsets = []
        self._add_fields_from_keywords(keywords)

    def _add_fields_from_keywords(self, keywords):
        """Add up to six checkbox fields (deduplicated by SOC code) per
        non-empty keyword, grouping each keyword's fields in a FieldSet."""
        for keyword in keywords:
            if not keyword:
                continue  # skip blank keyword inputs
            soc_codes = []
            lmi_data = self.lmi_client.keyword_search(keyword)
            count = 6  # show at most the first six matches per keyword
            for item in lmi_data[:count]:
                soc_code = str(item['soc'])
                if soc_code in soc_codes:
                    continue  # the API may repeat a SOC code; keep the first
                soc_codes.append(soc_code)
                self.fields[soc_code] = forms.BooleanField(
                    widget=forms.CheckboxInput,
                    label=item['title'],
                    help_text=item['description'],
                    required=False,
                )
            self.fieldsets.append(FieldSet(
                self, list(soc_codes), keyword))

    def clean(self):
        """Reject the submission unless at least one checkbox is ticked."""
        cleaned_data = super().clean()
        if not any(cleaned_data.values()):
            raise forms.ValidationError(
                "Please select at least one job title",
                code='invalid'
            )
        return cleaned_data
| lm-tools/situational | situational/apps/sectors/forms.py | Python | bsd-3-clause | 2,817 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Voile, détaillée plus bas."""
from math import fabs, degrees
from bases.objet.attribut import Attribut
from primaires.vehicule.vecteur import get_direction
from secondaires.navigation.constantes import *
from .base import BaseElement
class Voile(BaseElement):

    """Sail element of a ship's mast.

    Persistent attributes:
        orientation -- trim angle in degrees; negative values lean to
                       port (bâbord), positive to starboard (tribord)
        hissee      -- True when the sail is hoisted, False when furled
    """

    nom_type = "voile"

    def __init__(self, cle=""):
        """Element type constructor."""
        BaseElement.__init__(self, cle)
        # Sail-specific persistent attributes (see class docstring).
        self._attributs = {
            "orientation": Attribut(lambda: 5),
            "hissee": Attribut(lambda: False),
        }

    def get_nom_orientation(self):
        """Return the French phrase describing the sail's current trim.

        BUG FIX: this was declared ``@staticmethod`` with a ``voile``
        parameter, but both call sites in this class invoke it as
        ``self.get_nom_orientation()`` with no argument, which raised
        TypeError.  It is now a regular instance method.
        """
        or_voile = self.orientation
        if -ANGLE_GRAND_LARGUE > or_voile:
            return "orientée vent arrière sur bâbord amure"
        elif or_voile > ANGLE_GRAND_LARGUE:
            return "orientée vent arrière sur tribord amure"
        elif -ANGLE_LARGUE > or_voile:
            return "orientée grand largue sur bâbord amure"
        elif or_voile > ANGLE_LARGUE:
            return "orientée grand largue sur tribord amure"
        elif -ANGLE_BON_PLEIN > or_voile:
            return "orientée au largue sur bâbord amure"
        elif or_voile > ANGLE_BON_PLEIN:
            return "orientée au largue sur tribord amure"
        elif -ANGLE_PRES > or_voile:
            return "orientée au bon plein sur bâbord amure"
        elif or_voile > ANGLE_PRES:
            return "orientée au bon plein sur tribord amure"
        elif -2 > or_voile:
            return "serrée au plus près sur bâbord amure"
        elif or_voile > 2:
            return "serrée au plus près sur tribord amure"
        else:
            return "parfaitement parallèle au pont"

    def get_description_ligne(self, personnage):
        """Return the one-line description of this element."""
        if self.hissee:
            message = self.get_nom_orientation() + "."
        else:
            message = "repliée contre le mât."
        return self.nom.capitalize() + " est " + message

    def regarder(self, personnage):
        """personnage looks at self."""
        msg = BaseElement.regarder(self, personnage)
        or_voile = self.orientation
        cote = "tribord"
        if or_voile < 0:
            cote = "bâbord"
            or_voile = -or_voile
        # Round the displayed angle to the nearest 5 degrees.
        or_voile = round(or_voile / 5) * 5
        if self.hissee:
            msg += "\nCette voile est " + self.get_nom_orientation()
            msg += " ({orientation}° {cote}).".format(
                    orientation=or_voile, cote=cote)
        else:
            msg += "\nCette voile est repliée contre le mât."
        return msg

    @staticmethod
    def _angle_optimal(allure, or_voile):
        """Return the optimal sail angle for the given point of sail.

        ``allure`` is the angle (0-360 degrees) between the ship's heading
        and the wind; ``or_voile`` is the negated current sail orientation.
        Factored out of ``facteur_orientation`` and ``orienter``, which
        previously duplicated this computation line for line.
        """
        if ALL_DEBOUT < allure < (360 - ALL_DEBOUT):
            angle = ANGLE_DEBOUT
        elif ALL_PRES < allure < (360 - ALL_PRES):
            angle = ANGLE_PRES
        elif ALL_BON_PLEIN < allure < (360 - ALL_BON_PLEIN):
            angle = ANGLE_BON_PLEIN
        elif ALL_LARGUE < allure < (360 - ALL_LARGUE):
            angle = ANGLE_LARGUE
        elif ALL_GRAND_LARGUE < allure < (360 - ALL_GRAND_LARGUE):
            angle = ANGLE_GRAND_LARGUE
        else:
            angle = ANGLE_ARRIERE
        # Mirror the angle when sailing on the other tack.
        if allure < 180:
            angle = -angle
        if angle == 90 and or_voile < 0:
            angle = -90
        return angle

    def facteur_orientation(self, navire, vent):
        """Return the propulsion factor (0..1) of the sail's current trim."""
        allure = (navire.direction.direction - get_direction(vent)) % 360
        or_voile = -self.orientation
        angle = self._angle_optimal(allure, or_voile)
        # Efficiency drops linearly as trim departs from the optimum;
        # 20 degrees or more off gives zero propulsion.
        facteur = 1 - fabs((angle - or_voile) / 20)
        if facteur < 0:
            facteur = 0
        return facteur

    def orienter(self, navire, vent):
        """Trim the sail to the best propulsion angle for the current wind."""
        vent_direction = get_direction(vent)
        allure = (navire.direction.direction - vent_direction) % 360
        or_voile = -self.orientation
        angle = self._angle_optimal(allure, or_voile)
        # Zero the orientation when the sail switches side -- done with an
        # in-place update (presumably so the Attribut wrapper registers the
        # change -- TODO confirm).
        if angle < 0 and self.orientation >= 0 or \
                angle > 0 and self.orientation <= 0:
            self.orientation -= self.orientation
        # Never leave the sail dead flat: keep at least 5 degrees of trim.
        if -5 < angle < 5:
            angle = 5 if angle >= 0 else -5
        self.orientation = -angle

    def pre_hisser(self, personnage):
        """Ask personnage to start hoisting the sail.

        Must be called before ``post_hisser``; the returned number is the
        delay in seconds between the two.
        """
        salle = personnage.salle
        personnage << "Vous commencez de hisser la voile, aux prises " \
                "avec les cordages."
        personnage.etats.ajouter("hisser_voile")
        salle.envoyer("{} commence à hisser la voile, aux prises " \
                "avec les cordages", personnage)
        return 7

    def post_hisser(self, personnage):
        """Finish hoisting the sail (no-op if the action was interrupted)."""
        salle = personnage.salle
        if "hisser_voile" not in personnage.etats:
            return
        personnage.etats.retirer("hisser_voile")
        self.hissee = True
        personnage << "Vous hissez {}.".format(self.nom.lower())
        salle.envoyer("{{}} hisse {}.".format(self.nom.lower()),
                personnage)

    def pre_plier(self, personnage):
        """Ask personnage to start furling the sail; returns the delay."""
        salle = personnage.salle
        personnage << "Vous commencez de replier la voile."
        personnage.etats.ajouter("plier_voile")
        salle.envoyer("{} commence de replier la voile.", personnage)
        return 7

    def post_plier(self, personnage):
        """Finish furling the sail (no-op if the action was interrupted)."""
        salle = personnage.salle
        if "plier_voile" not in personnage.etats:
            return
        personnage.etats.retirer("plier_voile")
        self.hissee = False
        personnage << "Vous pliez {}.".format(self.nom.lower())
        salle.envoyer("{{}} plie {}.".format(self.nom.lower()),
                personnage)
| stormi/tsunami | src/secondaires/navigation/elements/voile.py | Python | bsd-3-clause | 8,224 |
import os
import time
from ethereum import utils
from ethereum import pruning_trie as trie
from ethereum.refcount_db import RefcountDB
from ethereum.db import OverlayDB
from ethereum.utils import to_string, is_string
import rlp
from rlp.utils import encode_hex
from ethereum import blocks
from ethereum import processblock
from ethereum.slogging import get_logger
from ethereum.config import Env
import sys
log = get_logger('eth.chain')
class Index(object):
    """
    Collection of indexes
    children:
    - needed to get the uncles of a block
    blocknumbers:
    - needed to mark the longest chain (path to top)
    transactions:
    - optional to resolve txhash to block:tx
    """
    def __init__(self, env, index_transactions=True):
        # env: chain Env holding the database.  When index_transactions is
        # True, every added block also indexes its transactions by hash.
        assert isinstance(env, Env)
        self.env = env
        self.db = env.db
        self._index_transactions = index_transactions
    def add_block(self, blk):
        # Register blk as a child of its parent and (optionally) index its txs.
        self.add_child(blk.prevhash, blk.hash)
        if self._index_transactions:
            self._add_transactions(blk)
    # block by number #########
    def _block_by_number_key(self, number):
        # DB key mapping a height to the canonical block hash at that height.
        return 'blocknumber:%d' % number
    def update_blocknumbers(self, blk):
        "start from head and update until the existing indices match the block"
        while True:
            if blk.number > 0:
                # Non-genesis entries go through the refcounted temporary
                # store so they can be reverted on a reorg.
                self.db.put_temporarily(self._block_by_number_key(blk.number), blk.hash)
            else:
                # The genesis entry is stored permanently.
                self.db.put(self._block_by_number_key(blk.number), blk.hash)
            self.db.commit_refcount_changes(blk.number)
            if blk.number == 0:
                break
            blk = blk.get_parent()
            # Stop as soon as the stored index already agrees with this chain.
            if self.has_block_by_number(blk.number) and \
               self.get_block_by_number(blk.number) == blk.hash:
                break
    def has_block_by_number(self, number):
        # True when a canonical hash is recorded for this height.
        return self._block_by_number_key(number) in self.db
    def get_block_by_number(self, number):
        "returns block hash"
        return self.db.get(self._block_by_number_key(number))
    # transactions #############
    def _add_transactions(self, blk):
        "'tx_hash' -> 'rlp([blockhash,tx_number])"
        for i, tx in enumerate(blk.get_transactions()):
            self.db.put_temporarily(tx.hash, rlp.encode([blk.hash, i]))
        self.db.commit_refcount_changes(blk.number)
    def get_transaction(self, txhash):
        "return (tx, block, index)"
        # Resolve txhash -> (containing block hash, position in block),
        # then decode the block and pull the transaction out of it.
        blockhash, tx_num_enc = rlp.decode(self.db.get(txhash))
        blk = rlp.decode(self.db.get(blockhash), blocks.Block, env=self.env)
        num = utils.decode_int(tx_num_enc)
        tx_data = blk.get_transaction(num)
        return tx_data, blk, num
    # children ##############
    def _child_db_key(self, blk_hash):
        # DB key under which the list of child hashes of a block is stored.
        return b'ci:' + blk_hash
    def add_child(self, parent_hash, child_hash):
        # only efficient for few children per block
        # Read-modify-write the full child list; the set() dedupes in case
        # the same child is added twice.
        children = list(set(self.get_children(parent_hash) + [child_hash]))
        assert children.count(child_hash) == 1
        self.db.put_temporarily(self._child_db_key(parent_hash), rlp.encode(children))
    def get_children(self, blk_hash):
        "returns block hashes"
        key = self._child_db_key(blk_hash)
        if key in self.db:
            return rlp.decode(self.db.get(key))
        return []
class Chain(object):
    """
    Manages the chain and requests to it.
    :ivar head_candidate: the block which if mined by our miner would become
    the new head
    """
    head_candidate = None
    def __init__(self, env, genesis=None, new_head_cb=None, coinbase='\x00' * 20):
        # env: chain Env (db + config); genesis: optional pre-built genesis
        # block; new_head_cb: callback invoked whenever a new head is set;
        # coinbase: 20-byte address credited by the head candidate.
        assert isinstance(env, Env)
        self.env = env
        self.db = self.blockchain = env.db
        self.new_head_cb = new_head_cb
        self.index = Index(self.env)
        self._coinbase = coinbase
        if 'HEAD' not in self.db:
            # Fresh database: write genesis and point HEAD at it.
            self._initialize_blockchain(genesis)
        log.debug('chain @', head_hash=self.head)
        self.genesis = self.get(self.index.get_block_by_number(0))
        log.debug('got genesis', nonce=self.genesis.nonce.encode('hex'),
                  difficulty=self.genesis.difficulty)
        self._update_head_candidate()
    def _initialize_blockchain(self, genesis=None):
        # Store the (possibly default) genesis block, index it and make it
        # the head.  Only called when the DB has no 'HEAD' entry yet.
        log.info('Initializing new chain')
        if not genesis:
            genesis = blocks.genesis(self.env)
            log.info('new genesis', genesis_hash=genesis, difficulty=genesis.difficulty)
            self.index.add_block(genesis)
        self._store_block(genesis)
        assert genesis == blocks.get_block(self.env, genesis.hash)
        self._update_head(genesis)
        assert genesis.hash in self
        self.commit()
    @property
    def coinbase(self):
        assert self.head_candidate.coinbase == self._coinbase
        return self._coinbase
    @coinbase.setter
    def coinbase(self, value):
        self._coinbase = value
        # block reward goes to different address => redo finalization of head candidate
        self._update_head(self.head)
    @property
    def head(self):
        # Current best block, resolved from the persistent 'HEAD' pointer.
        if self.blockchain is None or 'HEAD' not in self.blockchain:
            self._initialize_blockchain()
        ptr = self.blockchain.get('HEAD')
        return blocks.get_block(self.env, ptr)
    def _update_head(self, block, forward_pending_transactions=True):
        # Make `block` the new head, reverting refcount changes of any
        # abandoned fork and re-verifying the new branch.
        log.debug('updating head')
        if not block.is_genesis():
            #assert self.head.chain_difficulty() < block.chain_difficulty()
            if block.get_parent() != self.head:
                log.debug('New Head is on a different branch',
                          head_hash=block, old_head_hash=self.head)
        # Some temporary auditing to make sure pruning is working well
        if block.number > 0 and block.number % 500 == 0 and isinstance(self.db, RefcountDB):
            trie.proof.push(trie.RECORDING)
            block.to_dict(with_state=True)
            n = trie.proof.get_nodelist()
            trie.proof.pop()
            sys.stderr.write('State size: %d\n' % sum([(len(rlp.encode(a)) + 32) for a in n]))
        # Fork detected, revert death row and change logs
        if block.number > 0:
            b = block.get_parent()
            h = self.head
            b_children = []
            if b.hash != h.hash:
                log.warn('reverting')
                # Walk both branches back to their common ancestor, undoing
                # the old branch's refcount changes and collecting the new
                # branch's blocks for re-verification.
                while h.number > b.number:
                    h.state.db.revert_refcount_changes(h.number)
                    h = h.get_parent()
                while b.number > h.number:
                    b_children.append(b)
                    b = b.get_parent()
                while b.hash != h.hash:
                    h.state.db.revert_refcount_changes(h.number)
                    h = h.get_parent()
                    b_children.append(b)
                    b = b.get_parent()
                # Re-verify the new branch from the fork point forward.
                for bc in b_children:
                    processblock.verify(bc, bc.get_parent())
        self.blockchain.put('HEAD', block.hash)
        assert self.blockchain.get('HEAD') == block.hash
        sys.stderr.write('New head: %s %d\n' % (utils.encode_hex(block.hash), block.number))
        self.index.update_blocknumbers(self.head)
        self._update_head_candidate(forward_pending_transactions)
        if self.new_head_cb and not block.is_genesis():
            self.new_head_cb(block)
    def _update_head_candidate(self, forward_pending_transactions=True):
        "after new head is set"
        log.debug('updating head candidate')
        # collect uncles
        blk = self.head  # parent of the block we are collecting uncles for
        uncles = set(u.header for u in self.get_brothers(blk))
        # Discard headers already used as uncles within the allowed depth.
        for i in range(self.env.config['MAX_UNCLE_DEPTH'] + 2):
            for u in blk.uncles:
                assert isinstance(u, blocks.BlockHeader)
                uncles.discard(u)
            if blk.has_parent():
                blk = blk.get_parent()
        assert not uncles or max(u.number for u in uncles) <= self.head.number
        uncles = list(uncles)[:self.env.config['MAX_UNCLES']]
        # create block
        # Build the candidate on an overlay DB so its state changes do not
        # touch the canonical database until the block is mined.
        ts = max(int(time.time()), self.head.timestamp + 1)
        _env = Env(OverlayDB(self.head.db), self.env.config, self.env.global_config)
        head_candidate = blocks.Block.init_from_parent(self.head, coinbase=self._coinbase,
                                                       timestamp=ts, uncles=uncles, env=_env)
        assert head_candidate.validate_uncles()
        # Remember the pre-finalization state root so transactions can be
        # applied later by temporarily undoing finalization.
        self.pre_finalize_state_root = head_candidate.state_root
        head_candidate.finalize()
        # add transactions from previous head candidate
        old_head_candidate = self.head_candidate
        self.head_candidate = head_candidate
        if old_head_candidate is not None and forward_pending_transactions:
            log.debug('forwarding pending transactions')
            for tx in old_head_candidate.get_transactions():
                self.add_transaction(tx)
        else:
            log.debug('discarding pending transactions')
    def get_uncles(self, block):
        """Return the uncles of `block`."""
        if not block.has_parent():
            return []
        else:
            return self.get_brothers(block.get_parent())
    def get_brothers(self, block):
        """Return the uncles of the hypothetical child of `block`."""
        o = []
        i = 0
        # Siblings of each ancestor up to MAX_UNCLE_DEPTH qualify.
        while block.has_parent() and i < self.env.config['MAX_UNCLE_DEPTH']:
            parent = block.get_parent()
            o.extend([u for u in self.get_children(parent) if u != block])
            block = block.get_parent()
            i += 1
        return o
    def get(self, blockhash):
        # Load a block by its 32-byte hash.
        assert is_string(blockhash)
        assert len(blockhash) == 32
        return blocks.get_block(self.env, blockhash)
    def has_block(self, blockhash):
        # True when a block with this 32-byte hash is stored.
        assert is_string(blockhash)
        assert len(blockhash) == 32
        return blockhash in self.blockchain
    def __contains__(self, blockhash):
        return self.has_block(blockhash)
    def _store_block(self, block):
        # Genesis is stored permanently; everything else goes through the
        # revertable temporary store.
        if block.number > 0:
            self.blockchain.put_temporarily(block.hash, rlp.encode(block))
        else:
            self.blockchain.put(block.hash, rlp.encode(block))
    def commit(self):
        self.blockchain.commit()
    def add_block(self, block, forward_pending_transactions=True):
        "returns True if block was added sucessfully"
        _log = log.bind(block_hash=block)
        # make sure we know the parent
        if not block.has_parent() and not block.is_genesis():
            _log.debug('missing parent')
            return False
        if not block.validate_uncles():
            _log.debug('invalid uncles')
            return False
        if not len(block.nonce) == 8:
            _log.debug('nonce not set')
            return False
        elif not block.header.check_pow(nonce=block.nonce) and\
                not block.is_genesis():
            _log.debug('invalid nonce')
            return False
        if block.has_parent():
            try:
                processblock.verify(block, block.get_parent())
            except processblock.VerificationFailed as e:
                _log.critical('VERIFICATION FAILED', error=e)
                f = os.path.join(utils.data_dir, 'badblock.log')
                open(f, 'w').write(to_string(block.hex_serialize()))
                return False
        if block.number < self.head.number:
            _log.debug("older than head", head_hash=self.head)
            # Q: Should we have any limitations on adding blocks?
        self.index.add_block(block)
        self._store_block(block)
        # set to head if this makes the longest chain w/ most work for that number
        if block.chain_difficulty() > self.head.chain_difficulty():
            _log.debug('new head')
            self._update_head(block, forward_pending_transactions)
        elif block.number > self.head.number:
            _log.warn('has higher blk number than head but lower chain_difficulty',
                      head_hash=self.head, block_difficulty=block.chain_difficulty(),
                      head_difficulty=self.head.chain_difficulty())
        # Flush caches and commit/clean up refcounts for this block's tries.
        block.transactions.clear_all()
        block.receipts.clear_all()
        block.state.db.commit_refcount_changes(block.number)
        block.state.db.cleanup(block.number)
        self.commit()  # batch commits all changes that came with the new block
        return True
    def get_children(self, block):
        # Resolve the indexed child hashes into full block objects.
        return [self.get(c) for c in self.index.get_children(block.hash)]
    def add_transaction(self, transaction):
        """Add a transaction to the :attr:`head_candidate` block.
        If the transaction is invalid, the block will not be changed.
        :returns: `True` is the transaction was successfully added or `False`
        if the transaction was invalid
        """
        assert self.head_candidate is not None
        head_candidate = self.head_candidate
        log.debug('add tx', num_txs=self.num_transactions(), tx=transaction, on=head_candidate)
        if self.head_candidate.includes_transaction(transaction.hash):
            log.debug('known tx')
            return
        old_state_root = head_candidate.state_root
        # revert finalization
        head_candidate.state_root = self.pre_finalize_state_root
        try:
            success, output = processblock.apply_transaction(head_candidate, transaction)
        except processblock.InvalidTransaction as e:
            # if unsuccessful the prerequisites were not fullfilled
            # and the tx is invalid, state must not have changed
            log.debug('invalid tx', error=e)
            head_candidate.state_root = old_state_root  # reset
            return False
        log.debug('valid tx')
        # we might have a new head_candidate (due to ctx switches in pyethapp)
        if self.head_candidate != head_candidate:
            log.debug('head_candidate changed during validation, trying again')
            self.add_transaction(transaction)
            return
        self.pre_finalize_state_root = head_candidate.state_root
        head_candidate.finalize()
        log.debug('tx applied', result=output)
        assert old_state_root != head_candidate.state_root
        return True
    def get_transactions(self):
        """Get a list of new transactions not yet included in a mined block
        but known to the chain.
        """
        if self.head_candidate:
            log.debug('get_transactions called', on=self.head_candidate)
            return self.head_candidate.get_transactions()
        else:
            return []
    def num_transactions(self):
        # Number of pending transactions in the head candidate (0 if none).
        if self.head_candidate:
            return self.head_candidate.transaction_count
        else:
            return 0
    def get_chain(self, start='', count=10):
        "return 'count' blocks starting from head or start"
        log.debug("get_chain", start=encode_hex(start), count=count)
        blocks = []
        block = self.head
        if start:
            # An explicit start hash must exist and lie on the main branch.
            if start not in self.index.db:
                return []
            block = self.get(start)
            if not self.in_main_branch(block):
                return []
        for i in range(count):
            blocks.append(block)
            if block.is_genesis():
                break
            block = block.get_parent()
        return blocks
    def in_main_branch(self, block):
        # A block is canonical when the height index maps its number to it.
        try:
            return block.hash == self.index.get_block_by_number(block.number)
        except KeyError:
            return False
    def get_descendants(self, block, count=1):
        # Return up to `count` canonical blocks following `block`.
        log.debug("get_descendants", block_hash=block)
        assert block.hash in self
        block_numbers = list(range(block.number + 1, min(self.head.number + 1,
                                                         block.number + count + 1)))
        return [self.get(self.index.get_block_by_number(n)) for n in block_numbers]
| vaporry/pyethereum | ethereum/chain.py | Python | mit | 15,969 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.