| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| renegelinas/mi-instrument | mi/dataset/parser/test/test_ctdav_nbosi_auv.py | Python | bsd-2-clause | 1,901 | 0.002104 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@fid mi-instrument/mi/dataset/parser/test/test_ctdav_nbosi_auv.py
@author Rene Gelinas
@brief Test code for a ctdav_nbosi_auv data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.driver.ctdav_nbosi.auv.resource import RESOURCE_PATH
from mi.dataset.parser.ctdav_nbosi_auv import CtdavNbosiAuvParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class CtdavNbosiAuvTestCase(ParserUnitTestCase):
"""
ctdav_nbosi_auv Parser unit test suite
"""
def test_simple(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
Expect the first two input records to be skipped due to invalid timestamp.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'CP05MOAS-A6264_AUVsubset_reduced.csv'), 'rU')
parser = CtdavNbosiAuvParser(stream_handle,
self.exception_callback)
particles = parser.get_records(20)
self.assert_particles(particles, 'ctdav_nbosi_auv.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
def test_long_stream(self):
"""
Read test data and pull out data particles.
        Assert the expected number of particles is captured and there are no exceptions.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'CP05MOAS-A6264_AUVsubset.csv'), 'rU')
parser = CtdavNbosiAuvParser(stream_handle,
self.exception_callback)
particles = parser.get_records(10000)
self.assertEqual(len(particles), 10000)
self.assertEqual(self.exception_callback_value, [])
stream_handle.close()
| uni2u/neutron | neutron/plugins/bigswitch/plugin.py | Python | apache-2.0 | 39,750 | 0.000126 |
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
from neutron.plugins.common import constants as pconst
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin):
def get_port_from_device(self, device):
port_id = re.sub(r"^%s" % const.TAP_DEVICE_PREFIX, "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
@property
def l3_plugin(self):
        return manager.NeutronManager.get_service_plugins().get(
            pconst.L3_ROUTER_NAT)
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
        all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers and self.l3_plugin:
routers = []
all_routers = self.l3
| schriftgestalt/drawbot | tests/testExport.py | Python | bsd-2-clause | 12,252 | 0.002285 |
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import sys
import os
import unittest
import glob
import drawBot
import random
import AppKit
from drawBot.context.tools.gifTools import gifFrameCount
from drawBot.misc import DrawBotError
from testSupport import StdOutCollector, TempFile, TempFolder, randomSeed
class ExportTest(unittest.TestCase):
def makeTestAnimation(self, numFrames=25):
randomSeed(0)
drawBot.newDrawing()
for i in range(numFrames):
drawBot.newPage(500, 500)
drawBot.frameDuration(1/25)
drawBot.fill(1)
drawBot.rect(0, 0, 500, 500)
drawBot.fill(0)
drawBot.rect(random.randint(0, 100), random.randint(0, 100), 400, 400)
def makeTestDrawing(self):
drawBot.newDrawing()
drawBot.newPage(500, 500)
drawBot.oval(100, 100, 300, 300)
def _saveImageAndReturnSize(self, extension, **options):
with TempFile(suffix=extension) as tmp:
drawBot.saveImage(tmp.path, **options)
fileSize = os.stat(tmp.path).st_size
return fileSize
def test_ffmpegCodec(self):
self.makeTestAnimation()
size_h264 = self._saveImageAndReturnSize(".mp4")
size_mpeg4 = self._saveImageAndReturnSize(".mp4", ffmpegCodec="mpeg4")
self.assertLess(size_h264, size_mpeg4, "encoded with h264 is expected to be smaller than with mpeg4")
def test_arbitraryOption(self):
self.makeTestAnimation(1)
with StdOutCollector(captureStdErr=True) as output:
self._saveImageAndReturnSize(".png", someArbitraryOption="foo")
self.assertEqual(output, ['*** DrawBot warning: Unrecognized saveImage() option found for PNGContext: someArbitraryOption ***'])
def test_export_mov(self):
self.makeTestAnimation(5)
self._saveImageAndReturnSize(".mov")
def test_export_gif(self):
self.makeTestAnimation(5)
self._saveImageAndReturnSize(".gif")
def test_export_png(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".png")
def test_export_jpg(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".jpg")
def test_export_jpeg(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".jpeg")
def test_export_tif(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".tif")
def test_export_tiff(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".tiff")
def test_export_bmp(self):
self.makeTestDrawing()
self._saveImageAndReturnSize(".bmp")
def test_imageResolution(self):
self.makeTestDrawing()
with TempFile(suffix=".png") as tmp:
drawBot.saveImage(tmp.path)
self.assertEqual(drawBot.imageSize(tmp.path), (500, 500))
drawBot.saveImage(tmp.path, imageResolution=144)
self.assertEqual(drawBot.imageSize(tmp.path), (1000, 1000))
drawBot.saveImage(tmp.path, imageResolution=36)
self.assertEqual(drawBot.imageSize(tmp.path), (250, 250))
drawBot.saveImage(tmp.path, imageResolution=18)
self.assertEqual(drawBot.imageSize(tmp.path), (125, 125))
def test_imagePNGInterlaced(self):
self.makeTestDrawing()
defaultSize = self._saveImageAndReturnSize(".png")
interlacedSize = self._saveImageAndReturnSize(".png", imagePNGInterlaced=True)
# XXX Huh, seems to make no difference, output files are identical
self.assertEqual(defaultSize, interlacedSize)
def test_imagePNGGamma(self):
self.makeTestDrawing()
defaultSize = self._saveImageAndReturnSize(".png")
gammaSize = self._saveImageAndReturnSize(".png", imagePNGGamma=0.8)
        self.assertLess(defaultSize, gammaSize)
def test_imageJPEGProgressive(self):
self.makeTestDrawing()
defaultSize = self._saveImageAndReturnSize(".jpg")
progressiveSize = self._saveImageAndReturnSize(".jpg", imageJPEGProgressive=True)
self.assertGreater(defaultSize, progressiveSize)
    def test_imageJPEGCompressionFactor(self):
self.makeTestDrawing()
lowCompressionSize = self._saveImageAndReturnSize(".jpg", imageJPEGCompressionFactor=1.0)
mediumCompressionSize = self._saveImageAndReturnSize(".jpg", imageJPEGCompressionFactor=0.5)
highCompressionSize = self._saveImageAndReturnSize(".jpg", imageJPEGCompressionFactor=0.0)
self.assertGreater(lowCompressionSize, mediumCompressionSize)
self.assertGreater(mediumCompressionSize, highCompressionSize)
def test_imageTIFFCompressionMethod(self):
self.makeTestDrawing()
defaultCompressionSize = self._saveImageAndReturnSize(".tif")
noCompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod=None)
packbitsCompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod="packbits")
packbits2CompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod=32773)
packbits3CompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod="PACKBITS")
lzwCompressionSize = self._saveImageAndReturnSize(".tif", imageTIFFCompressionMethod="lzw")
self.assertEqual(defaultCompressionSize, noCompressionSize)
self.assertEqual(packbitsCompressionSize, packbits2CompressionSize)
self.assertEqual(packbitsCompressionSize, packbits3CompressionSize)
self.assertGreater(noCompressionSize, packbitsCompressionSize)
self.assertGreater(packbitsCompressionSize, lzwCompressionSize)
def test_imageFallbackBackgroundColor(self):
self.makeTestDrawing()
with TempFile(suffix=".jpg") as tmp:
drawBot.saveImage(tmp.path, imageJPEGCompressionFactor=1.0)
self.assertEqual(drawBot.imagePixelColor(tmp.path, (5, 5)), (1.0, 1.0, 1.0, 1.0))
with TempFile(suffix=".jpg") as tmp:
drawBot.saveImage(tmp.path, imageJPEGCompressionFactor=1.0, imageFallbackBackgroundColor=(0, 1, 0))
r, g, b, a = drawBot.imagePixelColor(tmp.path, (5, 5))
self.assertEqual((round(r, 2), round(g, 2), round(b, 2)), (0, 0.97, 0)) # XXX 0.97 vs 1.0 "calibrated" vs "device"
with TempFile(suffix=".jpg") as tmp:
drawBot.saveImage(tmp.path, imageJPEGCompressionFactor=1.0, imageFallbackBackgroundColor=AppKit.NSColor.redColor())
r, g, b, a = drawBot.imagePixelColor(tmp.path, (5, 5))
self.assertEqual((round(r, 2), round(g, 2), round(b, 2)), (1, 0.15, 0))
def _testMultipage(self, extension, numFrames, expectedMultipageCount):
self.makeTestAnimation(numFrames)
with TempFolder() as tmpFolder:
with TempFile(suffix=extension, dir=tmpFolder.path) as tmp:
base, ext = os.path.splitext(tmp.path)
pattern = base + "_*" + ext
self.assertEqual(len(glob.glob(pattern)), 0)
drawBot.saveImage(tmp.path)
self.assertEqual(len(glob.glob(pattern)), 0)
drawBot.saveImage(tmp.path, multipage=False)
self.assertEqual(len(glob.glob(pattern)), 0)
drawBot.saveImage(tmp.path, multipage=True)
self.assertEqual(len(glob.glob(pattern)), expectedMultipageCount)
assert not os.path.exists(tmpFolder.path) # verify TempFolder cleanup
def test_multipage_png(self):
self._testMultipage(".png", numFrames=5, expectedMultipageCount=5)
def test_multipage_jpg(self):
self._testMultipage(".jpg", numFrames=6, expectedMultipageCount=6)
def test_multipage_svg(self):
self._testMultipage(".svg", numFrames=7, expectedMultipageCount=7)
def test_multipage_gif(self):
self._testMultipage(".gif", numFrames=8, expectedMultipageCount=0)
def test_multipage_pdf(self):
self._testMultipage(".pdf", numFrames=9, expectedMultipageCount=0)
def test_animatedGIF(self):
self.makeTestAnimation(5)
with TempFi
| vicky2135/lucious | src/oscar/apps/catalogue/abstract_models.py | Python | bsd-3-clause | 41,477 | 0.000072 |
import logging
import os
from datetime import date, datetime
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.finders import find
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files.base import File
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import Count, Sum
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language, pgettext_lazy
from treebeard.mp_tree import MP_Node
from oscar.core.compat import user_is_anonymous, user_is_authenticated
from oscar.core.loading import get_class, get_classes, get_model
from oscar.core.utils import slugify
from oscar.core.validators import non_python_keyword
from oscar.models.fields import AutoSlugField, NullCharField
from oscar.models.fields.slugfield import SlugField
ProductManager, BrowsableProductManager = get_classes(
'catalogue.managers', ['ProductManager', 'BrowsableProductManager'])
ProductAttributesContainer = get_class(
'catalogue.product_attributes', 'ProductAttributesContainer')
Selector = get_class('partner.strategy', 'Selector')
@python_2_unicode_compatible
class AbstractProductClass(models.Model):
"""
Used for defining options and attributes for a subset of products.
E.g. Books, DVDs and Toys. A product can only belong to one product class.
At least one product class must be created when setting up a new
Oscar deployment.
Not necessarily equivalent to top-level categories but usually will be.
"""
name = models.CharField(_('Name'), max_length=128)
slug = AutoSlugField(_('Slug'), max_length=128, unique=True,
populate_from='name')
    #: Some product types don't require shipping (e.g. digital products) - we use
#: this field to take some shortcuts in the checkout.
requires_shipping = models.BooleanField(_("Requires shipping?"),
default=True)
#: Digital products generally don't require their stock levels to be
#: tracked.
track_stock = models.BooleanField(_("Track stock levels?"), default=True)
#: These are the options (set by the user when they add to basket) for this
#: item class. For instance, a product class of "SMS message" would always
#: require a message to be specified before it could be bought.
#: Note that you can also set options on a per-product level.
options = models.ManyToManyField(
'catalogue.Option', blank=True, verbose_name=_("Options"))
class Meta:
abstract = True
app_label = 'catalogue'
ordering = ['name']
verbose_name = _("Product class")
verbose_name_plural = _("Product classes")
def __str__(self):
return self.name
@property
def has_attributes(self):
return self.attributes.exists()
@python_2_unicode_compatible
class AbstractCategory(MP_Node):
"""
A product category. Merely used for navigational purposes; has no
effects on business logic.
Uses django-treebeard.
"""
name = models.CharField(_('Name'), max_length=255, db_index=True)
description = models.TextField(_('Description'), blank=True)
image = models.ImageField(_('Image'), upload_to='categories', blank=True,
null=True, max_length=255)
slug = SlugField(_('Slug'), max_length=255, db_index=True)
_slug_separator = '/'
_full_name_separator = ' > '
def __str__(self):
return self.full_name
@property
def full_name(self):
"""
        Returns a string representation of the category and its ancestors,
e.g. 'Books > Non-fiction > Essential programming'.
It's rarely used in Oscar's codebase, but used to be stored as a
CharField and is hence kept for backwards compatibility. It's also
sufficiently useful to keep around.
"""
names = [category.name for category in self.get_ancestors_and_self()]
return self._full_name_separator.join(names)
@property
def full_slug(self):
"""
Returns a string of this category's slug concatenated with the slugs
        of its ancestors, e.g. 'books/non-fiction/essential-programming'.
        Oscar used to store this in the 'slug' model field, but this field
        has been re-purposed to only store this category's slug and to not
        include its ancestors' slugs.
"""
slugs = [category.slug for category in self.get_ancestors_and_self()]
return self._slug_separator.join(slugs)
def generate_slug(self):
"""
Generates a slug for a category. This makes no attempt at generating
a unique slug.
"""
return slugify(self.name)
def ensure_slug_uniqueness(self):
"""
        Ensures that the category's slug is unique amongst its siblings.
This is inefficient and probably not thread-safe.
"""
unique_slug = self.slug
        siblings = self.get_siblings().exclude(pk=self.pk)
next_num = 2
while siblings.filter(slug=unique_slug).exists():
unique_slug = '{slug}_{end}'.format(slug=self.slug, end=next_num)
next_num += 1
if unique_slug != self.slug:
self.slug = unique_slug
self.save()
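    # Illustrative note (added): if three sibling categories are all named
    # "Books", generate_slug() gives each the slug "books"; after
    # ensure_slug_uniqueness() has run on save, they end up as "books",
    # "books_2" and "books_3" respectively.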
def save(self, *args, **kwargs):
"""
Oscar traditionally auto-generated slugs from names. As that is
often convenient, we still do so if a slug is not supplied through
other means. If you want to control slug creation, just create
instances with a slug already set, or expose a field on the
appropriate forms.
"""
if self.slug:
# Slug was supplied. Hands off!
super(AbstractCategory, self).save(*args, **kwargs)
else:
self.slug = self.generate_slug()
super(AbstractCategory, self).save(*args, **kwargs)
# We auto-generated a slug, so we need to make sure that it's
# unique. As we need to be able to inspect the category's siblings
# for that, we need to wait until the instance is saved. We
# update the slug and save again if necessary.
self.ensure_slug_uniqueness()
def get_ancestors_and_self(self):
"""
Gets ancestors and includes itself. Use treebeard's get_ancestors
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_ancestors()) + [self]
def get_descendants_and_self(self):
"""
Gets descendants and includes itself. Use treebeard's get_descendants
if you don't want to include the category itself. It's a separate
function as it's commonly used in templates.
"""
return list(self.get_descendants()) + [self]
def get_absolute_url(self):
"""
Our URL scheme means we have to look up the category's ancestors. As
that is a bit more expensive, we cache the generated URL. That is
safe even for a stale cache, as the default implementation of
ProductCategoryView does the lookup via primary key anyway. But if
you change that logic, you'll have to reconsider the caching
approach.
"""
current_locale = get_language()
cache_key = 'CATEGORY_URL_%s_%s' % (current_locale, self.pk)
url = cache.get(cache_key)
if not url:
url = reverse(
'catalogue:category',
kwargs={'category_slug': self.full_slug, 'pk': self.pk})
cache.set(cache_key, url)
| martinb07/mysmarthome | plugins/sonos/__init__.py | Python | gpl-3.0 | 30,168 | 0.001392 |
#!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
# ########################################################################
# Copyright 2013 KNX-User-Forum e.V. http://knx-user-forum.de/
#########################################################################
# This file is part of SmartHome.py. http://mknx.github.io/smarthome/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
#########################################################################
import http
import logging
import lib.connection
import lib.tools
import os
import re
import socket
import threading
import json
import urllib
from urllib.parse import urlparse
import fcntl
import struct
import requests
logger = logging.getLogger('')
sonos_speaker = {}
class UDPDispatcher(lib.connection.Server):
def __init__(self, ip, port):
lib.connection.Server.__init__(self, ip, port, proto='UDP')
self.dest = 'udp:' + ip + ':{port}'.format(port=port)
logger.debug('starting udp listener with {url}'.format(url=self.dest))
self.connect()
def handle_connection(self):
try:
data, address = self.socket.recvfrom(10000)
address = "{}:{}".format(address[0], address[1])
logger.debug("{}: incoming connection from {}".format('sonos', address))
except Exception as err:
logger.error("{}: {}".format(self._name, err))
return
try:
sonos = json.loads(data.decode('utf-8').strip())
uid = sonos['uid']
if not uid:
logger.error("No uid found in sonos udp response!\nResponse: {}")
if uid not in sonos_speaker:
logger.warning("no sonos speaker configured with uid '{uid}".format(uid=uid))
return
for key, value in sonos.items():
instance_var = getattr(sonos_speaker[uid], key)
if isinstance(instance_var, list):
for item in instance_var:
item(value, 'Sonos', '')
except Exception as err:
logger.error("Error parsing sonos broker response!\nError: {}".format(err))
class Sonos():
def __init__(self, smarthome, listen_host='0.0.0.0', listen_port=9999, broker_url=None, refresh=120):
self._sonoslock = threading.Lock()
self._lan_ip = get_lan_ip()
if not self._lan_ip:
logger.critical("Could not fetch internal ip address. Set it manually!")
self.alive = False
return
logger.info("using local ip address {ip}".format(ip=self._lan_ip))
# check broker variable
if broker_url:
self._broker_url = broker_url
else:
self._broker_url = "http://{ip}:12900".format(ip=self._l
|
an_ip)
if self._broker_url:
logger.warning("No broker url given, assuming current ip and default broker port: {url}".
format(url=self._broker_url))
|
else:
logger.error("Could not detect broker url !!!")
return
# normalize broker url
if not self._broker_url.startswith('http://'):
self._broker_url = "http://{url}".format(url=self._broker_url)
# ini vars
self._listen_host = listen_host
self._listen_port = listen_port
self._sh = smarthome
self._command = SonosCommand()
logger.debug('refresh sonos speakers every {refresh} seconds'.format(refresh=refresh))
# add current_state command to scheduler
self._sh.scheduler.add('sonos-update', self._subscribe, cycle=refresh)
# start UDP listener
UDPDispatcher(self._listen_host, self._listen_port)
def run(self):
self.alive = True
def _subscribe(self):
"""
Subscribe the plugin to the Sonos Broker
"""
logger.debug('(re)registering to sonos broker server ...')
self._send_cmd(SonosCommand.subscribe(self._lan_ip, self._listen_port))
for uid, speaker in sonos_speaker.items():
self._send_cmd(SonosCommand.current_state(uid))
def _unsubscribe(self):
"""
Unsubscribe the plugin from the Sonos Broker
"""
logger.debug('unsubscribing from sonos broker server ...')
self._send_cmd(SonosCommand.unsubscribe(self._lan_ip, self._listen_port))
def stop(self):
"""
Will be executed, if Smarthome.py receives a terminate signal
"""
# try to unsubscribe the plugin from the Sonos Broker
self._unsubscribe()
self.alive = False
def _resolve_uid(self, item):
uid = None
parent_item = item.return_parent()
if (parent_item is not None) and ('sonos_uid' in parent_item.conf):
uid = parent_item.conf['sonos_uid'].lower()
else:
logger.warning("sonos: could not resolve sonos_uid".format(item))
return uid
def parse_item(self, item):
if 'sonos_recv' in item.conf:
uid = self._resolve_uid(item)
if uid is None:
return None
attr = item.conf['sonos_recv']
logger.debug("sonos: {} receives updates by {}".format(item, attr))
            if uid not in sonos_speaker:
sonos_speaker[uid] = SonosSpeaker()
attr_list = getattr(sonos_speaker[uid], attr)
            if item not in attr_list:
attr_list.append(item)
if 'sonos_send' in item.conf:
try:
self._sonoslock.acquire()
uid = self._resolve_uid(item)
if uid is None:
return None
attr = item.conf['sonos_send']
logger.debug("sonos: {} is send to {}".format(item, attr))
return self._update_item
finally:
self._sonoslock.release()
return None
def parse_logic(self, logic):
pass
def _update_item(self, item, caller=None, source=None, dest=None):
if caller != 'Sonos':
value = item()
if 'sonos_send' in item.conf:
uid = self._resolve_uid(item)
if not uid:
return None
command = item.conf['sonos_send']
cmd = ''
if command == 'mute':
if isinstance(value, bool):
group_item_name = '{}.group_command'.format(item._name)
group_command = 0
for child in item.return_children():
if child._name.lower() == group_item_name.lower():
group_command = child()
break
cmd = self._command.mute(uid, value, group_command)
if command == 'led':
if isinstance(value, bool):
group_item_name = '{}.group_command'.format(item._name)
group_command = 0
for child in item.return_children():
if child._name.lower() == group_item_name.lower():
group_command = child()
break
cmd = self._command.led(uid, value, group_command)
if command == 'play':
if isinstance(value, bool):
cmd = self._command.play(uid, value)
if
| linuxsoftware/dominoes | davezdominoes/gamecoordinator/security.py | Python | agpl-3.0 | 5,040 | 0.002183 |
# ------------------------------------------------------------------------------
# Security Central
# ------------------------------------------------------------------------------
from .models import User
from pyramid.security import Allow, Everyone, Authenticated, ALL_PERMISSIONS
from pyramid.authentication import SessionAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from .utils.gauth import getSecret, verifyOneTimePassword
import logging
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
def includeme(config):
"""Set up the authentication and authorization policies"""
authnPolicy = SessionAuthenticationPolicy(callback=getGroups)
authzPolicy = ACLAuthorizationPolicy()
config.set_authentication_policy(authnPolicy)
config.set_authorization_policy(authzPolicy)
config.set_root_factory(Root)
# Custom predicates
config.add_view_predicate("userNeedsVerification",
UserNeedsVerificationPredicate)
log.info("security set up")
# ------------------------------------------------------------------------------
# Authentication
# ------------------------------------------------------------------------------
def getGroups(name, request):
user = request.user
if user is None:
log.info("getGroups called for non-existant user %s" % name)
return None
if user.usesGauth and not user.gauthVerified:
log.debug("getGroups called for non-verified user %s" % name)
return None
return getGroupsForUser(user)
def getGroupsForUser(user):
groups = []
return groups
class Authentication:
TO_VERIFY, OK, FAILED, LOCKED_OUT = range(4)
def checkAuthentication(name, givenPass):
"""Check the given login and password matches an active user"""
result = Authentication.FAILED
name = name.replace(':', ';')
user = User.getByLogin(name)
if user:
if user.failed_logins < 99:
if givenPass and user.verifyPassword(givenPass):
log.info("User %s password OK" % name)
if user.usesGauth:
user.gauthVerified = False
result = Authentication.TO_VERIFY
else:
result = Authentication.OK
user.failed_logins = 0
else:
log.info("User %s authentication FAILED" % name)
user.failed_logins += 1
else:
log.warning("User %s locked out" % name)
result = Authentication.LOCKED_OUT
else:
log.info("User %s does not exist" % name)
return result, user
def checkVerification(user, givenOtp):
"""Verify the given one-time-password of users who use gauth"""
result = Authentication.FAILED
if user.usesGauth:
if user.failed_logins < 3:
secret = getSecret(user.gauth_key, user.id)
if givenOtp and verifyOneTimePassword(givenOtp, secret):
log.info("User %s verification OK" % user.login)
result = Authentication.OK
user.failed_logins = 0
else:
log.info("User %s verification FAILED" % user.login)
user.failed_logins += 1
else:
log.warning("User %s locked out" % user.login)
result = Authentication.LOCKED_OUT
else:
log.error("User %s does not use gauth!!!" % user.login)
return result
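# A minimal usage sketch (added for illustration; 'login_name', 'given_pass'
# and 'given_otp' are hypothetical request parameters). A login view could
# chain the two checks like this:
#
#   result, user = checkAuthentication(login_name, given_pass)
#   if result == Authentication.TO_VERIFY:
#       # password was correct, but the user still owes a one-time password
#       result = checkVerification(user, given_otp)
#   if result == Authentication.OK:
#       ...  # remember the user, e.g. via pyramid.security.remember()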
# ------------------------------------------------------------------------------
# View Predicates
# ------------------------------------------------------------------------------
class UserNeedsVerificationPredicate(object):
def __init__(self, flag, config):
self.flag = flag
def text(self):
if self.flag:
return "User does need verification"
else:
return "User does not need verification"
phash = text
def __call__(self, context, request):
user = request.user
needsVerification = user and user.usesGauth and not user.gauthVerified
return self.flag == needsVerification
# ------------------------------------------------------------------------------
# Security Domains
# ------------------------------------------------------------------------------
class Root(dict):
"""The root security domain"""
__acl__ = [(Allow, Everyone, ()),
               (Allow, Authenticated, ('view', 'edit', 'play')),
(Allow, 'role:admin', ALL_PERMISSIONS) ]
def __init__(self, request):
pass
class UserSettings(object):
"""The security domain for user settings"""
def __init__(self, request):
self.request = request
@property
def __acl__(self):
# just delegate acl handling to the current user
if self.request.user:
return self.request.user.__acl__
| rebeccamorgan/easyskim | nat_proc/FrequencySummarizer.py | Python | apache-2.0 | 2,032 | 0.015256 |
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
from string import punctuation
from heapq import nlargest
import re
"""
Modified from http://glowingpython.blogspot.co.uk/2014/09/text-summarization-with-nltk.html
"""
class FrequencySummarizer:
def __init__(self, low_thresh=0.1, high_thresh=0.9):
"""
Initialize the text summarizer.
        Words that have a term frequency lower than low_thresh
        or higher than high_thresh will be ignored.
"""
ignore = ['fig','figure','ibid', 'et al','cf','NB','N.B.']
self._low_thresh = low_thresh
self._high_thresh = high_thresh
self._stopwords = set(stopwords.words('english') + list(punctuation) + list(ignore))
def _compute_frequencies(self, word_tk):
freq = defaultdict(int)
for s in word_tk:
for word in s:
if word not in self._stopwords:
freq[word] += 1
        # frequency normalization and filtering
m = float(max(freq.values()))
for w in freq.keys():
freq[w] = freq[w]/m
if freq[w] >= self._high_thresh or freq[w] <= self._low_thresh:
del freq[w]
return freq
def summarize(self, text, n):
"""
Return a list of n sentences
which represent the summary of text.
"""
text = "".join([unicode(x) for x in text])
sents = sent_tokenize(text)
if n > len(sents):
n = len(sents)
word_tk = [word_tokenize(s.lower()) for s in sents]
self._freq = self._compute_frequencies(word_tk)
ranking = defaultdict(int)
for i,sent in enumerate(word_tk):
for w in sent:
if w in self._freq and len(w)>4: #Onl
|
y count words of length>4 as significant
ranking[i] += self._freq[w]
sentsindx = self._rank(ranking, n)
return [sents[j].encode('ascii', errors='backslashreplace') for j in sentsindx]
def _rank(self, ranking, n):
""" return the first n sentences with highest ranking """
        return nlargest(n, ranking, key=ranking.get)
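# A usage sketch (added for illustration; 'article_text' is a hypothetical
# variable, and the NLTK 'punkt' and 'stopwords' corpora are assumed to be
# downloaded):
#
#   fs = FrequencySummarizer(low_thresh=0.1, high_thresh=0.9)
#   for sentence in fs.summarize(article_text, 3):
#       print(sentence)  # the three highest-scoring sentences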
| xyuanmu/XX-Net | python3.8.2/Lib/site-packages/cffi/vengine_cpy.py | Python | bsd-2-clause | 43,314 | 0.00067 |
#
# DEPRECATED: implementation for ffi.verify()
#
import sys, imp
from . import model
from .error import VerificationError
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
# The following two 'chained_list_constants' items contains
# the head of these two chained lists, as a string that gives the
# call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
                return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = ge
| Udayraj123/dashboard_IITG | Binder/discussions/migrations/0001_initial.py | Python | mit | 904 | 0.002212 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10a1 on 2016-06-19 04:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
    operations = [
migrations.CreateModel(
name='post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| home-assistant/home-assistant | homeassistant/components/repetier/sensor.py | Python | apache-2.0 | 5,911 | 0.000677 |
"""Support for monitoring Repetier Server Sensors."""
from datetime import datetime
import logging
import time
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import REPETIER_API, SENSOR_TYPES, UPDATE_SIGNAL, RepetierSensorEntityDescription
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Repetier Server sensors."""
if discovery_info is None:
return
sensor_map = {
"bed_temperature": RepetierTempSensor,
"extruder_temperature": RepetierTempSensor,
"chamber_temperature": RepetierTempSensor,
"current_state": RepetierSensor,
"current_job": RepetierJobSensor,
"job_end": RepetierJobEndSensor,
"job_start": RepetierJobStartSensor,
}
entities = []
for info in discovery_info:
printer_name = info["printer_name"]
api = hass.data[REPETIER_API][printer_name]
printer_id = info["printer_id"]
sensor_type = info["sensor_type"]
temp_id = info["temp_id"]
description = SENSOR_TYPES[sensor_type]
name = f"{info['name']}{description.name or ''}"
if temp_id is not None:
_LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
name = f"{name}{temp_id}"
sensor_class = sensor_map[sensor_type]
entity = sensor_class(api, temp_id, name, printer_id, description)
entities.append(entity)
add_entities(entities, True)
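# Illustrative shape of a single discovery_info entry consumed above (added
# note; the values are made up, the keys match those read in the loop):
# {
#     "printer_name": "my_printer",
#     "printer_id": 0,
#     "sensor_type": "bed_temperature",
#     "temp_id": 0,
#     "name": "My printer ",
# }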
class RepetierSensor(SensorEntity):
"""Class to create and populate a Repetier Sensor."""
entity_description: RepetierSensorEntityDescription
_attr_should_poll = False
def __init__(
self,
api,
temp_id,
name,
printer_id,
description: RepetierSensorEntityDescription,
):
"""Init new sensor."""
self.entity_description = description
self._api = api
self._attributes: dict = {}
self._temp_id = temp_id
self._printer_id = printer_id
self._state = None
self._attr_name = name
self._attr_available = False
@property
def extra_state_attributes(self):
"""Return sensor attributes."""
return self._attributes
@property
def native_value(self):
"""Return sensor state."""
return self._state
@callback
def update_callback(self):
"""Get new data and update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Connect update callbacks."""
self.async_on_remove(
async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self.update_callback)
)
def _get_data(self):
"""Return new data from the api cache."""
sensor_type = self.entity_description.key
data = self._api.get_data(self._printer_id, sensor_type, self._temp_id)
if data is None:
_LOGGER.debug("Data not found for %s and %s", sensor_type, self._temp_id)
self._attr_available = False
return None
self._attr_available = True
return data
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
state = data.pop("state")
_LOGGER.debug("Printer %s State %s", self.name, state)
self._attributes.update(data)
self._state = state
class RepetierTempSensor(RepetierSensor):
"""Represent a Repetier temp sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
state = data.pop("state")
temp_set = data["temp_set"]
_LOGGER.debug("Printer %s Setpoint: %s, Temp: %s", self.name, temp_set, state)
self._attributes.update(data)
self._state = state
class RepetierJobSensor(RepetierSensor):
"""Represent a Repetier job sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
class RepetierJobEndSensor(RepetierSensor):
"""Class to create and populate a Repetier Job End timestamp Sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
job_name = data["job_name"]
start = data["start"]
print_time = data["print_time"]
from_start = data["from_start"]
time_end = start + round(print_time, 0)
self._state = datetime.utcfromtimestamp(time_end)
remaining = print_time - from_start
remaining_secs = int(round(remaining, 0))
_LOGGER.debug(
"Job %s remaining %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(
|
remaining_secs)),
)
class RepetierJobStartSensor(RepetierSensor):
"""Class to create and populate a Repetier Job Start timestamp Sensor."""
    _attr_device_class = SensorDeviceClass.TIMESTAMP
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
job_name = data["job_name"]
start = data["start"]
from_start = data["from_start"]
self._state = datetime.utcfromtimestamp(start)
elapsed_secs = int(round(from_start, 0))
_LOGGER.debug(
"Job %s elapsed %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(elapsed_secs)),
)
| vinaymayar/python-game-workshop | lesson4/exercises.py | Python | mit | 6,887 | 0.001307 |
# lesson4/exercises.py
# Control flow and conditionals
#
# This file contains exercises about Python conditionals.
# Last lesson, we encountered the boolean type.
# Python uses booleans to evaluate conditions.
# Last time, we directly assigned boolean values True and False, but booleans are
# also returned by comparison operations.
# 1. Comparison Operators
# There are multiple comparison operators, the most common ones are:
# == for equality
# != for inequality
# > for greater than
# < for less than
# >= for greater than or equal
# <= for less than or equal
#
# Don't confuse the == operator with the = operator that we learned when studying
# variables.
# The == operator (equal to) asks whether two values are the same as each other and
# returns a boolean (True or False)
# The = operator (assignment) puts the value on the right into the variable on
# the left, and returns nothing.
x = 2 # assignment!
print(x == 2) # comparison! prints out True
print(x == 3) # comparison! prints out False
print(x < 3) # comparison! prints out True
# Exercise 1:
# Try different operators and expressions and see what they evaluate to.
# Experiment with the different types we learned: float, int and even
# strings!
# Remember, you can also compare two variables to each other!
# What happens when you try to compare strings?
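# For example (this block is an addition; pick any values you like):
# strings are compared lexicographically, character by character, using
# their Unicode code points.
print("apple" < "banana")  # True: 'a' comes before 'b'
print("Zoo" < "apple")     # True: uppercase letters sort before lowercase
print(3 == 3.0)            # True: int and float compare by numeric value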
# 2. Boolean Operators
# There are three boolean operators (or, and, not). These are used to formulate
# more complex boolean expressions.
#
# and, or: these are called binary operators, because they take in 2 boolean
# values.
# Uncomment the lines below, and see what they evaluate to.
# print("True and True equals " + str(True and True))
# print("True and False equals " + str(True and False))
# print("True or True equals " + str(True or True))
# print("True or False equals " + str(True or False))
# The and operator evaluates an expression to True if both Boolean values are
# True; otherwise, it evaluates to False.
# The or operator evaluates an expression to True if either of the two Boolean
# values is True. If both are False, it evaluates to False.
# Truth tables
#
# and truth table:
# True and True -> True
# True and False -> False
# False and True -> False
# False and False -> False
#
#
# or truth table:
# True or True -> True
# True or False -> True
# False or True -> True
# False or False -> False
# The not operator only takes in a single boolean value. It simply inverts the
# boolean value.
# Uncomment the lines below and see what they evaluate to
# print(not True)
# print(not False)
# print(not not True)
# Exercise 2: Creating complex boolean expressions.
# Create three boolean expressions with at least 2 binary operators and 2
# comparison operators. Store each result in a variable and print it.
# For example,
# mybool1 = (2 + 2 == 4 and not 2 + 2 == 5 and 2 * 2 == 2 + 2)
# name = "Maria"
# eggs = 5
# my_age = 21
# mybool2 = (name != "Maria" and not eggs == 5 and my_age < 18)
# Again, experiment with different variable types. See what works and what
# gives an error. If there is an error, can you find out why?
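# One possible answer to Exercise 2 (this block is an addition; the values
# are made up):
eggs = 5
my_age = 21
mybool3 = (eggs > 0 and eggs < 12) or my_age >= 18
print(mybool3)  # True: the expression inside the parentheses is already True
# Note: ordering comparisons between unrelated types, e.g. "five" < 5,
# raise a TypeError in Python 3, while == between unrelated types simply
# returns False.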
# 3. Conditionals
#
# The Python programs we've written so far have had one-track minds: they can
# add two numbers or print something, but they don't have the ability to pick
# one of these outcomes over the other.
# When developing games, sometimes we'd like our code to be able to make
# decisions.
# Control flow gives us this ability to choose among different paths depending
# on what else is happening in the program.
# The control flow statements we will be learning in this lesson are: if, elif,
# and else.
# Each of these flow control statements decides what to do based on whether its
# condition is True or False.
# In the code below, change the value of the variable name and see what happens
# In particular, try making it equal to "Maria".
name = ""
if name == "Maria":
print("Hi, Maria.")
# The if-statement seen above means the following.
# "If this condition is true, execute the code in the block."
# In Python, an if statement consists of the following:
# The if keyword
# A condition (that is, an expression that evaluates to True or False)
# A colon
# Starting on the next line, an indented block of code (called the if block)
# VERY IMPORTANT: Indentation in Python.
# Lines of Python code can be grouped together in blocks. You can tell when a
# block begins and ends from the indentation of the lines of code. There are
# three rules for blocks.
# Blocks begin when the indentation increases.
# Blocks can contain other blocks.
# Blocks end when the indentation decreases to zero or to an outer
# block's indentation.
# Blocks in Python are indented by 4 spaces more than its containing block.
# Usually, the TAB button will automatically input 4 spaces in IDLE.
# The piece of code below shows how indentation works.
name = "Joana"
password = "abacate"
if name == "Joana":
print("Hello Joana")
if password == "abacate":
print("Access granted.")
else:
print("Wrong password.")
# How many levels of indentations are there?
# How many blocks of code?
# An if-statement can optionally be followed by an else-statement.
# The else block will be executed when the if statement's condition is False.
# Try changing the value of the variable password from the piece of code above
# and see what happens.
# The if and else statements allow us to make simple decisions. If we want to
# make our code more complex, we can use an elif statement.
# The elif statement is an "else if" statement that always follows an if or
# another elif statement. It provides another condition that is checked only
# if the previous conditions were False.
name = "Joao"
age = 16
if name == "Maria":
print("Hi, Maria. You might be underage, but it doesn't mat
|
ter :)")
elif age < 18:
print("You are underage, and
|
you're not Maria! Sorry.")
how_many_potatoes = 4
if how_many_potatoes > 20:
print("lots of potatoes")
elif how_many_potatoes > 5:
print("some potatoes, but not more than 20!")
elif how_many_potatoes > 10:
print("the program will never get here " + \
"because the previous case will be " + \
"true if there are more than 5 potatoes.")
elif how_many_potatoes > 0:
print("a few potatoes")
# It is possible to have multiple elif statements.
# However, notice that a control flow statement must always start with an if
# statement, and else statements, if they exist, must always come at the end.
# Exercise 3:
# To practice your newly acquired skills of control flows. Go to the file
# guard.py and fill in the blanks to create a program that detects whether
# someone should be allowed in a super secret club.
# Exercise 4:
# Now we will write our first game in python! Woooooooooah, we are so awesome!
# Go to the file guess_game.py and follow the instructions.
| evereux/flicket | setup.py | Python | mit | 10,056 | 0.002088 |
#! usr/bin/python3
# -*- coding: utf8 -*-
import datetime
from getpass import getpass
from flask_script import Command
from scripts.create_json import WriteConfigJson
from application import db, app
from application.flicket_admin.models.flicket_config import FlicketConfig
from application.flicket.models.flicket_models import FlicketStatus, FlicketPriority, FlicketDepartment, FlicketCategory
from application.flicket.models.flicket_user import FlicketUser, FlicketGroup
from application.flicket.scripts.hash_password import hash_password
admin = 'admin'
# configuration defaults for flicket
flicket_config = {'posts_per_page': 50,
'allowed_extensions': ['txt', 'log', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'msg', 'doc', 'docx', 'ppt',
'pptx', 'xls', 'xlsx'],
'ticket_upload_folder': 'application/flicket/static/flicket_uploads',
'avatar_upload_folder': 'application/flicket/static/flicket_avatars',
}
# departments and categories defaults for flicket
depart_categories = [
{'department': 'Design', 'category': ['Dataset', 'ECN', 'ECR', 'Other']},
{'department': 'Manufacturing', 'category': ['Process Planning', 'Tooling', 'Equipment', 'Other']},
{'department': 'IT', 'category': ['Internet', 'Intranet', 'Other']},
{'department': 'Quality', 'category': ['Procedures', 'Manuals', 'Other']},
{'department': 'Human Resources', 'category': ['Holidays', 'Sick Leave', 'Other']},
{'department': 'Commercial', 'category': ['Approved Suppliers', 'Other']},
]
class RunSetUP(Command):
def run(self):
WriteConfigJson().json_exists()
username, password, email = self.get_admin_details()
self.set_db_config_defaults()
self.set_email_config()
self.create_admin(username=username, password=password, email=email, job_title='admin')
self.create_notifier()
self.create_admin_group()
self.create_default_ticket_status()
self.create_default_priority_levels()
self.create_default_depts()
# commit changes to the database
db.session.commit()
@staticmethod
def set_db_config_defaults(silent=False):
print('Please enter site base url including port. For example this would be "http://192.168.1.1:8000".')
base_url = input('Base url> ')
count = FlicketConfig.query.count()
if count > 0:
if not silent:
print('Flicket Config database seems to already be populated. Check values via application.')
return
set_config = FlicketConfig(
posts_per_page=flicket_config['posts_per_page'],
allowed_extensions=', '.join(flicket_config['allowed_extensions']),
ticket_upload_folder=flicket_config['ticket_upload_folder'],
avatar_upload_folder=flicket_config['avatar_upload_folder'],
base_url=base_url,
application_title='Flicket',
mail_max_emails=10,
mail_port=465
)
if not silent:
print('Adding config values to database.')
db.session.add(set_config)
db.session.commit()
@staticmethod
def get_admin_details():
# todo: add some password validation to prevent easy passwords being entered
_username = admin
match = False
email = input("Enter admin email: ")
while match is False:
password1 = getpass("Enter password: ")
password2 = getpass("Re-enter password: ")
if password1 != password2:
print("Passwords do not match, please try again.\n\n")
match = False
else:
return _username, password1, email
@staticmethod
def create_admin(username, password, email, job_title, silent=False):
""" creates flicket_admin user. """
query = FlicketUser.query.filter_by(username=username)
if query.count() == 0:
add_user = FlicketUser(username=username,
name=username,
password=hash_password(password),
email=email,
job_title=job_title,
date_added=datetime.datetime.now())
db.session.add(add_user)
if not silent:
print('Admin user added.')
else:
print('Admin user is already added.')
@staticmethod
def create_notifier():
""" creates user for notifications """
query = FlicketUser.query.filter_by(username=app.config['NOTIFICATION']['username'])
if query.count() == 0:
add_user = FlicketUser(username=app.config['NOTIFICATION']['username'],
name=app.config['NOTIFICATION']['name'],
password=hash_password(app.config['NOTIFICATION']['password']),
email=app.config['NOTIFICATION']['email'],
date_added=datetime.datetime.now())
db.session.add(add_user)
print("Notification user added.")
else:
print('Notification user already added.')
@staticmethod
def create_admin_group(silent=False):
""" creates flicket_admin and super_user group and assigns flicket_admin to group admin. """
query = FlicketGroup.query.filter_by(group_name=app.config['ADMIN_GROUP_NAME'])
if query.count() == 0:
add_group = FlicketGroup(group_name=app.config['ADMIN_GROUP_NAME'])
db.session.add(add_group)
if not silent:
print("Admin group added")
user = FlicketUser.query.filter_by(username=admin).first()
group = FlicketGroup.query.filter_by(group_name=app.config['ADMIN_GROUP_NAME']).first()
in_group = False
# see if user flicket_admin is already in flicket_admin group.
for g in group.users:
if g.username == admin:
in_group = True
break
if not in_group:
group.users.append(user)
if not silent:
print("Added flicket_admin user to flicket_admin group.")
# create the super_user group
query = FlicketGroup.query.filter_by(group_name=app.config['SUPER_USER_GROUP_NAME'])
if query.count() == 0:
add_group = FlicketGroup(group_name=app.config['SUPER_USER_GROUP_NAME'])
db.session.add(add_group)
if not silent:
print("super_user group added")
# noinspection PyArgumentList
@staticmethod
def create_default_ticket_status(silent=False):
""" set up default status levels """
        sl = ['Open', 'Closed', 'In Work', 'Awaiting Information']
for s in sl:
status = FlicketStatus.query.filter_by(status=s).first()
if not status:
                add_status = FlicketStatus(status=s)
db.session.add(add_status)
if not silent:
print('Added status level {}'.format(s))
@staticmethod
def create_default_priority_levels(silent=False):
""" set up default priority levels """
pl = ['low', 'medium', 'high']
for p in pl:
priority = FlicketPriority.query.filter_by(priority=p).first()
if not priority:
add_priority = FlicketPriority(priority=p)
db.session.add(add_priority)
if not silent:
print('Added priority level {}'.format(p))
@staticmethod
def create_default_depts(silent=False):
""" creates default departments and categories. """
for d in depart_categories:
department = d['department']
categories = d['category']
query = FlicketDepartment.query.filter_by(department=department).first()
if not query:
add_department = FlicketDepartment(
department=department
)
|
QingChenmsft/azure-cli
|
src/azure-cli/setup.py
|
Python
|
mit
| 3,406
| 0.000587
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "2.0.17+dev"
# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
try:
with open('azure/cli/__init__.py', 'r', encoding='utf-8') as f:
content = f.read()
except OSError:
pass
else:
import re
import sys
m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
if not m:
print('Could not find __version__ in azure/cli/__init__.py')
sys.exit(1)
if m.group(1) != VERSION:
print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
sys.exit(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
    'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'azure-cli-acr',
'azure-cli-acs',
'azure-cli-appservice',
'azure-cli-batch',
'azure-cli-backup',
'azure-cli-billing',
'azure-cli-cdn',
    'azure-cli-cloud',
'azure-cli-cognitiveservices',
'azure-cli-component',
'azure-cli-container',
'azure-cli-configure',
'azure-cli-consumption',
'azure-cli-core',
'azure-cli-cosmosdb',
'azure-cli-dla',
'azure-cli-dls',
'azure-cli-eventgrid',
'azure-cli-extension',
'azure-cli-feedback',
'azure-cli-find',
'azure-cli-interactive',
'azure-cli-iot',
'azure-cli-keyvault',
'azure-cli-lab',
'azure-cli-monitor',
'azure-cli-network',
'azure-cli-nspkg',
'azure-cli-profile',
'azure-cli-rdbms',
'azure-cli-redis',
'azure-cli-resource',
'azure-cli-role',
'azure-cli-sql',
'azure-cli-storage',
'azure-cli-vm',
'azure-cli-servicefabric'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli',
version=VERSION,
description='Microsoft Azure Command-Line Tools',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
scripts=[
'az',
'az.completion.sh',
'az.bat',
],
packages=[
'azure',
'azure.cli',
],
install_requires=DEPENDENCIES,
cmdclass=cmdclass
)
|
brenton/cobbler
|
cobbler/collection.py
|
Python
|
gpl-2.0
| 13,191
| 0.010992
|
"""
Base class for any serializable list of things...
Copyright 2006, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import exceptions
from cexceptions import *
import serializable
import utils
import glob
import sub_process
import action_litesync
import item_system
import item_profile
import item_distro
import item_repo
import item_image
from utils import _
class Collection(serializable.Serializable):
def __init__(self,config):
"""
Constructor.
"""
self.config = config
self.clear()
self.api = self.config.api
self.log_func = self.api.log
self.lite_sync = None
def factory_produce(self,config,seed_data):
"""
Must override in subclass. Factory_produce returns an Item object
from datastructure seed_data
"""
raise exceptions.NotImplementedError
def clear(self):
"""
Forget about objects in the collection.
"""
self.listing = {}
def find(self, name=None, return_list=False, no_errors=False, **kargs):
"""
        Return first object in the collection that matches all item='value'
pairs passed, else return None if no objects can be found.
When return_list is set, can also return a list. Empty list
would be returned instead of None in that case.
"""
matches = []
        # support the old style invocation without kwargs
if name is not None:
kargs["name"] = name
kargs = self.__rekey(kargs)
# no arguments is an error, so we don't return a false match
if len(kargs) == 0:
raise CX(_("calling find with no arguments"))
# performance: if the only key is name we can skip the whole loop
if len(kargs) == 1 and kargs.has_key("name") and not return_list:
return self.listing.get(kargs["name"].lower(), None)
for (name, obj) in self.listing.iteritems():
if obj.find_match(kargs, no_errors=no_errors):
matches.append(obj)
if not return_list:
if len(matches) == 0:
return None
return matches[0]
else:
return matches
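    # Usage sketch (names illustrative): find one object, or a filtered list
    #   system = systems.find(name="node1")
    #   netbooting = systems.find(netboot_enabled=True, return_list=True)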
SEARCH_REKEY = {
'kopts' : 'kernel_options',
'ksmeta' : 'ks_meta',
'inherit' : 'parent',
'ip' : 'ip_address',
'mac' : 'mac_address',
'virt-file-size' : 'virt_file_size',
'virt-ram' : 'virt_ram',
'virt-path' : 'virt_path',
'virt-type' : 'virt_type',
'virt-bridge' : 'virt_bridge',
'virt-cpus' : 'virt_cpus',
'dhcp-tag' : 'dhcp_tag',
'netboot-enabled' : 'netboot_enabled'
}
def __rekey(self,hash):
"""
Find calls from the command line ("cobbler system find")
don't always match with the keys from the datastructs and this
makes them both line up without breaking compatibility with either.
Thankfully we don't have a LOT to remap.
"""
newhash = {}
for x in hash.keys():
if self.SEARCH_REKEY.has_key(x):
newkey = self.SEARCH_REKEY[x]
newhash[newkey] = hash[x]
else:
newhash[x] = hash[x]
return newhash
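    # Illustrative: __rekey({'mac': 'AA:BB:CC:DD:EE:FF'})
    # returns {'mac_address': 'AA:BB:CC:DD:EE:FF'}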
def to_datastruct(self):
"""
Serialize the collection
"""
datastruct = [x.to_datastruct() for x in self.listing.values()]
return datastruct
def from_datastruct(self,datastruct):
if datastruct is None:
return
for seed_data in datastruct:
item = self.factory_produce(self.config,seed_data)
self.add(item)
def rename(self,ref,newname,with_sync=True,with_triggers=True):
"""
Allows an object "ref" to be given a newname without affecting the rest
of the object tree.
"""
# make a copy of the object, but give it a new name.
oldname = ref.name
newref = ref.make_clone()
newref.set_name(newname)
self.add(newref, with_triggers=with_triggers,save=True)
# now descend to any direct ancestors and point them at the new object allowing
# the original object to be removed without orphanage. Direct ancestors
# will either be profiles or systems. Note that we do have to care as
# set_parent is only really meaningful for subprofiles. We ideally want a more
# generic set_parent.
kids = ref.get_children()
for k in kids:
if k.COLLECTION_TYPE == "distro":
raise CX(_("internal error, not expected to have distro child objects"))
elif k.COLLECTION_TYPE == "profile":
if k.parent != "":
k.set_parent(newname)
else:
k.set_distro(newname)
self.api.profiles().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
elif k.COLLECTION_TYPE == "system":
k.set_profile(newname)
self.api.systems().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
elif k.COLLECTION_TYPE == "repo":
raise CX(_("internal error, not expected to have repo child objects"))
else:
raise CX(_("internal error, unknown child type (%s), cannot finish rename" % k.COLLECTION_TYPE))
# now delete the old version
self.remove(oldname, with_delete=True, with_triggers=with_triggers)
return True
def add(self,ref,save=False,with_copy=False,with_triggers=True,with_sync=True,quick_pxe_update=False,check_for_duplicate_names=False,check_for_duplicate_netinfo=False):
"""
Add an object to the collection, if it's valid. Returns True
if the object was added to the collection. Returns False if the
object specified by ref deems itself invalid (and therefore
won't be added to the collection).
with_copy is a bit of a misnomer, but lots of internal add operations
can run with "with_copy" as False. True means a real final commit, as if
entered from the command line (or basically, by a user).
With with_copy as False, the particular add call might just be being run
during deserialization, in which case extra semantics around the add don't really apply.
So, in that case, don't run any triggers and don't deal with any actual files.
"""
if self.lite_sync is None:
self.lite_sync = action_litesync.BootLiteSync(self.config)
# migration path for old API parameter that I've renamed.
if with_copy and not save:
save = with_copy
if not save:
# for people that aren't quite aware of the API
# if not saving the object, you can't run these features
with_triggers = False
with_sync = False
# Avoid adding objects to the collection
        # if an object with the same name/ip/mac already exists.
self.__duplication_checks(ref,check_for_duplicate_names,check_for_duplicate_netinfo)
if ref is None or not ref.is_valid():
raise CX(_("insufficient or invalid arguments supplied"))
if ref.COLLECTION_TYPE != self.collection_type():
raise CX(_("API error: storing wrong data type in collection"))
if not save:
# don't need to run triggers, so add it already ...
self.listing[ref.name.lower()] = ref
# perform filesystem operations
if save:
self.log_func("saving %s %s" % (self.collection_type(), ref.name))
# failure of a pre trigger will prevent the object from being added
|
krajj7/spectrogram
|
viewer/ui_viewer.py
|
Python
|
gpl-2.0
| 6,982
| 0.007734
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './viewer.ui'
#
# Created: Sun Aug 23 04:04:27 2009
# by: PyQt4 UI code generator 4.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(566,421)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtGui.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.pathEdit = QtGui.QLineEdit(self.centralwidget)
self.pathEdit.setObjectName("pathEdit")
self.horizontalLayout.addWidget(self.pathEdit)
self.browseButton = QtGui.QPushButton(self.centralwidget)
self.browseButton.setObjectName("browseButton")
self.horizontalLayout.addWidget(self.browseButton)
spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
self.horizontalLayout.addWidget(self.label_3)
self.ppsSpin = QtGui.QSpinBox(self.centralwidget)
self.ppsSpin.setMaximum(10000)
self.ppsSpin.setSingleStep(10)
self.ppsSpin.setProperty("value",QtCore.QVariant(100))
self.ppsSpin.setObjectName("ppsSpin")
self.horizontalLayout.addWidget(self.ppsSpin)
self.verticalLayout.addLayout(self.horizontalLayout)
self.scrollArea = QtGui.QScrollArea(self.centralwidget)
self.scrollArea.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents_2 = QtGui.QWidget(self.scrollArea)
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0,1,513,290))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.scrollAreaWidgetContents_2)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.imageLabel = CursorLabel(self.scrollAreaWidgetContents_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.imageLabel.sizePolicy().hasHeightForWidth())
self.imageLabel.setSizePolicy(sizePolicy)
self.imageLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.imageLabel.setObjectName("imageLabel")
self.horizontalLayout_3.addWidget(self.imageLabel)
self.verticalLayout.addWidget(self.scrollArea)
        self.progress = QtGui.QProgressBar(self.centralwidget)
self.progress.setProperty("value",QtCore.QVariant(0))
self.progress.setTextVisible(False)
self.progress.setObjectName("progress")
self.verticalLayout.addWidget(self.progress)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.commandCheck = QtGui.QCheckBox(self.centralwidget)
self.commandCheck.setObjectName("commandCheck")
self.horizontalLayout_2.addWidget(self.commandCheck)
self.commandEdit = QtGui.QLineEdit(self.centralwidget)
self.commandEdit.setObjectName("commandEdit")
self.horizontalLayout_2.addWidget(self.commandEdit)
spacerItem1 = QtGui.QSpacerItem(20,20,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.centeredCheck = QtGui.QCheckBox(self.centralwidget)
self.centeredCheck.setChecked(True)
self.centeredCheck.setObjectName("centeredCheck")
self.horizontalLayout_2.addWidget(self.centeredCheck)
spacerItem2 = QtGui.QSpacerItem(20,20,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.playButton = QtGui.QPushButton(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(170,0,0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active,QtGui.QPalette.Button,brush)
brush = QtGui.QBrush(QtGui.QColor(170,0,0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive,QtGui.QPalette.Button,brush)
brush = QtGui.QBrush(QtGui.QColor(170,0,0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled,QtGui.QPalette.Button,brush)
self.playButton.setPalette(palette)
self.playButton.setObjectName("playButton")
self.horizontalLayout_2.addWidget(self.playButton)
self.stopButton = QtGui.QPushButton(self.centralwidget)
self.stopButton.setObjectName("stopButton")
self.horizontalLayout_2.addWidget(self.stopButton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Spectrogram", None, QtGui.QApplication.UnicodeUTF8))
self.browseButton.setText(QtGui.QApplication.translate("MainWindow", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Pixels per second", None, QtGui.QApplication.UnicodeUTF8))
self.imageLabel.setText(QtGui.QApplication.translate("MainWindow", "[image]", None, QtGui.QApplication.UnicodeUTF8))
self.commandCheck.setText(QtGui.QApplication.translate("MainWindow", "Command", None, QtGui.QApplication.UnicodeUTF8))
self.commandEdit.setText(QtGui.QApplication.translate("MainWindow", "xmms -p", None, QtGui.QApplication.UnicodeUTF8))
self.centeredCheck.setText(QtGui.QApplication.translate("MainWindow", "Centered", None, QtGui.QApplication.UnicodeUTF8))
self.playButton.setText(QtGui.QApplication.translate("MainWindow", "Play", None, QtGui.QApplication.UnicodeUTF8))
self.stopButton.setText(QtGui.QApplication.translate("MainWindow", "Stop", None, QtGui.QApplication.UnicodeUTF8))
from cursorlabel import CursorLabel
|
cigroup-ol/metaopt
|
metaopt/concurrent/worker/util/import_function.py
|
Python
|
bsd-3-clause
| 654
| 0.001529
|
# -*- coding: utf-8 -*-
"""
Utility that imports a function.
"""
# Future
from __future__ import absolute_import, division, print_function, \
    unicode_literals, with_statement
def import_function(function):
"""Import
|
s function given by qualified package name"""
function = __import__(function, globals(), locals(), ['function'], 0).f
# function = getattr(__import__(function["module"], globals(), locals(), ['function'], 0), function["name"]) TODO
# Note that the following is equivalent:
# from MyPackage.MyModule import f as function
# Also note this always imports the function "f" as "function".
return function
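# Usage sketch (module name is hypothetical): the argument is a dotted module
# path whose module defines a callable named "f", which is returned:
#   f = import_function('mypackage.mymodule')
#   result = f()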
|
pythonprobr/notmagic
|
pt-br/baralho_mut.py
|
Python
|
mit
| 4,149
| 0.004097
|
#!/usr/bin/env python3
"""
>>> baralho = Baralho()
>>> len(baralho)
52
>>> baralho[0]
Carta(valor='2', naipe='paus')
>>> baralho[-1]
Carta(valor='A', naipe='espadas')
>>> from random import choice
>>> choice(baralho) #doctest:+SKIP
Carta(valor='4', naipe='paus')
>>> choice(baralho) #doctest:+SKIP
Carta(valor='A', naipe='espadas')
>>> choice(baralho) #doctest:+SKIP
Carta(valor='8', naipe='espadas')
>>> baralho[:5] #doctest:+NORMALIZE_WHITESPACE
[Carta(valor='2', naipe='paus'), Carta(valor='3', naipe='paus'),
Carta(valor='4', naipe='paus'), Carta(valor='5', naipe='paus'),
Carta(valor='6', naipe='paus')]
>>> baralho[-3:] #doctest:+NORMALIZE_WHITESPACE
[Carta(valor='Q', naipe='espadas'),
Carta(valor='K', naipe='espadas'),
Carta(valor='A', naipe='espadas')]
>>> for carta in baralho: #doctest:+ELLIPSIS
... print(carta)
...
Carta(valor='2', naipe='paus')
Carta(valor='3', naipe='paus')
Carta(valor='4', naipe='paus')
...
Carta(valor='Q', naipe='espadas')
Carta(valor='K', naipe='espadas')
Carta(valor='A', naipe='espadas')
To generate a reversed listing:
::
>>> for carta in reversed(baralho): #doctest:+ELLIPSIS
... print(carta)
...
Carta(valor='A', naipe='espadas')
Carta(valor='K', naipe='espadas')
Carta(valor='Q', naipe='espadas')
...
Carta(valor='4', naipe='paus')
Carta(valor='3', naipe='paus')
Carta(valor='2', naipe='paus')
For a numbered listing, we use `enumerate`:
::
>>> for n, carta in enumerate(baralho, 1): #doctest:+ELLIPSIS
... print(format(n, '2'), carta)
...
1 Carta(valor='2', naipe='paus')
2 Carta(valor='3', naipe='paus')
3 Carta(valor='4', naipe='paus')
...
50 Carta(valor='Q', naipe='espadas')
51 Carta(valor='K', naipe='espadas')
52 Carta(valor='A', naipe='espadas')
Get all the Jacks in a baralho.
::
>>> [carta for carta in baralho if carta.valor=='J']
[Carta(valor='J', naipe='paus'), Carta(valor='J', naipe='ouros'), Carta(valor='J', naipe='copas'), Carta(valor='J', naipe='espadas')]
Ranking by alternate color naipes: ouros (lowest), followed by paus, copas, and espadas (highest).
>>> hand = [Carta(valor='2', naipe='ouros'), Carta(valor='2', naipe='paus'),
... Carta(valor='3', naipe='ouros'), Carta(valor='3', naipe='paus'),
... Carta(valor='A', naipe='espadas')]
>>> [cores_alternadas(carta) for carta in hand]
[0, 1, 4, 5, 51]
>>> hand = [Carta(valor='A', naipe='espadas'),
... Carta(valor='K', naipe='ouros'),
... Carta(valor='A', naipe='ouros')]
>>> for carta in sorted(hand,key=cores_alternadas):
... print(carta)
Carta(valor='K', naipe='ouros')
Carta(valor='A', naipe='ouros')
Carta(valor='A', naipe='espadas')
>>> for carta in sorted(baralho, key=cores_alternadas): #doctest:+ELLIPSIS
... print(carta)
Carta(valor='2', naipe='ouros')
Carta(valor='2', naipe='paus')
Carta(valor='2', naipe='copas')
Carta(valor='2', naipe='espadas')
Carta(valor='3', naipe='ouros')
...
Carta(valor='A', naipe='copas')
Carta(valor='A', naipe='espadas')
>>> from random import shuffle
>>> shuffle(baralho)
"""
import collections
Carta = collections.namedtuple('Carta', ['valor', 'naipe'])
class Baralho:
valores = [str(n) for n in range(2,11)] + list('JQKA')
naipes = 'paus ouros copas espadas'.split()
def __init__(self):
self.cartas = [Carta(v, n) for n in self.naipes for v in self.valores]
def __len__(self):
return len(self.cartas)
def __getitem__(self, posicao):
return self.cartas[posicao]
def __setitem__(self, posicao, carta):
self.cartas[posicao] = carta
def cores_alternadas(carta):
valor_value = Baralho.valores.index(carta.valor)
naipes = 'ouros paus copas espadas'.split()
return valor_value * len(naipes) + naipes.index(carta.naipe)
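# Quick sanity check (illustrative): with this ranking the 2 of ouros maps to
# 0 and the A of espadas to 51, matching the doctest above:
#   cores_alternadas(Carta('2', 'ouros'))    # -> 0
#   cores_alternadas(Carta('A', 'espadas'))  # -> 51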
|
sathnaga/virt-test
|
qemu/tests/floppy.py
|
Python
|
gpl-2.0
| 18,545
| 0.001887
|
import logging, time, os, sys, re
from autotest.client.shared import error
from autotest.client import utils
from autotest.client.shared.syncdata import SyncData
from virttest import data_dir, env_process, utils_test, aexpect
@error.context_aware
def run_floppy(test, params, env):
"""
Test virtual floppy of guest:
1) Create a floppy disk image on host
2) Start the guest with this floppy image.
3) Make a file system on guest virtual floppy.
4) Calculate md5sum value of a file and copy it into floppy.
5) Verify whether the md5sum does match.
@param test: QEMU test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
source_file = params["source_file"]
dest_file = params["dest_file"]
login_timeout = int(params.get("login_timeout", 360))
floppy_prepare_timeout = int(params.get("floppy_prepare_timeout", 360))
guest_floppy_path = params["guest_floppy_path"]
def create_floppy(params, prepare=True):
"""
Creates 'new' floppy with one file on it
        @param params: parameters for test
        @param prepare: if True then it prepares the floppy image.
@return: path to new floppy file.
"""
error.context("creating test floppy")
floppy = params["floppy_name"]
if not os.path.isabs(floppy):
floppy = os.path.join(data_dir.get_data_dir(), floppy)
if prepare:
utils.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy)
return floppy
def cleanup_floppy(path):
""" Removes created floppy """
error.context("cleaning up temp floppy images")
os.remove("%s" % path)
def lazy_copy(vm, dst_path, check_path, copy_timeout=None, dsize=None):
"""
Start disk load. Cyclic copy from src_path to dst_path.
@param vm: VM where to find a disk.
        @param dst_path: Destination of data
@param copy_timeout: Timeout for copy
@param dsize: Size of data block which is periodically copied.
"""
if copy_timeout is None:
copy_timeout = 120
session = vm.wait_for_login(timeout=login_timeout)
cmd = ('nohup bash -c "while [ true ]; do echo \"1\" | '
'tee -a %s >> %s; sleep 0.1; done" 2> /dev/null &' %
(check_path, dst_path))
pid = re.search(r"\[.+\] (.+)",
session.cmd_output(cmd, timeout=copy_timeout))
return pid.group(1)
class MiniSubtest(object):
def __new__(cls, *args, **kargs):
self = super(MiniSubtest, cls).__new__(cls)
ret = None
exc_info = None
if args is None:
args = []
try:
try:
ret = self.test(*args, **kargs)
except Exception:
exc_info = sys.exc_info()
finally:
if hasattr(self, "clean"):
try:
self.clean()
except Exception:
if exc_info is None:
raise
if exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
return ret
class test_singlehost(MiniSubtest):
def test(self):
create_floppy(params)
vm = env.get_vm(params["main_vm"])
vm.create()
self.session = vm.wait_for_login(timeout=login_timeout)
self.dest_dir = params["mount_dir"]
# If mount_dir specified, treat guest as a Linux OS
            # Some Linux distributions do not load the floppy driver at boot
            # and Windows needs time to load and init the floppy driver
if self.dest_dir:
lsmod = self.session.cmd("lsmod")
if not 'floppy' in lsmod:
self.session.cmd("modprobe floppy")
else:
time.sleep(20)
error.context("Formating floppy disk before using it")
format_cmd = params["format_floppy_cmd"]
self.session.cmd(format_cmd, timeout=120)
logging.info("Floppy disk formatted successfully")
if self.dest_dir:
error.context("Mounting floppy")
self.session.cmd("mount -t vfat %s %s" % (guest_floppy_path,
self.dest_dir))
error.context("Testing floppy")
self.session.cmd(params["test_floppy_cmd"])
error.context("Copying file to the floppy")
md5_cmd = params.get("md5_cmd")
if md5_cmd:
md5_source = self.session.cmd("%s %s" % (params["md5_cmd"],
source_file))
try:
md5_source = md5_source.split(" ")[0]
except IndexError:
error.TestError("Failed to get md5 from source file,"
" output: '%s'" % md5_source)
else:
md5_source = None
self.session.cmd("%s %s %s" % (params["copy_cmd"], source_file,
dest_file))
logging.info("Succeed to copy file '%s' into floppy disk" %
source_file)
error.context("Checking if the file is unchanged after copy")
if md5_cmd:
md5_dest = self.session.cmd("%s %s" % (params["md5_cmd"],
dest_file))
try:
md5_dest = md5_dest.split(" ")[0]
except IndexError:
error.TestError("Failed to get md5 from dest file,"
" output: '%s'" % md5_dest)
if md5_source != md5_dest:
raise error.TestFail("File changed after copy to floppy")
else:
md5_dest = None
self.session.cmd("%s %s %s" % (params["diff_file_cmd"],
source_file, dest_file))
def clean(self):
clean_cmd = "%s %s" % (params["clean_cmd"], dest_file)
self.session.cmd(clean_cmd)
if self.dest_dir:
self.session.cmd("umount %s" % self.dest_dir)
self.session.close()
class Multihost(MiniSubtest):
def test(self):
error.context("Preparing migration env and floppies.")
mig_protocol = params.get("mig_protocol", "tcp")
self.mig_type = utils_test.MultihostMigration
if mig_protocol == "fd":
self.mig_type = utils_test.MultihostMigrationFd
if mig_protocol == "exec":
self.mig_type = utils_test.MultihostMigrationExec
self.vms = params.get("vms").split(" ")
self.srchost = params["hosts"][0]
self.dsthost = params["hosts"][1]
self.is_src = params["hostid"] == self.srchost
self.mig = self.mig_type(test, params, env, False, )
if self.is_src:
self.floppy = create_floppy(params)
self.floppy_dir = os.path.dirname(self.floppy)
params["start_vm"] = "yes"
env_process.process(test, params, env,
env_process.preprocess_image,
env_process.preprocess_vm)
vm = env.get_vm(self.vms[0])
vm.wait_for_login(timeout=login_timeout)
else:
self.floppy = create_floppy(params, False)
self.floppy_dir = os.path.dirname(self.floppy)
def clean(self):
self.mig.cleanup()
if self.is_src:
cleanup_floppy(self.floppy)
class test_multihost_write(Multihost):
def test(self):
super(test_multihost_write, self).test()
copy_timeout = int(params.get("copy_timeout", 480))
self.mount_dir = params["mount_dir"]
|
nrwahl2/ansible
|
test/units/plugins/callback/test_callback.py
|
Python
|
gpl-3.0
| 11,491
| 0.000087
|
# (c) 2012-2014, Chris Meyers <chris.meyers.fsu@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import textwrap
import types
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open, MagicMock
from ansible.plugins.callback import CallbackBase
class TestCallback(unittest.TestCase):
# FIXME: This doesn't really test anything...
def test_init(self):
CallbackBase()
def test_display(self):
display_mock = MagicMock()
display_mock.verbosity = 0
cb = CallbackBase(display=display_mock)
self.assertIs(cb._display, display_mock)
def test_display_verbose(self):
display_mock = MagicMock()
display_mock.verbosity = 5
cb = CallbackBase(display=display_mock)
self.assertIs(cb._display, display_mock)
# TODO: import callback module so we can patch callback.cli/callback.C
class TestCallbackResults(unittest.TestCase):
def test_get_item(self):
cb = CallbackBase()
results = {'item': 'some_item'}
res = cb._get_item(results)
self.assertEquals(res, 'some_item')
def test_get_item_no_log(self):
cb = CallbackBase()
        results = {'item': 'some_item', '_ansible_no_log': True}
res = cb._get_item(results)
self.assertEquals(res, "(censored due to no_log)")
results = {'item': 'some_item', '_ansible_no_log': False}
        res = cb._get_item(results)
self.assertEquals(res, "some_item")
def test_clean_results(self):
cb = CallbackBase()
result = {'item': 'some_item',
'invocation': 'foo --bar whatever [some_json]',
'changed': True}
self.assertTrue('changed' in result)
self.assertTrue('invocation' in result)
cb._clean_results(result, 'debug')
class TestCallbackDumpResults(unittest.TestCase):
def test_internal_keys(self):
cb = CallbackBase()
result = {'item': 'some_item',
'_ansible_some_var': 'SENTINEL',
'testing_ansible_out': 'should_be_left_in LEFTIN',
'invocation': 'foo --bar whatever [some_json]',
'some_dict_key': {'a_sub_dict_for_key': 'baz'},
'bad_dict_key': {'_ansible_internal_blah': 'SENTINEL'},
'changed': True}
json_out = cb._dump_results(result)
self.assertFalse('"_ansible_' in json_out)
self.assertFalse('SENTINEL' in json_out)
self.assertTrue('LEFTIN' in json_out)
def test_no_log(self):
cb = CallbackBase()
result = {'item': 'some_item',
'_ansible_no_log': True,
'some_secrets': 'SENTINEL'}
json_out = cb._dump_results(result)
self.assertFalse('SENTINEL' in json_out)
self.assertTrue('no_log' in json_out)
self.assertTrue('output has been hidden' in json_out)
def test_exception(self):
cb = CallbackBase()
result = {'item': 'some_item LEFTIN',
'exception': ['frame1', 'SENTINEL']}
json_out = cb._dump_results(result)
self.assertFalse('SENTINEL' in json_out)
self.assertFalse('exception' in json_out)
self.assertTrue('LEFTIN' in json_out)
def test_verbose(self):
cb = CallbackBase()
result = {'item': 'some_item LEFTIN',
'_ansible_verbose_always': 'chicane'}
json_out = cb._dump_results(result)
self.assertFalse('SENTINEL' in json_out)
self.assertTrue('LEFTIN' in json_out)
def test_diff(self):
cb = CallbackBase()
result = {'item': 'some_item LEFTIN',
'diff': ['remove stuff', 'added LEFTIN'],
'_ansible_verbose_always': 'chicane'}
json_out = cb._dump_results(result)
self.assertFalse('SENTINEL' in json_out)
self.assertTrue('LEFTIN' in json_out)
# TODO: trigger the 'except UnicodeError' around _get_diff
# that try except orig appeared in 61d01f549f2143fd9adfa4ffae42f09d24649c26
# in 2013 so maybe a < py2.6 issue
class TestCallbackDiff(unittest.TestCase):
def setUp(self):
self.cb = CallbackBase()
def _strip_color(self, s):
return re.sub('\033\\[[^m]*m', '', s)
def test_difflist(self):
# TODO: split into smaller tests?
difflist = [{'before': ['preface\nThe Before String\npostscript'],
'after': ['preface\nThe After String\npostscript'],
'before_header': 'just before',
'after_header': 'just after'
},
{'before': ['preface\nThe Before String\npostscript'],
'after': ['preface\nThe After String\npostscript'],
},
{'src_binary': 'chicane'},
{'dst_binary': 'chicanery'},
{'dst_larger': 1},
{'src_larger': 2},
{'prepared': 'what does prepared do?'},
{'before_header': 'just before'},
{'after_header': 'just after'}]
res = self.cb._get_diff(difflist)
self.assertIn('Before String', res)
self.assertIn('After String', res)
self.assertIn('just before', res)
self.assertIn('just after', res)
def test_simple_diff(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree\n',
'after': 'one\nthree\nfour\n',
})),
textwrap.dedent('''\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +1,3 @@
one
-two
three
+four
'''))
def test_new_file(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': '',
'after': 'one\ntwo\nthree\n',
})),
textwrap.dedent('''\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -0,0 +1,3 @@
+one
+two
+three
'''))
def test_clear_file(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one\ntwo\nthree\n',
'after': '',
})),
textwrap.dedent('''\
--- before: somefile.txt
+++ after: generated from template somefile.j2
@@ -1,3 +0,0 @@
-one
-two
-three
'''))
def test_no_trailing_newline_before(self):
self.assertMultiLineEqual(
self._strip_color(self.cb._get_diff({
'before_header': 'somefile.txt',
'after_header': 'generated from template somefile.j2',
'before': 'one
|
AmbiBox/kodi.script.ambibox
|
resources/lib/ambiwincon.py
|
Python
|
gpl-2.0
| 1,568
| 0.000638
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 KenV99
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
WM_HOTKEY = 786
VK_SHIFT = 16
VK_CONTROL = 17
VK_SPACE = 32
VK_HOME = 36
VK_LEFT = 37
VK_UP = 38
VK_RIGHT = 39
VK_DOWN = 40
VK_INSERT = 45
VK_DELETE = 46
VK_HELP = 47
VK_LWIN = 91
VK_RWIN = 92
VK_APPS = 93
VK_NUMPAD0 = 96
VK_NUMPAD1 = 97
VK_NUMPAD2 = 98
VK_NUMPAD3 = 99
VK_NUMPAD4 = 100
VK_NUMPAD5 = 101
VK_NUMPAD6 = 102
VK_NUMPAD7 = 103
VK_NUMPAD8 = 104
VK_NUMPAD9 = 105
VK_F1 = 112
VK_F2 = 113
VK_F3 = 114
VK_F4 = 115
VK_F5 = 116
VK_F6 = 117
VK_F7 = 118
VK_F8 = 119
VK_F9 = 120
VK_F10 = 121
VK_F11 = 122
VK_F12 = 123
VK_F13 = 124
VK_F14 = 125
VK_F15 = 126
VK_F16 = 127
VK_F17 = 128
VK_F18 = 129
VK_F19 = 130
VK_F20 = 131
VK_F21 = 132
VK_F22 = 133
VK_F23 = 134
VK_F24 = 135
VK_NUMLOCK = 144
VK_SCROLL = 145
VK_LSHIFT = 160
VK_RSHIFT = 161
VK_LCONTROL = 162
VK_RCONTROL = 163
MOD_ALT = 1
MOD_CONTROL = 2
MOD_SHIFT = 4
MOD_WIN = 8
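# Example (illustrative): the MOD_* values combine bitwise when registering a
# hotkey via the Win32 RegisterHotKey API, e.g. Ctrl+Alt+F5 is
# fsModifiers=MOD_CONTROL | MOD_ALT with vk=VK_F5; Windows then posts a
# WM_HOTKEY message when the combination is pressed.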
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_countries_togo.py
|
Python
|
gpl-3.0
| 1,105
| 0.00272
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
""
|
"
class Countries_Togo():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
        menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Togo"]))
|
brhoades/megaminer16-anarchy
|
joueur/game_manager.py
|
Python
|
mit
| 3,572
| 0.006719
|
from joueur.delta_mergeable import DeltaMergeable
from joueur.base_game_object import BaseGameObject
from joueur.utilities import camel_case_converter
from joueur.serializer import is_game_object_reference, is_object
# @class GameManager: manages the game and its game objects, including deserializing deltas
class GameManager():
    def __init__(self, game):
self.game = game
self._game_object_classes = game._game_object_classes
def set_constants(self, constants):
self._server_constants = constants
self._DELTA_REMOVED = constants['DELTA_REMOVED']
self._DELTA_LIST_LENGTH = constants['DELTA_LIST_LENGTH']
## applies a delta state (change in state information) to this game
def apply_delta_state(self, delta):
if 'gameObjects' in delta:
self._init_game_objects(delta['gameObjects'])
self._merge_delta(self.game, delta)
    ## game objects can be references in the delta states (to handle cycles); they will all point to the game objects here.
def _init_game_objects(self, delta_game_objects):
for id, obj in delta_game_objects.items():
if not id in self.game._game_objects: # then we need to create it
self.game._game_objects[id] = self._game_object_classes[obj['gameObjectName']]()
## recursively merges delta changes to the game.
def _merge_delta(self, state, delta):
delta_length = -1
if self._DELTA_LIST_LENGTH in delta:
delta_length = delta[self._DELTA_LIST_LENGTH]
del delta[self._DELTA_LIST_LENGTH] # we don't want to copy this key/value over to the state, it was just to signify it is an array
if delta_length > -1: # then this part in the state is an array
            while len(state) > delta_length: # remove elements from the array to make its size correct.
                state.pop()
            while len(state) < delta_length: # append elements to the array to make its size correct.
                state.append(None)
for key in delta: # deltas will always be objects when iterating through, arrays just have keys of numbers
d = delta[key]
state_key = key # array's keys are real numbers, not strings e.g. "1"
key_in_state = False
if isinstance(state, list):
state_key = int(key)
key_in_state = state_key < len(state)
else:
if isinstance(state, DeltaMergeable):
state_key = "_" + camel_case_converter(state_key)
key_in_state = state_key in state
value = d
if d == self._DELTA_REMOVED:
value = None
if key_in_state:
del state[state_key]
elif is_game_object_reference(d): # then this is a shallow reference to a game object
value = self.game.get_game_object(d['id'])
elif is_object(d) and key_in_state and is_object(state[state_key]):
value = None
self._merge_delta(state[state_key], d)
elif not key_in_state and is_object(d):
if isinstance(d, dict):
                    state[state_key] = [] if self._DELTA_LIST_LENGTH in d else {}  # membership test was inverted
value = None
self._merge_delta(state[state_key], d)
if value != None:
if isinstance(state_key, int) or isinstance(state, dict):
state[state_key] = value
else:
setattr(state, state_key, value)
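# Illustrative delta shape (hypothetical keys; '<LEN>' stands for the
# server-provided DELTA_LIST_LENGTH marker):
#   {'gameObjects': {'7': {'gameObjectName': 'Unit'}},
#    'units': {'<LEN>': 1, '0': {'id': '7'}}}
# Applying it registers game object '7' (creating it if new) and makes the
# game's 'units' list a one-element list referencing that object.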
|
vurmux/gorynych
|
gorynych/core/edge.py
|
Python
|
apache-2.0
| 312
| 0.003205
|
from .entity import Entity
class Edge(Entity):
"""Basic class for all edge objects"""
meta = {
"ontology": "gch",
"typename": "Edge",
"hierarchy": "gch/Entity.Edge"
}
    def __init__(self, attributes={}, tags=set([])):
super(Edge, self).__init__(attributes, tags)
|
neopenx/Dragon
|
Dragon/python/dragon/vm/theano/compile/sharedvalue.py
|
Python
|
bsd-2-clause
| 890
| 0.003371
|
# --------------------------------------------------------
# Theano @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import numpy as np
import dragon.core.workspace as ws
from dragon.core.tensor import Tensor, GetTensorName
def shared(value, name=None, **kwargs):
"""Construct a Tensor initialized with ``value``.
Parameters
----------
value : basic type, list or numpy.ndarray
The numerical values.
name : str
The name of tensor.
Returns
-------
Tensor
The initialized tensor.
"""
    if not isinstance(value, (int, float, list, np.ndarray)):
raise TypeError("Unsupported type of value: {}".format(type(value)))
if name is None: name = GetTensorName()
tensor = Tensor(name).Variable()
ws.FeedTensor(tensor, value)
return tensor
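# Minimal usage sketch (values are illustrative):
#   x = shared(1.0, name='x')       # scalar, explicit name
#   w = shared(np.zeros((3, 3)))    # ndarray, name auto-generated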
|
IljaKosynkin/OnFlyLocalizer
|
OnFlyLocalizer/OnFlyLocalizer/LanguageEnumGenerator.py
|
Python
|
apache-2.0
| 1,612
| 0.003102
|
import os
def generate_enum(path, localizations):
full_path = path + "/" + "Language.swift"
if not os.path.isfile(full_path):
enum_file = open(full_path, 'w+')
enum_file.write("import Foundation\n\n")
enum_file.write("enum Language: String {\n")
enum_file.write("\tprivate static let languageKey = \"AppleLanguages\"\n")
enum_file.write("\tprivate static var currentLanguage: Language?\n\n")
        for localization in localizations:
enum_file.write("\tcase " + localization + " = \"" + localization + "\"\n")
enum_file.write("\tcase Undefined = \"\"\n\n")
enum_file.write("\tstatic func getCurrentLanguage() -> Language {\n")
enum_file.write("\t\tif let language = currentLanguage {\n")
enum_file.write("\t\t\treturn language\n")
enum_file.write("\t\t}\n\n")
enum_file.write("\t\tif let array = UserDefaults.standard.stringArray(forKey: languageKey), let label
|
= array.first, let language = Language(rawValue: label) {\n")
enum_file.write("\t\t\tcurrentLanguage = language\n")
enum_file.write("\t\t\treturn language\n")
enum_file.write("\t\t}\n\n")
enum_file.write("\t\treturn .Undefined\n")
enum_file.write("\t}\n\n")
enum_file.write("\tstatic func setCurrentLanguage(language: Language) {\n")
enum_file.write("\t\tcurrentLanguage = language\n")
enum_file.write("\t\tUserDefaults.standard.set([language.rawValue], forKey: languageKey)\n")
enum_file.write("\t}\n\n")
enum_file.write("}")
enum_file.close()
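# Usage sketch (path and locale codes are illustrative): creates Language.swift
# only if it does not already exist, with cases .en, .de, .fr plus .Undefined:
#   generate_enum('/path/to/MyApp/Sources', ['en', 'de', 'fr'])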
|
tgquintela/pythonUtils
|
pythonUtils/TUI_tools/automatic_questioner.py
|
Python
|
mit
| 19,433
| 0.001029
|
"""
automatic_questioner
--------------------
Module which serves as a interactor between the possible database with the
described structure and which contains information about functions and
variables of other packages.
Scheme of the db
----------------
# {'function_name':
# {'variables':
# {'variable_name':
# {'question_info':
# {'qtype': ['simple_input', 'confirmation_question',
# 'selection_options', 'selection_list_options'],
# 'question_spec': 'question_spec'},
# 'default': default}},
########
# 'descendants': [{'agg_description':
# {variable_name:
# {'variable_value': 'function_name'}
# },
# 'agg_name': 'aggregated_parameter_name'}]
# }}
######## OR
# 'descendants': [{'agg_description': 'function_name'
# 'agg_name': 'aggregated_parameter_name'}]
# }}
#TODO: checker 1 function with list of functions and dicts of dicts
"""
from tui_questioner import general_questioner
def check_quest_info(db):
"""Function which carry out the automatic checking of the database of
function and variables.
Parameters
----------
db: dict
        the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
        variables automatically.
Returns
-------
check: boolean
returns the correctness of the database.
path: list
path of the possible error.
message: str
        message of the error if it exists.
"""
## 0. Initial preset variables needed
# Function to compare lists
def equality_elements_list(a, b):
a = a.keys() if type(a) == dict else a
b = b.keys() if type(b) == dict else b
c = a[-1::-1]
return a == b or c == b
# List of elements available in some dicts at some levels
first_level = ['descendants', 'variables']
desc_2_level = ['agg_description', 'agg_name']
vars_2_level = ['question_info', 'default']
vars_3_level = ['qtype', 'question_spec']
# Messages of errors
m0 = "The given database of functions is not a dictionary."
m1 = "The function '%s' does not have "+str(first_level)+" as keys."
m2 = "The variables of function '%s' is not a dict."
m3 = "Incorrect keys "+str(vars_2_level)+" in function %s and variable %s."
m4 = "Incorrect question_info format for function %s and variable %s."
m5 = "Not a string the 'qtype' of function %s and variable %s."
m6 = "Incorrect 'question_spec' format for function %s and variable %s."
m7 = "Descendants of the function %s is not a list."
m8 = "Elements of the list of descendants not a dict for function %s."
m9 = "Incorrect structure of a dict in descendants for function %s."
m10 = "Incorrect type of agg_description for function %s and variable %s."
m11 = "Incorrect type of agg_description for function %s."
## Check db is a dict
if type(db) != dict:
return False, [], m0
## Loop for check each function in db
for funct in db.keys():
## Check main keys:
first_bl = equality_elements_list(db[funct], first_level)
if not first_bl:
return False, [funct], m1 % funct
## Check variables
if not type(db[funct]['variables']) == dict:
check = False
path = [funct, 'variables']
message = m2 % funct
return check, path, message
for var in db[funct]['variables']:
varsbles = db[funct]['variables']
v2_bl = equality_elements_list(varsbles[var], vars_2_level)
v3_bl = equality_elements_list(varsbles[var]['question_info'],
vars_3_level)
qtype_bl = db[funct]['variables'][var]['question_info']['qtype']
qtype_bl = type(qtype_bl) != str
qspec_bl = db[funct]['variables'][var]['question_info']
qspec_bl = type(qspec_bl['question_spec']) != dict
if not v2_bl:
check = False
path = [funct, 'variables', var]
message = m3 % (funct, var)
return check, path, message
### Check question_info
if not v3_bl:
check = False
path = [funct, 'variables', 'question_info']
message = m4 % (funct, var)
return check, path, message
if qtype_bl:
check = False
path = [funct, 'variables', 'question_info', 'qtype']
message = m5 % (funct, var)
return check, path, message
if qspec_bl:
check = False
path = [funct, 'variables', 'question_info', 'question_spec']
message = m6 % (funct, var)
return check, path, message
## Check descendants
if not type(db[funct]['descendants']) == list:
check = False
path = [funct, 'descendants']
message = m7 % funct
return check, path, message
for var_desc in db[funct]['descendants']:
if not type(var_desc) == dict:
check = False
path = [funct, 'descendants']
message = m8 % funct
return check, path, message
d2_bl = equality_elements_list(var_desc.keys(), desc_2_level)
if not d2_bl:
check = False
path = [funct, 'descendants']
message = m9 % funct
return check, path, message
if type(var_desc['agg_description']) == str:
pass
elif type(var_desc['agg_description']) == dict:
for varname in var_desc['agg_description']:
if not type(var_desc['agg_description'][varname]) == dict:
check = False
path = [funct, 'descendants', 'agg_description']
message = m10 % (funct, varname)
return check, path, message
else:
check = False
path = [funct, 'descendants', 'agg_description']
message = m11 % funct
return check, path, message
return True, [], ''
def automatic_questioner(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
        variables automatically.
choosen: dict
        previously chosen parameters. The function will avoid asking for the
pre-set parameters.
Returns
-------
choosen_values: dict
        the selected values, ready to be passed into the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarv
|
estnltk/estnltk
|
estnltk/vabamorf/tests/test_disambiguate.py
|
Python
|
gpl-2.0
| 1,255
| 0.000807
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import unittest
from ..morf import analyze, disambiguate
# EINO SANTANEN. Muodon vanhimmat
# http://luulet6lgendus.blogspot.com/
sentences = '''KÕIGE VANEM MUDEL
Pimedas luusivad robotid,
originaalsed tšehhi robotid kahekümnendatest.
Robota! kisendavad nad, uhked originaalsed robotid,
hüüdes iseenda nime.
Robota! möirgavad nad, naftasegused elukad,
hiiglase vaimusünnitised, robotid:
kurvameelsetena kauguses,
ebamäärastena kauguses,
mattudes vastuoludesse,
muutudes peaaegu julmaks oma õiglusejanus.
Robota! Kui päike pageb monoliitide kohalt,
tähistavad nad vägisi
öö salajast geomeetriat.
Õudne on inimesel vaadata
neid metsikuid mudeleid.
Kuuntele, romantiikkaa, 2002'''.split('\n')
class TestDisambiguator(unittest.TestCase):
"""Test the separate disambiguate function
    against the built in disambiguate=True function.
Both must work the same."""
def test_disambiguator(self):
for sentence in sentences:
an_with = analyze(sentence)
an_without = analyze(sentence, disambiguate=False)
disamb = disambiguate(an_without)
self.assertListEqual(an_with, disamb)
|
linkfloyd/linkfloyd
|
linkfloyd/summaries/management/commands/send_summary_mails.py
|
Python
|
bsd-3-clause
| 2,371
| 0.003374
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from django.template.loader import render_to_string
from django.conf import settings
from preferences.models import UserPreferences
from summaries.models import Unseen
from django.contrib.sites.models import Site
from optparse import make_option
from django.core.mail import EmailMultiAlternatives
class Command(BaseCommand):
args = 'daily | weekly | monthly'
help = 'Builds and sends summary mails for given period'
option_list = BaseCommand.option_list + (
make_option('--dry-run',
action='store_true',
dest='dry',
default=False,
help='Run without posting emails and writing them on stdout'),
)
def handle(self, *args, **options):
if not len(args) == 1:
raise CommandError("Give a period please")
period = args[0]
if not period in ("daily", "weekly", "monthly"):
raise CommandError("Period must be daily, weekly or monthly.")
users = [preference.user for preference in
UserPreferences.objects.filter(summary_mails=period)]
for user in users:
unseen_models = Unseen.objects.filter(user=user)
unseen_links = [unseen.link for unseen in unseen_models]
if unseen_links:
email_title = "%s new links for you:" % len(unseen_links)
email_body_txt = render_to_string("summaries/body.txt", {
"user": user,
"links": unseen_links,
"site": Site.objects.get_current()
})
email_body_html = render_to_string("summaries/body.html", {
"user": user,
"links": unseen_links,
"site": Site.objects.get_current()
})
email = EmailMultiAlternatives(
email_title,
email_body_txt,
"Linkfloyd %s
|
" %settings.DEFAULT_FROM_EMAIL,
[user.email,])
                email.attach_alternative(email_body_html, "text/html")
email.send()
self.stdout.write("Summary email for %s sent\n" % user)
if not options['dry']:
unseen_models.delete()
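# Usage sketch (illustrative): invoked as a Django management command, e.g.
#   python manage.py send_summary_mails weekly
#   python manage.py send_summary_mails daily --dry-run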
|
carragom/modoboa
|
doc/conf.py
|
Python
|
isc
| 7,215
| 0.006514
|
# -*- coding: utf-8 -*-
#
# Modoboa documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 3 22:29:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Modoboa'
copyright = u'2016, Antoine Nguyen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Modoboadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Modoboa.tex', u'Modoboa Documentation',
u'Antoine Nguyen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manu
|
al" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'modoboa', u'Modoboa Documentation',
[u'Antoine Nguyen'], 1)
]
intersphinx_mapping = {
'amavis': ('http://modoboa-amavis.readthedocs.org/en/latest/', None)
}
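
With the intersphinx mapping above, pages in these docs can cross-reference the amavis documentation, e.g. :doc:`amavis:index` (the target docname here is a hypothetical example).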
|
PietPtr/FinalProject
|
backend/restaurant/migrations/0010_auto_20161103_1400.py
|
Python
|
gpl-3.0
| 526
| 0.001901
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 14:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurant', '0009_permission'),
]
operations = [
migrations.AlterModelOptions(
name='permission',
options={'permissions': (('isCook', 'Can see the cooks page'), ('isWaiter', 'Can see the waiter page'), ('isCashier', 'Can see the cashier page'))},
),
]
|
AthosOrg/athos-core
|
setup.py
|
Python
|
mit
| 491
| 0.034623
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# get requirements.txt
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(name='athos-core',
description = 'Athos project core',
url = 'https://github.com/AthosOrg/',
packages = find_packages(),
entry_points = {
'console_scripts': [
              'athos-core=athos.cmd:main'
]
},
install_requires = required,
package_data = {'athos': ['default.yml']}
)
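
The console_scripts entry above maps the `athos-core` command to athos.cmd:main. A hypothetical sketch of the entry point that mapping assumes (the real athos/cmd.py is not shown here):

    # athos/cmd.py (hypothetical)
    def main():
        """Invoked as `athos-core` once the package is installed."""
        print("athos-core starting")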
|
wagigi/fabtools-python
|
fabtools/user.py
|
Python
|
bsd-2-clause
| 8,682
| 0
|
"""
Users
=====
"""
from pipes import quote
import posixpath
import random
import string
from fabric.api import hide, run, settings, sudo, local
from fabtools.group import (
exists as _group_exists,
create as _group_create,
)
from fabtools.files import uncommented_lines
from fabtools.utils import run_as_root
# Python2 and 3 compatibility
from past.builtins import basestring
def exists(name):
"""
Check if a user exists.
"""
with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
return run('getent passwd %(name)s' % locals()).succeeded
_SALT_CHARS = string.ascii_letters + string.digits + './'
def _crypt_password(password):
from crypt import crypt
random.seed()
salt = ''
for _ in range(2):
        salt += random.choice(_SALT_CHARS)
crypted_password = crypt(password, salt)
return crypted_password
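# Note: a two-character salt selects the legacy DES-based crypt() scheme on
# most platforms; glibc's crypt() also accepts stronger salts such as
# "$6$<salt>$" (SHA-512).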
def create(name, comment=None, home=None, create_home=None, skeleton_dir=None,
group=None, create_group=True, extra_groups=None, password=None,
system=False, shell=None, uid=None, ssh_public_keys=None,
non_unique=False):
"""
    Create a new user and its home directory.
If *create_home* is ``None`` (the default), a home directory will be
created for normal users, but not for system users.
You can override the default behaviour by setting *create_home* to
``True`` or ``False``.
If *system* is ``True``, the user will be a system account. Its UID
will be chosen in a specific range, and it will not have a home
directory, unless you explicitely set *create_home* to ``True``.
If *shell* is ``None``, the user's login shell will be the system's
default login shell (usually ``/bin/sh``).
*ssh_public_keys* can be a (local) filename or a list of (local)
filenames of public keys that should be added to the user's SSH
authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).
Example::
import fabtools
if not fabtools.user.exists('alice'):
fabtools.user.create('alice')
with cd('/home/alice'):
# ...
"""
# Note that we use useradd (and not adduser), as it is the most
# portable command to create users across various distributions:
# http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/useradd.html
args = []
if comment:
args.append('-c %s' % quote(comment))
if home:
args.append('-d %s' % quote(home))
if group:
args.append('-g %s' % quote(group))
if create_group:
if not _group_exists(group):
_group_create(group)
if extra_groups:
groups = ','.join(quote(group) for group in extra_groups)
args.append('-G %s' % groups)
if create_home is None:
create_home = not system
if create_home is True:
args.append('-m')
elif create_home is False:
args.append('-M')
if skeleton_dir:
args.append('-k %s' % quote(skeleton_dir))
if password:
crypted_password = _crypt_password(password)
args.append('-p %s' % quote(crypted_password))
if system:
args.append('-r')
if shell:
args.append('-s %s' % quote(shell))
if uid:
args.append('-u %s' % uid)
if non_unique:
args.append('-o')
args.append(name)
args = ' '.join(args)
run_as_root('useradd %s' % args)
if ssh_public_keys:
if isinstance(ssh_public_keys, basestring):
ssh_public_keys = [ssh_public_keys]
add_ssh_public_keys(name, ssh_public_keys)
def modify(name, comment=None, home=None, move_current_home=False, group=None,
extra_groups=None, login_name=None, password=None, shell=None,
uid=None, ssh_public_keys=None, non_unique=False):
"""
Modify an existing user.
*ssh_public_keys* can be a (local) filename or a list of (local)
filenames of public keys that should be added to the user's SSH
authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).
Example::
import fabtools
if fabtools.user.exists('alice'):
fabtools.user.modify('alice', shell='/bin/sh')
"""
args = []
if comment:
args.append('-c %s' % quote(comment))
if home:
args.append('-d %s' % quote(home))
if move_current_home:
args.append('-m')
if group:
args.append('-g %s' % quote(group))
if extra_groups:
groups = ','.join(quote(group) for group in extra_groups)
args.append('-G %s' % groups)
if login_name:
args.append('-l %s' % quote(login_name))
if password:
crypted_password = _crypt_password(password)
args.append('-p %s' % quote(crypted_password))
if shell:
args.append('-s %s' % quote(shell))
if uid:
args.append('-u %s' % quote(uid))
if non_unique:
args.append('-o')
if args:
args.append(name)
args = ' '.join(args)
run_as_root('usermod %s' % args)
if ssh_public_keys:
if isinstance(ssh_public_keys, basestring):
ssh_public_keys = [ssh_public_keys]
add_ssh_public_keys(name, ssh_public_keys)
def home_directory(name):
"""
Get the absolute path to the user's home directory
Example::
import fabtools
home = fabtools.user.home_directory('alice')
"""
with settings(hide('running', 'stdout')):
return run('echo ~' + name)
def local_home_directory(name=''):
"""
Get the absolute path to the local user's home directory
Example::
import fabtools
local_home = fabtools.user.local_home_directory()
"""
with settings(hide('running', 'stdout')):
return local('echo ~' + name, capture=True)
def authorized_keys(name):
"""
Get the list of authorized SSH public keys for the user
"""
ssh_dir = posixpath.join(home_directory(name), '.ssh')
authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys')
return uncommented_lines(authorized_keys_filename, use_sudo=True)
def add_ssh_public_key(name, filename):
"""
Add a public key to the user's authorized SSH keys.
*filename* must be the local filename of a public key that should be
added to the user's SSH authorized keys.
Example::
import fabtools
fabtools.user.add_ssh_public_key('alice', '~/.ssh/id_rsa.pub')
"""
add_ssh_public_keys(name, [filename])
def add_ssh_public_keys(name, filenames):
"""
Add multiple public keys to the user's authorized SSH keys.
*filenames* must be a list of local filenames of public keys that
should be added to the user's SSH authorized keys.
Example::
import fabtools
fabtools.user.add_ssh_public_keys('alice', [
'~/.ssh/id1_rsa.pub',
'~/.ssh/id2_rsa.pub',
])
"""
from fabtools.require.files import (
directory as _require_directory,
file as _require_file,
)
ssh_dir = posixpath.join(home_directory(name), '.ssh')
_require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys')
_require_file(authorized_keys_filename, mode='600', owner=name,
use_sudo=True)
for filename in filenames:
with open(filename) as public_key_file:
public_key = public_key_file.read().strip()
# we don't use fabric.contrib.files.append() as it's buggy
if public_key not in authorized_keys(name):
sudo('echo %s >>%s' % (quote(public_key),
quote(authorized_keys_filename)))
def add_host_keys(name, hostname):
"""
Add all public keys of a host to the user's SSH known hosts file
"""
from fabtools.require.files import (
directory as _require_directory,
file as _require_file,
)
ssh_dir = posixpath.join(home_directory(name), '.ssh')
_require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
    known_hosts_filename = posixpath.join(ssh_dir, 'known_hosts')
|
github/codeql
|
python/ql/test/query-tests/Imports/general/mutates_in_test.py
|
Python
|
mit
| 117
| 0.008547
|
import mutable_attr
import unittest
class T(unittest.TestCase):
def test_foo(self):
        mutable_attr.y = 3
|
PennartLoettring/Poettrix
|
rootfs/usr/lib/python3.4/ctypes/test/test_cast.py
|
Python
|
gpl-2.0
| 3,210
| 0.002492
|
from ctypes import *
import unittest
import sys
class Test(unittest.TestCase):
def test_array2pointer(self):
array = (c_int * 3)(42, 17, 2)
# casting an array to a pointer works.
ptr = cast(array, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
        if 2*sizeof(c_short) == sizeof(c_int):
ptr = cast(array, POINTER(c_short))
if sys.byteorder == "little":
self.assertEqual([ptr[i] for i in range(6)],
[42, 0, 17, 0, 2, 0])
else:
self.assertEqual([ptr[i] for i in range(6)],
[0, 42, 0, 17, 0, 2])
def test_address2pointer(self):
array = (c_int * 3)(42, 17, 2)
        address = addressof(array)
ptr = cast(c_void_p(address), POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
ptr = cast(address, POINTER(c_int))
self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])
def test_p2a_objects(self):
array = (c_char_p * 5)()
self.assertEqual(array._objects, None)
array[0] = b"foo bar"
self.assertEqual(array._objects, {'0': b"foo bar"})
p = cast(array, POINTER(c_char_p))
# array and p share a common _objects attribute
self.assertIs(p._objects, array._objects)
self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
p[0] = b"spam spam"
self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
p[1] = b"foo bar"
self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
self.assertIs(array._objects, p._objects)
def test_other(self):
p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
self.assertEqual(p[:4], [1,2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 3, 4])
self.assertEqual(p[:4:], [1, 2, 3, 4])
self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
p[2] = 96
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
c_int()
self.assertEqual(p[:4], [1, 2, 96, 4])
self.assertEqual(p[:4:], [1, 2, 96, 4])
self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
self.assertEqual(p[:4:3], [1, 4])
def test_char_p(self):
# This didn't work: bad argument to internal function
s = c_char_p(b"hiho")
self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
b"hiho")
try:
c_wchar_p
except NameError:
pass
else:
def test_wchar_p(self):
s = c_wchar_p("hiho")
self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
"hiho")
if __name__ == "__main__":
unittest.main()
|
wikimedia/pywikibot-sf-export
|
jira.py
|
Python
|
mit
| 12,518
| 0.004793
|
#!/usr/bin/env python
#
# Required packages:
reqs = """
requests >= 2.0.0
python-bugzilla >= 0.8.0
html2text >= 3.200.3
"""
import sys
try:
import requests
assert(requests.__version__ >= "2.0.0")
import bugzilla
assert(bugzilla.__version__ >= "0.8.0")
import html2text
assert(html2text.__version__ >= "3.200.3")
except (ImportError, AssertionError), e:
print "Required package not found: ", e
open("jira-reqs.txt", "w").write(reqs)
print "Please pip install -r jira-reqs.txt"
sys.exit(1)
import sys
import textwrap
import json
import re
from datetime import datetime, timedelta
# still something to do with BugZilla here
# JIRA config
stepsize = 1000
if len(sys.argv) < 3:
print("
|
""Usage: {argv[0]} 'bugzilla component name within Tool Labs Tools' 'JIRA JQL query' [-importdoubles]
-importdoubles can be used to double-import bugs, which is useful for
testing. Otherwise, bugs that already exist in B
|
ugzilla
are skipped.
Example:
{argv[0]} 'DrTrigonBot - General' 'project = DRTRIGON'"
""".format(argv=sys.argv))
sys.exit(1)
component = sys.argv[1]
jql = sys.argv[2]
# BZ config
bug_defaults = {
'product': 'Tool Labs tools', # SET THIS!
'component': component, #"Database Queries", # SET THIS!
'version': 'unspecified',
'blocked': '', # SET THIS! (to tracking bug or empty for no tracking bug)
'op_sys': 'All',
'rep_platform': 'All',
}
base_url = "https://bugzilla.wikimedia.org/xmlrpc.cgi"
saveMigration = True
skip_existing = "-importdoubles" not in sys.argv
if False:
base_url = "http://192.168.1.103:8080/xmlrpc.cgi"
saveMigration = False
skip_existing = False
bug_defaults = {
'product': 'TestProduct', # SET THIS!
'component': 'TestComponent', # SET THIS!
'version': 'unspecified',
'blocked': '', # SET THIS! (to tracking bug or empty for no tracking bug)
'op_sys': 'All',
'rep_platform': 'All',
}
username = "wmf.bugconverter@gmail.com"
import config
password = config.password
print "Logging in to Bugzilla..."
bz = bugzilla.Bugzilla(url=base_url)
bz.login(username, password)
def hook(a):
for key in a:
if isinstance(a[key], basestring):
try:
a[key] = datetime.strptime(a[key], "%Y-%m-%dT%H:%M:%S.%f+0000")
except Exception, e:
pass
return a
def get(*args, **kwargs):
kwargs['verify'] = False # mitmproxy
return json.loads(requests.get(*args, **kwargs).text, object_hook=hook)
def reply_format(text, nindent=1):
prefix = ('>'*nindent + ' ') if nindent > 0 else ''
return textwrap.fill(text, initial_indent=prefix, subsequent_indent=prefix, break_long_words=False)
def htmltobz(html):
# remove 'plain text' links that were linkified by jira
html = re.sub(r'<a href="(.*?)">\1</a>', r'\1', html)
h = html2text.HTML2Text()
h.body_width = 0
h.ignore_links = True
h.inline_links = False
h.unicode_snob = True
return h.handle(html)
users = {}
try:
f = open('user-email-mapping.json', 'r')
users = json.load(f)
except Exception, e:
print e
def getBZuser(email, name):
global users
if not email:
email = name + "@invalid"
if email in users:
return users[email]
try:
user = bz.getuser(email)
users[email] = email
return email
except bugzilla.xmlrpclib.Fault, e:
if e.faultCode == 51:
pass
else:
raise
# not found, try heuristics. Search by Full Name!
fusers = bz.searchusers(name)
if not fusers:
users[email] = None
else:
user = fusers[0]
print "Assuming %s <%s> is actually %s <%s>" % (name, email, user.real_name, user.email)
if raw_input("Is this OK? Y/n ").upper().strip() == "Y":
users[email] = user.email
else:
users[email] = None
return users[email]
print "Retrieving issues from JIRA..."
issues = get(
'https://jira.toolserver.org/rest/api/2/search',
params={
'jql': jql,
'fields': 'self',
'maxResults': stepsize
}
)['issues']
runAll = False
maillist = {}
retrIssues = []
print "Getting %i details..." % len(issues)
for issue in issues:
issue = get(issue['self'] + "?expand=renderedFields")
retrIssues.append(issue)
fields = issue['fields']
if fields['assignee']:
maillist[fields['assignee']['emailAddress']] = fields['assignee']['displayName']
maillist[fields['reporter']['emailAddress']] = fields['reporter']['displayName']
for c in fields['comment']['comments']:
if 'author' in c:
maillist[c['author']['emailAddress']] = c['author']['displayName']
print "Retrieving users from bugzilla..."
for mail, name in maillist.items():
bzu = getBZuser(mail, name)
if bzu:
print "%s <%s> => %s" % (name, mail, bzu)
else:
print "%s <%s> not found" % (name, mail)
f = open('user-email-mapping.json', 'w')
json.dump(users, f, indent=4)
f.close()
for issue in retrIssues:
fields = issue['fields']
renderedFields = issue['renderedFields']
# check if issue is already on BZ
existing_bugs = bz.query({"short_desc": issue['key'] + " "})
if existing_bugs and skip_existing:
found = False
for bug in existing_bugs:
if (issue['key'] + " ") in bug.summary:
print "Skipping " + issue['key'] + " " + fields['summary'] + "; already uploaded? Check bug ID %i" % bug.bug_id
found = True
break
if found:
continue
cclist = set()
if fields['assignee']:
cclist.add(getBZuser(fields['assignee']['emailAddress'], fields['assignee']['displayName']))
assignee = "%s <%s>" % (fields['assignee']['displayName'], fields['assignee']['emailAddress'])
else:
assignee = "(none)"
cclist.add(getBZuser(fields['reporter']['emailAddress'], fields['reporter']['displayName']))
print issue['key'] + " " + fields['summary'],
sys.stdout.flush()
if not runAll:
if raw_input().upper() == "A":
runAll = True
if not renderedFields['description']:
renderedFields['description'] = u''
description = u"""This issue was converted from https://jira.toolserver.org/browse/{i[key]}.
Summary: {f[summary]}
Issue type: {f[issuetype][name]} - {f[issuetype][description]}
Priority: {f[priority][name]}
Status: {f[status][name]}
Assignee: {assignee}
-------------------------------------------------------------------------------
From: {f[reporter][displayName]} <{f[reporter][emailAddress]}>
Date: {f[created]:%a, %d %b %Y %T}
-------------------------------------------------------------------------------
{description}
""".format(i=issue, f=fields, assignee=assignee, description=htmltobz(renderedFields['description']))
params = bug_defaults.copy()
params['bug_severity'] = fields['priority']['name']
params['summary'] = issue['key'] + " " + fields['summary']
params['description'] = description
params['assigned_to'] = username # set assignee to the bug convertor initially
bug = bz.createbug(**params)
print " -- bz id ", bug.bug_id,
sys.stdout.flush()
ncs = 0
natt = 0
for comment,renderedComment in zip(fields['comment']['comments'], renderedFields['comment']['comments']):
ncs += 1
if 'author' in comment:
cclist.add(getBZuser(comment['author']['emailAddress'], comment['author']['displayName']))
else:
comment['author'] = {'displayName': "Anonymous", 'emailAddress': 'None'}
commenttext = u"""-------------------------------------------------------------------------------
From: {f[author][displayName]} <{f[author][emailAddress]}>
Date: {f[created]:%a, %d %b %Y %T}
-------------------------------------------------------------------------------
{description}
""".format(f=comment, description=htmltobz(renderedComment["body"]))
bug.addcomment(commenttext)
if 'attachment' in fields:
for attachment in
|
OuachitaHillsMinistries/OHCFS
|
htbin/app.py
|
Python
|
gpl-2.0
| 447
| 0.006711
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# If this page isn't working, try executing `chmod +x app.py` in terminal.
# enable debugging
import cgitb, cgi; cgitb.enable()
from classes import Factory
fieldStorage = cgi.FieldStorage()
factory = Factory.Factory()
webApp = factory.makeWebApp(fieldStorage)
def outputHeaders():
print "Content-Type: text/html"
print # signals end of headers
outputHeaders()
print webApp.getOutput()
|
sveetch/sveedocuments
|
sveedocuments/utils/objects.py
|
Python
|
mit
| 871
| 0.003476
|
# -*- coding: utf-8 -*-
def get_instance_children(obj, depth=0, sig=0):
    """
    Recursively retrieve an object's child relations
    @depth: integer limiting the depth of the children lookup, 0=unlimited
    """
children = []
    # For every child relation of the object
for child in obj._meta.get_all_related_objects():
        # Name of the accessor attribute
cname = child.get_accessor_name()
verbose_name = child.model._meta.verbose_name
        # Fetch all the objects of the relations
for elem in getattr(obj, cname).all():
followed = []
            # Recursive lookup of the children
if depth == 0 or sig < depth:
followed = get_instance_children(elem, depth=depth, sig=sig+1)
children.append( (verbose_name, unicode(elem), followed) )
return children
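
A minimal usage sketch (assuming `page` is any saved Django model instance with reverse relations):

    tree = get_instance_children(page, depth=2)
    for verbose_name, label, children in tree:
        print verbose_name, label, len(children)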
|
andela-ijubril/book-search
|
booker/bookstore/tests/test_models.py
|
Python
|
mit
| 527
| 0.003795
|
from django.test import TestCase
from bookstore.models import Book, Category
class InventoryModelTest(TestCase):
def test_string_representation_of_categories(self):
category = Category.objects.create(name="health", description="health category")
self.assertEqual(category.name, 'health')
def test_string_representation_of_books(self):
test_category2 = Category.objects.create()
book = Book(name='some text', category=test_category2)
        self.assertEqual(str(book), 'some text')
|
angelapper/odoo
|
addons/mrp_repair/mrp_repair.py
|
Python
|
agpl-3.0
| 35,968
| 0.004337
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from datetime import datetime
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
class mrp_repair(osv.osv):
_name = 'mrp.repair'
_inherit = 'mail.thread'
_description = 'Repair Order'
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates untaxed amount.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param field_name: Name of field.
@param arg: Argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = 0.0
for line in repair.operations:
res[repair.id] += line.price_subtotal
for line in repair.fees_lines:
res[repair.id] += line.price_subtotal
cur = repair.pricelist_id.currency_id
res[repair.id] = cur_obj.round(cr, uid, cur, res[repair.id])
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates taxed amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
#return {}.fromkeys(ids, 0)
cur_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for repair in self.browse(cr, uid, ids, context=context):
val = 0.0
cur = repair.pricelist_id.currency_id
for line in repair.operations:
#manage prices with tax included use compute_all instead of compute
if line.to_invoice and line.tax_id:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id.ids, line.price_unit, cur.id, line.product_uom_qty, line.product_id.id, repair.partner_id.id)
for c in tax_calculate['taxes']:
val += c['amount']
for line in repair.fees_lines:
if line.to_invoice and line.tax_id:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id.ids, line.price_unit, cur.id, line.product_uom_qty, line.product_id.id, repair.partner_id.id)
for c in tax_calculate['taxes']:
val += c['amount']
res[repair.id] = cur_obj.round(cr, uid, cur, val)
return res
def _amount_total(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates total amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
untax = self._amount_untaxed(cr, uid, ids, field_name, arg, context=context)
tax = self._amount_tax(cr, uid, ids, field_name, arg, context=context)
cur_obj = self.pool.get('res.currency')
for id in ids:
repair = self.browse(cr, uid, id, context=context)
cur = repair.pricelist_id.currency_id
res[id] = cur_obj.round(cr, uid, cur, untax.get(id, 0.0) + tax.get(id, 0.0))
return res
def _get_default_address(self, cr, uid, ids, field_name, arg, context=None):
res = {}
partner_obj = self.pool.get('res.partner')
for data in self.browse(cr, uid, ids, context=context):
adr_id = False
if data.partner_id:
adr_id = partner_obj.address_get(cr, uid, [data.partner_id.id], ['contact'])['contact']
res[data.id] = adr_id
return res
def _get_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('operations', 'in', ids)], context=context)
def _get_fee_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('fees_lines', 'in', ids)], context=context)
_columns = {
'name': fields.char('Repair Reference', required=True, states={'confirmed': [('readonly', True)]}, copy=False),
'product_id': fields.many2one('product.product', string='Product to Repair', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', select=True, help='Choose partner for whom the order will be invoiced and delivered.', states={'confirmed': [('readonly', True)]}),
'address_id': fields.many2one('res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}),
'default_address_id': fields.function(_get_default_address, type="many2one", relation="res.partner"),
'state': fields.selection([
('draft', 'Quotation'),
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('under_repair', 'Under Repair'),
('ready', 'Ready to Repair'),
('2binvoiced', 'To be Invoiced'),
('invoice_except', 'Invoice Exception'),
('done', 'Repaired')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order. \
\n* The \'Confirmed\' status is used when a user confirms the repair order. \
\n* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed. \
\n* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done. \
\n* The \'Done\' status is set when repairing is completed.\
\n* The \'Cancelled\' status is used when user cancel repair order.'),
'location_id': fields.many2one('stock.location', 'Current Location', select=True, required=True, readonly=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'location_dest_id': fields.many2one('stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'lot_id': fields.many2one('stock.production.lot', 'Repaired Lot', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot", oldname="prodlot_id"),
'guarantee_limit': fields.date('Warranty Expiration', states={'confirmed': [('readonly', True)]}),
'operations': fields.one2many('mrp.repair.line', 'repair_id', 'Operation Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True),
        'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help='Pricelist of the selected partner.'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoicing Address'),
'invoice_method': fields.selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")
], "Invoice Method",
select=True, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True, track_visibility="onchange", copy=False),
'mov
|
rpatterson/test-har
|
test_har/tests/test_drf.py
|
Python
|
gpl-3.0
| 563
| 0
|
"""
Test using HAR files in Python tests against the Django ReST framework.
"""
from django import http
from rest_framework import response
from test_har import django_rest_har as test_har
from test_har import tests
class HARDogfoodDRFTests(tests.HARDogfoodTestCase, test_har.HARTestCase):
"""
Test using HAR files in Python tests against the Django ReST framework.
"""
RESPONSE_TYPE = (http.HttpResponse, response.Response)
def test_runner(self):
"""
Ensure tests are running.
"""
        self.assertTrue(True)
|
bdcht/amoco
|
amoco/ui/render.py
|
Python
|
gpl-2.0
| 18,256
| 0.007066
|
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2006-2011 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
"""
render.py
=========
This module implements amoco's pygments interface to allow pretty printed
outputs of tables of tokens built from amoco's expressions and instructions.
The rendered texts are used as main inputs for graphic engines to build
their own views' objects.
A token is a tuple (t,s) where t is a Token type and s is a python string.
The highlight method uses the Token type to decorate the string s such that
the targeted renderer is able to show the string with foreground/background
colors and bold/underline/etc stroke attributes.
The API of this module is essentially the vltable class which implements its
str interface by calls to the highlight function, wrapping the pygments formatters
to allow colored output.
Note that more specialized formatting like HTML tables or even LaTeX blobs is
also possible.
If the pygments package is not found, all output defaults to a kind of
"NullFormatter" that simply ignores input tokens' types and assembles lines
into undecorated unicode strings.
"""
from io import BytesIO as StringIO
from amoco.config import conf
from amoco.logger import Log
logger = Log(__name__)
logger.debug("loading module")
import re
try:
from pygments.token import Token
from pygments.style import Style
from pygments.lexer import RegexLexer
from pygments.formatters import *
except ImportError:
logger.verbose("pygments package not found, no renderer defined")
has_pygments = False
# metaclass definition, with a syntax compatible with python2 and python3
class TokenType(type):
def __getattr__(cls, key):
return key
Token_base = TokenType("Token_base", (), {})
class Token(Token_base):
pass
class NullFormatter(object):
def __init__(self, **options):
self.options = options
def format(self, tokensource, outfile):
for t, v in tokensource:
outfile.write(v.encode("utf-8"))
Formats = {
"Null": NullFormatter(),
}
else:
logger.verbose("pygments package imported")
has_pygments = True
# define default dark style:
dark = {
Token.Literal : "#fff",
Token.Address : "#fb0",
Token.Orange : "#fb0",
Token.Constant : "#f30",
Token.Red : "#f30",
Token.Prefix : "#fff",
Token.Mnemonic : "bold",
Token.Register : "#33f",
Token.Memory : "#3ff",
Token.String : "#3f3",
Token.Segment : "#888",
Token.Comment : "#f8f",
Token.Green : "#8f8",
Token.Good : "bold #8f8",
Token.Name : "bold",
Token.Alert : "bold #f00",
Token.Column : "#000",
}
S = {}
    # define sub-tokens with Mark/Taint/Hide attribute,
# allowing to set tokens types like Token.Register.Taint
for k in dark.keys():
S[getattr(k,'Mark')] = "bg:#224"
S[getattr(k,'Taint')] = "bg:#422"
S[getattr(k,'Hide')] = "noinherit #222"
dark.update(S)
class DarkStyle(Style):
default_style = ""
styles = dark
# define default light style:
light = {
Token.Literal : "",
Token.Address : "#c30",
Token.Orange : "#c30",
Token.Constant : "#d00",
Token.Red : "#d00",
Token.Prefix : "#000",
Token.Mnemonic : "bold",
Token.Register : "#00f",
Token.Memory : "#00c0c0",
Token.String : "#008800",
Token.Segment : "#888",
Token.Comment : "#a3a",
Token.Green : "#008800",
Token.Good : "bold #008800",
Token.Name : "bold",
Token.Alert : "bold #f00",
Token.Column : "#fff",
}
S = {}
for k in light.keys():
S[getattr(k,'Mark')] = "bg:#aaaaff"
S[getattr(k,'Taint')] = "bg:#ffaaaa"
S[getattr(k,'Hide')] = "noinherit #fff"
light.update(S)
class LightStyle(Style):
default_style = ""
styles = light
# the default style is dark:
DefaultStyle = DarkStyle
# define supported formatters:
Formats = {
"Null" : NullFormatter(encoding="utf-8"),
"Terminal" : TerminalFormatter(style=DefaultStyle, encoding="utf-8"),
"Terminal256" : Terminal256Formatter(style=DefaultStyle, encoding="utf-8"),
"TerminalDark" : Terminal256Formatter(style=DarkStyle, encoding="utf-8"),
"TerminalLight": Terminal256Formatter(style=LightStyle, encoding="utf-8"),
"Html" : HtmlFormatter(style=LightStyle, encoding="utf-8"),
"HtmlDark" : HtmlFormatter(style=DarkStyle, encoding="utf-8"),
}
def highlight(toks, formatter=None, outfile=None):
"""
Pretty prints a list of tokens using optionally
a given formatter and an output io buffer.
If no explicit formatter is given, use the formatter from configuration
or the Null formatter if not specified in the amoco configuration.
If no output io buffer is given, a local StringIO is used.
The returned value is a decorated python string.
"""
formatter = formatter or Formats.get(conf.UI.formatter,"Null")
if isinstance(formatter, str):
formatter = Formats[formatter]
outfile = outfile or StringIO()
formatter.format(toks, outfile)
return outfile.getvalue().decode("utf-8")
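# Example (hypothetical tokens): with no formatter configured, the "Null"
# formatter is used and the call returns the undecorated string:
#   highlight([(Token.Mnemonic, "mov"), (Token.Register, " eax")])  # -> u"mov eax"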
def TokenListJoin(j, lst):
"""
insert token j (Literal if j is str) between elements of lst.
If lst[0] is a list, it is updated with following elements, else
a new list is returned.
Arguments:
j (token or str): the token tuple (Token.type, str) or
the str used as (Token.Literal, str) "join".
lst (list) : the list of token tuples to "join" with j.
Returns:
lst[0] updated with joined lst[1:] iff lst[0] is a list,
or a new list joined from elements of lst otherwise.
"""
# define join token:
if isinstance(j, str):
j = (Token.Literal, j)
# init output list:
res = lst[0] if len(lst)>0 else []
if not isinstance(res,list):
res = [res]
for x in lst[1:]:
res.append(j)
if isinstance(x,list):
res.extend(x)
else:
res.append(x)
return res
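# For instance (hypothetical tokens):
#   TokenListJoin(", ", [(Token.Register, "eax"), (Token.Register, "ebx")])
#   -> [(Token.Register, "eax"), (Token.Literal, ", "), (Token.Register, "ebx")]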
def LambdaTokenListJoin(j,f):
"""
returns a lambda that takes instruction i and returns the TokenListJoin
build from join argument j and lst argument f(i).
"""
return lambda i: TokenListJoin(j, f(i))
class vltable(object):
"""
A variable length table relies on pygments to pretty print tabulated data.
Arguments:
rows (list): optional argument with initial list of tokenrows.
formatter (Formatter): optional pygment's formatter to use
(defaults to conf.UI.formatter.)
outfile (file): optional output file passed to the formatter
(defaults to StringIO.)
Attributes:
rows (list of tokenrow): lines of the table, with tabulated data.
rowparams (dict): parameters associated with a line.
maxlength: maximum number of lines (default to infinity).
hidden_r (set): rows that should be hidden.
squash_r (bool): row is removed if True or empty if False.
hidden_c (set): columns that should be hidden.
squash_c (bool): column is removed if True or empty if False.
colsize (dict): mapping column index to its required width.
width (int): total width of the table.
        height (int): total height of the table.
nrows (int): total number of rows (lines).
ncols (int): total number of columns.
header (str): table header line (empty by default).
footer (str): table footer line (empty by default).
"""
def __init__(self, rows=None, formatter=None, outfile=None):
if rows is None:
rows = []
self.rows = rows
self.rowparams = {
"colsize": {},
"hidden_c": set()
|
brianwgoldman/LengthBiasCGP
|
stats.py
|
Python
|
bsd-2-clause
| 1,633
| 0
|
'''
Takes file names from the final/ folder and parses the information into
readable values and produces statistical measures. Use this module as an
executable to process all result information for a single problem, such as:
python stats.py final/multiply*.dat
Do not mix problems in a single run.
NOTE: You CANNOT use pypy for this as scipy is currently unsupported. Use
python 2.7 instead.
'''
from scipy import stats
import json
import sys
from os import path
from collections import defaultdict
from util import pretty_name, median_deviation
from scipy.stats.mstats import kruskalwallis
if __name__ == '__main__':
# Run through all of the files gathering different seeds into lists
statify = defaultdict(list)
active = defaultdict(list)
filecount = 0
    for filename in sys.argv[1:]:
base = path.basename(filename)
try:
problem, nodes, version, seed = base.split('_')
with open(filename, 'r') as f:
data = json.load(f)
statify[version].append(data[1]['evals'])
active[version].append(data[1]['phenotype'])
filecount += 1
except ValueError:
print filename, "FAILED"
print 'Files Successfully Loaded', filecount
    print 'Kruskal Wallis', kruskalwallis(*statify.values())
for version, data in statify.iteritems():
print '--------- %s ---------' % pretty_name[version]
print "MES, MAD", median_deviation(data)
print 'Active', median_deviation(active[version])
print 'Mann Whitney U against Normal',
print stats.mannwhitneyu(statify['normal'], data)
|
werbk/task-4.11
|
fixture/TestBase.py
|
Python
|
apache-2.0
| 2,024
| 0.00247
|
from selenium.webdriver.firefox.webdriver import WebDriver
from tests_group.group_lib import GroupBase
from tests_contract.contract_lib import ContactBase
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, user_name, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("%s" % user_name)
wd.find_element_by_id("LoginForm").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("%s" % password)
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
    def is_logged_in(self):
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0
    def is_logged_in_as(self, username):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text == '('+username+')'
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def ensure_login(self, user_name, password):
if self.is_logged_in():
if self.is_logged_in_as(user_name):
return
else:
self.logout()
self.login(user_name, password)
class BaseClass():
def __init__(self):
self.wd = WebDriver()
#self.wd.implicitly_wait(3)
self.session = SessionHelper(self)
self.group = GroupBase(self)
self.contact = ContactBase(self)
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get("http://localhost/addressbook/")
def restore(self):
wd = self.wd
wd.quit()
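
A minimal usage sketch (hypothetical credentials and flow):

    app = BaseClass()
    app.session.ensure_login("admin", "secret")
    # ... drive the app.group / app.contact helpers ...
    app.restore()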
|
farooqsheikhpk/Aspose.BarCode-for-Cloud
|
Examples/Python/generating-saving/cloud-storage/set-barcode-image-height-width-quality-settings.py
|
Python
|
mit
| 2,140
| 0.014953
|
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resouece data folder
#ExStart:1
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode.
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "PNG"
#Set height, Width and quality of the image
imageHeight = 1.0;
imageWidth = 1.0;
imageQuality = "default";
try:
    #invoke Aspose.BarCode Cloud SDK API to generate image with specific height, width, and quality along with auto size option
    response = barcodeApi.PutBarcodeGenerateFile(name, file=None, text=text, type=type, format=format, imageHeight=imageHeight, imageWidth=imageWidth, imageQuality=imageQuality)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1
|
richard-willowit/odoo
|
addons/board/controllers/main.py
|
Python
|
gpl-3.0
| 1,724
| 0.00116
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree as ElementTree
from odoo.http import Controller, route, request
class Board(Controller):
@route('/board/add_to_dashboard', type='json', auth='user')
def add_to_dashboard(self, action_id, context_to_save, domain, view_mode, name=''):
# Retrieve the 'My Dashboard' action from its xmlid
action = request.env.ref('board.open_board_my_dash_action')
if action and action['res_model'] == 'board.board' and action['views'][0][1] == 'form' and action_id:
# Maybe should check the content instead of model board.board ?
view_id = action['views'][0][0]
board = request.env['board.board'].fields_view_get(view_id, 'form')
if board and 'arch' in board:
xml = ElementTree.fromstring(board['arch'])
column = xml.find('./board/column')
if column is not None:
new_action = ElementTree.Element('action', {
'name': str(action_id),
'string': name,
'view_mode': view_mode,
'context': str(context_to_save),
'domain': str(domain)
})
column.insert(0, new_action)
                    arch = ElementTree.tostring(xml, encoding='unicode')
request.env['ir.ui.view.custom'].create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
})
return True
return False
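
A simplified sketch of the client-side call matching the route above (hypothetical values; Odoo wraps the params in a standard JSON-RPC envelope):

    POST /board/add_to_dashboard
    {"action_id": 42, "context_to_save": {}, "domain": [],
     "view_mode": "list", "name": "My filter"}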
|
Flexget/Flexget
|
flexget/tests/test_argparse.py
|
Python
|
mit
| 2,272
| 0.00088
|
from argparse import Action
from flexget.options import ArgumentParser
def test_subparser_nested_namespace():
p = ArgumentParser()
p.add_argument('--outer')
p.add_subparsers(nested_namespaces=True)
sub = p.add_subparser('sub')
sub.add_argument('--inner')
sub.add_subparsers()
subsub = sub.add_subparser('subsub')
subsub.add_argument('--innerinner')
    result = p.parse_args(['--outer', 'a', 'sub', '--inner', 'b', 'subsub', '--innerinner', 'c'])
assert result.outer == 'a'
    # First subparser values should be nested under subparser name
assert result.sub.inner == 'b'
assert not hasattr(result, 'inner')
# The second layer did not define nested_namespaces, results should be in first subparser namespace
assert result.sub.innerinner == 'c'
assert not hasattr(result, 'innerinner')
def test_subparser_parent_defaults():
p = ArgumentParser()
p.add_argument('--a')
p.set_post_defaults(a='default')
p.add_subparsers()
p.add_subparser('sub')
p.add_subparser('sub_with_default', parent_defaults={'a': 'sub_default'})
# Make sure normal default works
result = p.parse_args(['sub'])
assert result.a == 'default'
# Test subparser default
result = p.parse_args(['sub_with_default'])
assert result.a == 'sub_default'
# Subparser default should not override explicit one
result = p.parse_args(['--a', 'manual', 'sub_with_default'])
assert result.a == 'manual'
def test_post_defaults():
class CustomAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
if not hasattr(namespace, 'post_set'):
namespace.post_set = 'custom'
p = ArgumentParser()
p.add_argument('--post-set')
p.add_argument('--custom', action=CustomAction, nargs=0)
p.set_post_defaults(post_set='default')
# Explicitly specified, no defaults should be set
result = p.parse_args(['--post-set', 'manual'])
assert result.post_set == 'manual'
# Nothing supplied, should get the post set default
result = p.parse_args([])
assert result.post_set == 'default'
# Custom action should be allowed to set default
result = p.parse_args(['--custom'])
assert result.post_set == 'custom'
|
uw-it-cte/uw-restclients
|
restclients/test/myplan.py
|
Python
|
apache-2.0
| 3,193
| 0.002819
|
from django.test import TestCase
from restclients.myplan import get_plan
class MyPlanTestData(TestCase):
def test_javerage(self):
plan = get_plan(regid="9136CCB8F66711D5BE060004AC494FFE", year=2013, quarter="spring", terms=4)
self.assertEquals(len(plan.terms), 4)
self.assertEquals(plan.terms[0].year, 2013)
self.assertEquals(plan.terms[1].year, 2013)
self.assertEquals(plan.terms[2].year, 2013)
self.assertEquals(plan.terms[3].year, 2014)
self.assertEquals(plan.terms[0].quarter, 'Spring')
self.assertEquals(plan.terms[1].quarter, 'Summer')
self.assertEquals(plan.terms[2].quarter, 'Autumn')
self.assertEquals(plan.terms[3].quarter, 'Winter')
self.assertEquals(len(plan.terms[0].courses), 2)
self.assertEquals(len(plan.terms[1].courses), 1)
self.assertEquals(len(plan.terms[2].courses), 0)
self.assertEquals(len(plan.terms[3].courses), 0)
term_data = plan.terms[0]
self.assertEquals(term_data.course_search_href,
"https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/course")
self.assertEquals(term_data.degree_audit_href,
"https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/audit/degree")
self.assertEquals(term_data.myplan_href,
"https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/plan/20132")
self.assertEquals(term_data.registration_href,
"https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/registration/20132")
self.assertEquals(term_data.registered_courses_count, 0)
self.assertEquals(term_data.registered_sections_count, 0)
self.assertEquals(term_data.courses[0].registrations_available, True)
self.assertEquals(term_data.courses[0].curriculum_abbr, 'CSE')
self.assertEquals(term_data.courses[0].course_number, '101')
self.assertEquals(len(term_data.courses[0].sections), 3)
self.assertEquals(term_data.courses[0].sections[0].section_id, 'A')
self.assertEquals(term_data.courses[0].sections[1].section_id, 'AA')
self.assertEquals(term_data.courses[0].sections[2].section_id, 'AB')
def test_json(self):
plan = get_plan(regid="9136CCB8F66711D5BE060004AC494FFE",
year=2013, quarter="spring",
terms=4)
        json_data = plan.json_data()
term_data = json_data["terms"][0]
self.assertEquals(term_data["courses"][0]["sections"][1]["section_id"], "AA")
self.assertEquals(term_data["registered_courses_count"], 0)
self.assertEquals(term_data["registration_href"],
"https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/reg
|
istration/20132")
self.assertEquals(term_data["course_search_href"],
"https://uwkseval.cac.washington.edu/student/myplan/mplogin/netid?rd=/student/myplan/course")
self.assertEquals(term_data["quarter"], "Spring")
|
styk-tv/offlineimap
|
offlineimap/emailutil.py
|
Python
|
gpl-2.0
| 1,573
| 0.001271
|
# Some useful functions to extract data out of emails
# Copyright (C) 2002-2012 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import email
from email.Parser import Parser as MailParser
import time
def get_message_date(content, header='Date'):
"""
Parses mail and returns resulting timestamp.
:param header: the header to extract date from;
:returns: timestamp or `None` in the case of failure.
"""
message = MailParser().parsestr(content, True)
dateheader = message.get(header)
# parsedate_tz returns a 10-tuple that can be passed to mktime_tz
    # Will be None if missing or not in a valid format. Note that
# indexes 6, 7, and 8 of the result tuple are not usable.
datetuple = email.utils.parsedate_tz(dateheader)
if datetuple is None:
return None
return email.utils.mktime_tz(datetuple)
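
A minimal usage sketch (hypothetical message text):

    raw = "Date: Mon, 01 Jan 2007 10:00:00 +0100\n\nbody"
    get_message_date(raw)  # -> 1167642000 (UTC epoch seconds)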
|
imposeren/django-happenings
|
tests/integration_tests/event_factory.py
|
Python
|
bsd-2-clause
| 3,323
| 0.000602
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
import six
from happenings.models import Event
@override_settings(CALENDAR_SHOW_LIST=True)
class SetMeUp(TestCase):
@classmethod
def setUpClass(cls):
super(SetMeUp, cls).setUpClass()
cls.user = User.objects.create_user(
'foo', 'bar@example.com', 'secret'
)
html = '">%d</a><a class='
cls.cal_str = lambda self, day: html % day
cls.event_div = '<div class="calendar-event">'
@classmethod
def tearDownClass(cls):
cls.user.delete()
def clean_whitespace(self, response):
"""Remove all newlines and all occurances of multiple spaces."""
if hasattr(response, 'content'):
is_response = True
content = response.content
else:
is_response = False
content = response
if isinstance(content, six.text_type):
content = content.encode('utf-8')
content = content.replace(b'\n', b'')
for num_spaces in range(7, 2, -1):
# reduce all multiple spaces to 2 spaces.
            # Processing only `num_spaces=3` would give the same result, but more slowly
while content.find(b' '*num_spaces) >= 0:
content = content.replace(b' '*num_spaces, b' '*2)
content = content.replace(b' '*2, b'')
if is_response:
response.content = content
else:
content = content.decode('utf-8')
return content
def create_event(created_by, title, description, all_day=False,
start_date=None, end_date=None, categories=None, tags=None,
repeat='NEVER', end_repeat=None, full=True, utc=False):
"""
A factory method for creating events. If start_date is supplied,
end_date must also be supplied, and they must both be either lists
or tuples e.g. start_date=[2014, 2, 2], end_date=[2014, 2, 3].
"""
    if start_date and end_date:
        # Set the start and end dates to local tz
if utc:
val = timezone.utc
else:
val = timezone.get_default_timezone()
start_date = timezone.make_aware(datetime.datetime(*start_date), val)
end_date = timezone.make_aware(datetime.datetime(*end_date), val)
elif start_date and not end_date or end_date and not start_date:
raise ValueError("Both start_date and end_date must be supplied or not"
" supplied at all when using create_event")
else:
start_date = timezone.now()
end_date = timezone.now()
event = Event.objects.create(
start_date=start_date,
end_date=end_date,
all_day=all_day,
created_by=created_by,
title=title,
description=description,
repeat=repeat,
end_repeat=end_repeat
)
if categories:
for category in categories:
event.categories.create(title=category)
if tags:
for tag in tags:
event.tags.create(name=tag)
if full:
event.full_clean()
event.save()
return event
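
For instance (assuming a saved `user`):

    event = create_event(user, title="Launch party", description="...",
                         start_date=[2014, 2, 2], end_date=[2014, 2, 3],
                         tags=["launch"])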
|
GoUbiq/pyexchange
|
pyexchange/__init__.py
|
Python
|
apache-2.0
| 853
| 0.004689
|
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from .exchange2010 import Exchange2010Service # noqa
from .connection import ExchangeNTLMAuthConnection # noqa
from .connection import ExchangeHTTPBasicAuthConnection
# Silence notification of no default logging handler
log = logging.getLogger("pyexchange")
class NullHandler(logging.Handler):
def emit(self, record):
pass
log.addHandler(NullHandler())
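# Hedged usage sketch wiring the classes imported above (the URL and
# credentials are placeholders, not a verified end-to-end flow):
#   connection = ExchangeNTLMAuthConnection(url='https://mail.example.com/EWS/Exchange.asmx',
#                                           username='DOMAIN\\user', password='secret')
#   service = Exchange2010Service(connection)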
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/ship/crafted/weapon/shared_shield_effectiveness_intensifier_mk4.py
|
Python
|
mit
| 512
| 0.042969
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/weapon/shared_shield_effectiveness_intensifier_mk4.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","shield_effectiveness_
|
intensifier_mk4")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
KaiRo-at/socorro
|
alembic/versions/22e4e60e03f_bug_867387_bixie_dra.py
|
Python
|
mpl-2.0
| 9,842
| 0.015241
|
"""bug 867387 Bixie draft schema
Revision ID: 22e4e60e03f
Revises: 37004fc6e41e
Create Date: 2013-05-10 13:20:35.750954
"""
# revision identifiers, used by Alembic.
revision = '22e4e60e03f'
down_revision = '37004fc6e41e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import sqlalchemy.types as types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
name = 'citext'
def get_col_spec(self):
return 'CITEXT'
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
def __repr__(self):
return "citext"
class JSON(types.UserDefinedType):
name = 'json'
def get_col_spec(self):
return 'JSON'
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
def __repr__(self):
return "json"
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute(u'CREATE SCHEMA bixie')
op.create_table(u'product_versions',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', CITEXT(), nullable=True),
sa.Column(u'release_version', sa.TEXT(), nullable=True),
sa.Column(u'major_version', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'full_urls',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'url', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'raw_product_releases',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'version', sa.TEXT(), nullable=False),
sa.Column(u'build', sa.TEXT(), nullable=False),
sa.Column(u'build_type', CITEXT(), nullable=False),
sa.Column(u'platform', sa.TEXT(), nullable=False),
sa.Column(u'product_name', CITEXT(), nullable=False),
sa.Column(u'repository', sa.TEXT(), nullable=False),
sa.Column(u'stability', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'products',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'crashes_normalized',
sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
nullable=False),
sa.Column(u'signature_id', sa.TEXT(), nullable=False),
sa.Column(u'error_message_id', JSON(), nullable=False),
sa.Column(u'product_id', sa.TEXT(), nullable=True),
sa.Column(u'user_agent_id', sa.TEXT(), nullable=True),
    sa.PrimaryKeyConstraint(u'crash_id'),
    schema=u'bixie'
)
op.create_table(u'hosts',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'signatures',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'signature', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'crashes',
sa.Column(u'crash_id', postgresql.UUID(), autoincrement=False,
nullable=False),
sa.Column(u'signature', sa.TEXT(), nullable=False),
sa.Column(u'error', JSON(), nullable=False),
sa.Column(u'product', sa.TEXT(), nullable=True),
sa.Column(u'protocol', sa.TEXT(), nullable=True),
sa.Column(u'hostname', sa.TEXT(), nullable=True),
sa.Column(u'username', sa.TEXT(), nullable=True),
sa.Column(u'port', sa.TEXT(), nullable=True),
sa.Column(u'path', sa.TEXT(), nullable=True),
sa.Column(u'query', sa.TEXT(), nullable=True),
sa.Column(u'full_url', sa.TEXT(), nullable=True),
sa.Column(u'user_agent', sa.TEXT(), nullable=True),
sa.Column(u'success', sa.BOOLEAN(), nullable=True),
sa.Column(u'client_crash_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'client_submitted_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'processor_started_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.Column(u'processor_completed_datetime',
postgresql.TIMESTAMP(timezone=True), nullable=True),
sa.PrimaryKeyConstraint(u'crash_id'),
schema=u'bixie'
)
op.create_table(u'release_channels',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', CITEXT(), nullable=False),
sa.Column(u'sort', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'os_names',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'error_messages',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_version_adi',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_version_id', sa.INTEGER(), nullable=False),
sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
sa.Column(u'os_name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'raw_adi',
sa.Column(u'adi_count', sa.BIGINT(), nullable=True),
sa.Column(u'date', sa.DATE(), nullable=True),
sa.Column(u'product_name', sa.TEXT(), nullable=True),
sa.Column(u'product_os_platform', sa.TEXT(), nullable=True),
sa.Column(u'product_os_version', sa.TEXT(), nullable=True),
sa.Column(u'product_version', sa.TEXT(), nullable=True),
sa.Column(u'build', sa.TEXT(), nullable=True),
sa.Column(u'build_channel', sa.TEXT(), nullable=True),
sa.Column(u'product_guid', sa.TEXT(), nullable=True),
sa.Column(u'received_at', postgresql.TIMESTAMP(timezone=True),
nullable=True),
sa.PrimaryKeyConstraint(),
schema=u'bixie'
)
op.create_table(u'users',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'name', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_adi',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_id', sa.INTEGER(), nullable=False),
sa.Column(u'adi_count', sa.BIGINT(), nullable=False),
sa.Column(u'adi_date', sa.INTEGER(), nullable=False),
sa.Column(u'os_name', CITEXT(), nullable=False),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'user_agents',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'error_message_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['error_message_id'],
[u'bixie.error_messages.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_users',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.Column(u'user_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.ForeignKeyConstraint(['user_id'], [u'bixie.users.id'], ),
sa.PrimaryKeyConstraint(u'id'),
schema=u'bixie'
)
op.create_table(u'product_release_channels',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'release_channel_id', sa.INTEGER(), nullable=True),
sa.Column(u'product_id', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['product_id'], [u'bixie.products.id'], ),
sa.ForeignKeyConstraint(['release_channel_id'],
[u'bixie.release_channels.id'], ),
    sa.PrimaryKeyConstraint(u'id'),
    schema=u'bixie'
    )
|
cosminbasca/rdftools
|
rdftools/__version__.py
|
Python
|
apache-2.0
| 707
| 0.001414
|
#
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'basca'
version = (0, 9, 5)
str_version = '.'.join([str(v) for v in version])
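# e.g. version (0, 9, 5) -> str_version '0.9.5'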
|
vsemionov/wordbase
|
src/wordbase/pyparsing.py
|
Python
|
bsd-3-clause
| 146,612
| 0.015408
|
# module pyparsing.py
#
# Copyright (c) 2003-2011 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.6"
__versionTime__ = "26 June 2011 10:53"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
alphas = string.ascii_lowercase + string.ascii_uppercase
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, enumerate, sorted, reversed, list, tuple, set, any, all]
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
"""base exception class for
|
all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join( [line_str[:line_column],
markerString, line_str[line_column:]])
return line_str.strip()
def __dir__(self):
return "loc msg pstr parserElement lineno col line " \
"markInputLine __str__ __repr__".split()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like C{ParseFatalException}, but thrown internally when an
C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
|
jlmdegoede/Invoicegen
|
hour_registration/views.py
|
Python
|
gpl-3.0
| 5,189
| 0.000964
|
from .models import HourRegistration
from orders.models import Product
from django.utils import timezone
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from datetime import datetime
import pytz
from django.contrib.auth.decorators import permission_required
@login_required
@permission_required('hour_registration.add_hourregistration', 'hour_registration.change_hourregistration')
def start_time_tracking(request, product_id):
    product = Product.objects.get(id=product_id)
existing_time = HourRegistration.objects.filter(end=None)
if existing_time.count() == 0:
time = HourRegistration(product=product, start=timezone.now())
time.save()
return JsonResponse({'success': True, 'start': format_time_to_local(time.start)})
else:
return existing_time_tracking(request)
@login_required
@permission_required('hour_registration.change_hourregistration')
def end_time_tracking(request, product_id):
product = Product.objects.get(id=product_id)
time = HourRegistration.objects.filter(product=product, end=None)
if time.count() > 0:
time = time[0]
if not time.end:
time.end = timezone.now()
time.save()
return JsonResponse({'success': True})
@login_required
@permission_required('hour_registration.change_hourregistration')
def add_description_to_hourregistration(request):
if request.method == 'POST':
return add_description_to_hourregistration_post(request)
if request.method == 'GET':
return get_description_to_hourregistration(request)
def add_description_to_hourregistration_post(request):
product_id = request.POST['product_id']
product = Product.objects.get(id=product_id)
time = HourRegistration.objects.filter(product=product, end=None)
if time.count() > 0:
time = time[0]
description = request.POST['description']
time.description = description
time.save()
return JsonResponse({'success': True})
return JsonResponse({'error': 'No open HR found'})
def get_description_to_hourregistration(request):
product_id = request.GET['product_id']
product = Product.objects.get(id=product_id)
time = HourRegistration.objects.filter(product=product, end=None)
if time.count() > 0:
time = time[0]
return JsonResponse({'description': time.description})
return JsonResponse({'error': 'No HR object found'})
@login_required
@permission_required('hour_registration.view_hourregistration')
def existing_time_tracking(request):
time = HourRegistration.objects.filter(end=None).first()
if time is not None:
product = Product.objects.get(id=time.product_id)
return JsonResponse({'pk': time.product_id, 'start': format_time_to_local(time.start), 'title': product.title})
return JsonResponse({"existing": 'False'})
@login_required
@permission_required('hour_registration.delete_hourregistration')
def delete_time_tracking(request):
try:
time_id = request.POST['time_id']
time = HourRegistration.objects.get(pk=time_id)
time.delete()
return JsonResponse({'success': 'true'})
except HourRegistration.DoesNotExist:
return JsonResponse({'error': 'HR object not found'})
@login_required
@permission_required('hour_registration.change_hourregistration')
def set_end_time(request):
if 'pk' in request.POST and 'endDate' in request.POST and 'endTime' in request.POST:
enddate = request.POST['endDate']
endtime = request.POST['endTime']
hour_id = request.POST['pk']
end_date = format_date_and_time(enddate, endtime)
time = HourRegistration.objects.get(pk=hour_id)
time.end = pytz.timezone('Europe/Amsterdam').localize(end_date)
time.save()
return JsonResponse({'success': 'true'})
return JsonResponse({'success': 'false'})
@login_required
@permission_required('hour_registration.add_hourregistration')
def create_new_hour_registration(request):
if 'startDate' in request.POST and 'startTime' in request.POST \
and 'endDate' in request.POST \
and 'endTime' in request.POST \
and 'product_id' in request.POST:
startdate = request.POST['startDate']
starttime = request.POST['startTime']
enddate = request.POST['endDate']
endtime = request.POST['endTime']
product_id = request.POST['product_id']
description = ""
if 'description' in request.POST:
description = request.POST['description']
start_date = format_date_and_time(startdate, starttime)
end_date = format_date_and_time(enddate, endtime)
product = Product.objects.get(pk=product_id)
time = HourRegistration(start=start_date, end=end_date, product=product, description=description)
time.save()
return JsonResponse({'success': 'true'})
return JsonResponse({'success': 'false'})
def format_time_to_local(time):
return timezone.localtime(time).strftime('%d-%m-%Y %H:%M:%S')
def format_date_and_time(date, time):
return datetime.strptime(date + ' ' + time, '%d-%m-%Y %H:%M')
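# e.g. format_date_and_time('24-12-2023', '13:30') -> datetime(2023, 12, 24, 13, 30)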
|
skg-net/ansible
|
lib/ansible/modules/notification/say.py
|
Python
|
gpl-3.0
| 2,275
| 0.002198
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Michael DeHaan <michael@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: say
version_added: "1.2"
short_description: Makes a computer speak.
description:
- makes a computer speak! Amuse your friends, annoy your coworkers!
notes:
- In 2.5, this module has been renamed from C(osx_say) to M(say).
- If you like this module, you may also be interested in the osx_say callback plugin.
options:
msg:
description:
What to say
required: true
voice:
description:
What voice to use
required: false
requirements: [ say or espeak or espeak-ng ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
- say:
msg: '{{ inventory_hostname }} is all done'
voice: Zarvox
delegate_to: localhost
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
def say(module, executable, msg, voice):
cmd = [executable, msg]
if voice:
cmd.extend(('-v', voice))
module.run_command(cmd, check_rc=True)
def main():
module = AnsibleModule(
argument_spec=dict(
msg=dict(required=True),
voice=dict(required=False),
),
supports_check_mode=True
)
msg = module.params['msg']
voice = module.params['voice']
possibles = ('say', 'espeak', 'espeak-ng')
if get_platform() != 'Darwin':
        # If not on macOS, any 'say' binary found may be the GNUstep tool,
        # which doesn't support the 'voice' parameter.
voice = None
for possible in possibles:
executable = module.get_bin_path(possible)
if executable:
break
else:
module.fail_json(msg='Unable to find either %s' % ', '.join(possibles))
if module.check_mode:
module.exit_json(msg=msg, changed=False)
say(module, executable, msg, voice)
module.exit_json(msg=msg, changed=True)
if __name__ == '__main__':
main()
|
guigovedovato/python
|
api/clienteApi.py
|
Python
|
gpl-3.0
| 1,042
| 0.002879
|
from flask import request
from flask_restful import Resource
import json
from core.bo.clienteBo import ClienteBo
class Cliente(Resource):
def __init__(self):
self.cliente = ClienteBo()
def get(self, parameter=""):
if parameter == "":
|
return self.cliente.get_all(), 201
else:
param
|
eter = json.loads(parameter)
if parameter.get('id'):
return self.cliente.get_by_id(parameter["id"]), 201
elif parameter.get('document'):
return self.cliente.get_document(parameter["document"], parameter["cliente"]), 201
elif parameter.get('board'):
return self.cliente.get_board(), 201
else:
return self.cliente.get_by_filter(parameter), 201
def put(self, parameter):
cliente = request.json
return self.cliente.update(parameter, cliente), 201
def post(self, parameter):
file = request.files['arquivo']
return self.cliente.upload(parameter, file), 201
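# Hedged wiring sketch following flask_restful conventions (the routes are
# assumptions, not taken from this repository):
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Cliente, '/cliente', '/cliente/<string:parameter>')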
|
gordenbrown51/damnvid
|
dLoader.py
|
Python
|
gpl-3.0
| 3,966
| 0.037317
|
# -*- coding: utf-8 -*-
from dCore import *
from dConstants import *
from dLog import *
from dThread import *
from dModules import *
class DamnVideoLoader(DamnThread):
def __init__(self, parent, uris, thengo=False, feedback=True, allownonmodules=True):
DamnThread.__init__(self)
self.uris = []
if type(uris) not in (type(()), type([])):
uris = [uris]
for i in uris:
self.uris.append(DamnUnicode(i))
self.parent = parent
self.thengo = thengo
self.feedback = feedback
self.done = False
self.result = None
self.allownonmodules = allownonmodules
		Damnlog('DamnVideoLoader spawned with parameters: parent =',parent,'; thengo?',thengo,'; feedback?',feedback,'; allow non-modules?',allownonmodules)
def go(self):
if self.feedback:
self.parent.toggleLoading(True)
self.vidLoop(self.uris)
self.done = True
if self.feedback:
self.parent.toggleLoading(False)
else:
while self.done:
time.sleep(.1)
def postEvent(self, info):
if self.feedback:
DV.postEvent(self.parent, (DV.evt_loading, info))
def getVidName(self, uri):
return self.parent.getVidName(uri)
	def addValid(self, meta):
meta['original'] = self.originaluri
self.result = meta
self.postEvent({'meta':meta, 'go':self.thengo})
def SetStatusText(self, status):
self.postEvent({'status':status})
def showDialog(self, title, content, icon):
self.postEvent({'dialog':(title, content, icon)})
def vidLoop(self, uris):
Damnlog('Starting vidLoop with URIs:',uris)
for uri in uris:
Damnlog('vidLoop considering URI:',uri)
self.originaluri = uri
bymodule = False
for module in DamnIterModules(False):
Damnlog('Trying module',module['class'],'for URI',uri)
mod = module['class'](uri)
if mod.validURI():
Damnlog('Module has been chosen for URI',uri,':',mod)
mod.addVid(self)
bymodule = True
break
if not bymodule:
Damnlog('No module found for URI:',uri)
if not self.allownonmodules:
					Damnlog('DamnVideoLoader exiting because no module was found and non-modules are not allowed.')
self.result = None
return
if REGEX_HTTP_GENERIC.match(uri):
Damnlog('HTTP regex still matches URI:',uri)
name = self.getVidName(uri)
if name == DV.l('Unknown title'):
name = REGEX_HTTP_EXTRACT_FILENAME.sub('', uri)
self.addValid({'name':name, 'profile':DV.prefs.get('defaultwebprofile'), 'profilemodified':False, 'fromfile':name, 'dirname':REGEX_HTTP_EXTRACT_DIRNAME.sub('\\1/', uri), 'uri':uri, 'status':DV.l('Pending.'), 'icon':'generic'})
else:
# It's a file or a directory
if os.path.isdir(uri):
Damnlog('URI',uri,'is a directory.')
if DV.prefs.get('DirRecursion') == 'True':
for i in os.listdir(uri):
self.vidLoop([uri + DV.sep + i]) # This is recursive; if i is a directory, this block will be executed for it too
else:
if len(uris) == 1: # Only one dir, so an alert here is tolerable
self.showDialog(DV.l('Recursion is disabled.'), DV.l('This is a directory, but recursion is disabled in the preferences. Please enable it if you want DamnVid to go through directories.'), wx.OK | wx.ICON_EXCLAMATION)
else:
self.SetStatusText(DV.l('Skipped ') + uri + DV.l(' (directory recursion disabled).'))
else:
Damnlog('URI',uri,'is a file.')
filename = os.path.basename(uri)
if uri in self.parent.videos:
self.SetStatusText(DV.l('Skipped ') + filename + DV.l(' (already in list).'))
if len(uris) == 1: # There's only one file, so an alert here is tolerable
self.showDialog(DV.l('Duplicate found'), DV.l('This video is already in the list!'), wx.ICON_EXCLAMATION | wx.OK)
else:
self.addValid({'name':filename[0:filename.rfind('.')], 'profile':DV.prefs.get('defaultprofile'), 'profilemodified':False, 'fromfile':filename, 'uri':uri, 'dirname':os.path.dirname(uri), 'status':DV.l('Pending.'), 'icon':'damnvid'})
DV.videoLoader = DamnVideoLoader
|
flacjacket/sympy
|
examples/intermediate/vandermonde.py
|
Python
|
bsd-3-clause
| 4,652
| 0.006449
|
#!/usr/bin/env python
"""Vandermonde matrix example
Demonstrates matrix computations using the Vandermonde matrix.
* http://en.wikipedia.org/wiki/Vandermonde_matrix
"""
from sympy import Matrix, pprint, Rational, sqrt, symbols, Symbol, zeros
def symbol_gen(sym_str):
"""Symbol generator
Generates sym_str_n where n is the number of times the generator
has been called.
"""
n = 0
while True:
yield Symbol("%s_%d" % (sym_str, n))
n += 1
def comb_w_rep(n, k):
"""Combinations with repetition
Returns the list of k combinations with repetition from n objects.
"""
if k == 0:
return [[]]
combs = [[i] for i in range(n)]
for i in range(k - 1):
curr = []
for p in combs:
for m in range(p[-1], n):
curr.append(p + [m])
combs = curr
return combs
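# e.g. comb_w_rep(3, 2) -> [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 2]]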
def vandermonde(order, dim=1, syms='a b c d'):
"""Comptutes a Vandermonde matrix of given order and dimension.
Define syms to give beginning strings for temporary variables.
    Returns the Matrix, the temporary variables, and the terms for the
    polynomials.
"""
syms = syms.split()
if len(syms) < dim:
new_syms = []
        for i in range(dim - len(syms)):
            new_syms.append(syms[i % len(syms)] + str(i / len(syms)))
syms.extend(new_syms)
terms = []
for i in range(order + 1):
terms.extend(comb_w_rep(dim, i))
rank = len(terms)
V = zeros(rank)
generators = [symbol_gen(syms[i]) for i in range(dim)]
all_syms = []
for i in range(rank):
row_syms = [g.next() for g in generators]
all_syms.append(row_syms)
for j,term in enumerate(terms):
v_entry = 1
for k in term:
v_entry *= row_syms[k]
V[i*rank + j] = v_entry
return V, all_syms, terms
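# e.g. vandermonde(1) returns the 2x2 matrix [[1, a_0], [1, a_1]]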
def gen_poly(points, order, syms):
"""Generates a polynomial using a Vandermonde system"""
num_pts = len(points)
if num_pts == 0:
raise ValueError("Must provide points")
dim = len(points[0]) - 1
if dim > len(syms):
raise ValueError("Must provide at lease %d symbols for the polynomial" % dim)
V, tmp_syms, terms = vandermonde(order, dim)
if num_pts < V.shape[0]:
raise ValueError(
"Must provide %d points for order %d, dimension "\
"%d polynomial, given %d points" % \
(V.shape[0], order, dim, num_pts))
elif num_pts > V.shape[0]:
print "gen_poly given %d points but only requires %d, "\
"continuing using the first %d points" % \
(num_pts, V.shape[0], V.shape[0])
num_pts = V.shape[0]
subs_dict = {}
for j in range(dim):
for i in range(num_pts):
subs_dict[tmp_syms[i][j]] = points[i][j]
V_pts = V.subs(subs_dict)
V_inv = V_pts.inv()
coeffs = V_inv.multiply(Matrix([points[i][-1] for i in xrange(num_pts)]))
f = 0
for j,term in enumerate(terms):
t = 1
for k in term:
t *= syms[k]
f += coeffs[j]*t
return f
def main():
order = 2
V, tmp_syms, _ = vandermonde(order)
print "Vandermonde matrix of order 2 in 1 dimension"
pprint(V)
print '-'*79
print "Computing the determinate and comparing to \sum_{0<i<j<=3}(a_j - a_i)"
det_sum = 1
for j in range(order + 1):
for i in range(j):
det_sum *= (tmp_syms[j][0] - tmp_syms[i][0])
print """
det(V) = %(det)s
\sum = %(sum)s
= %(sum_expand)s
""" % { "det": V.det(),
"sum": det_sum,
"sum_expand": det_sum.expand(),
}
print '-'*79
print "Polynomial fitting with a Vandermonde Matrix:"
x,y,z = symbols('x,y,z')
points = [(0,3), (1,2), (2,3)]
print """
Quadratic function, represented by 3 points:
points = %(pts)s
f = %(f)s
""" % { "pts" : points,
"f" : gen_poly(points, 2, [x]),
}
points = [(0, 1, 1), (1, 0, 0), (1, 1, 0), (Rational(1, 2), 0, 0),
(0, Rational(1, 2), 0), (Rational(1, 2), Rational(1, 2), 0)]
print """
2D Quadratic function, represented by 6 points:
points = %(pts)s
f = %(f)s
""" % { "pts" : points,
"f" : gen_poly(points, 2, [x, y]),
}
points = [(0, 1, 1, 1), (1, 1, 0, 0), (1, 0, 1, 0), (1, 1, 1, 1)]
print """
3D linear function, represented by 4 points:
points = %(pts)s
f = %(f)s
""" % { "pts" : points,
"f" : gen_poly(points, 1, [x, y, z]),
}
if __name__ == "__main__":
main()
|
victorshch/axiomatic
|
test_axiom_system.py
|
Python
|
gpl-3.0
| 402
| 0
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from axiomatic.base import AxiomSystem
from axiomatic.elementary_conditions import MinMaxAxiom
# l, r, pmin, pmax
params = [1, 1, -0.8, 0.8]
axiom_list = [MinMaxAxiom(params)]
ts = pd.DataFrame(np.random.random((10, 2)))
print(ts)
print(MinMaxAxiom(params).run(ts, dict()))
now = AxiomSystem(axiom_list)
print(now.perform_marking(ts))
|
mikaelboman/home-assistant
|
homeassistant/components/proximity.py
|
Python
|
mit
| 8,866
| 0
|
"""
Support for tracking the proximity of a device.
Component to monitor the proximity of devices to a particular zone and the
direction of travel.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/proximity/
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.util.location import distance
DEPENDENCIES = ['zone', 'device_tracker']
DOMAIN = 'proximity'
# Default tolerance
DEFAULT_TOLERANCE = 1
# Default zone
DEFAULT_PROXIMITY_ZONE = 'home'
# Entity attributes
ATTR_DIST_FROM = 'dist_to_zone'
ATTR_DIR_OF_TRAVEL = 'dir_of_travel'
ATTR_NEAREST = 'nearest'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config): # pylint: disable=too-many-locals,too-many-statements
"""Get the zones and offsets from configuration.yaml."""
ignored_zones = []
if 'ignored_zones' in config[DOMAIN]:
for variable in config[DOMAIN]['ignored_zones']:
ignored_zones.append(variable)
# Get the devices from configuration.yaml.
if 'devices' not in config[DOMAIN]:
_LOGGER.error('devices not found in config')
return False
proximity_devices = []
for variable in config[DOMAIN]['devices']:
proximity_devices.append(variable)
# Get the direction of travel tolerance from configuration.yaml.
tolerance = config[DOMAIN].get('tolerance', DEFAULT_TOLERANCE)
# Get the zone to monitor proximity to from configuration.yaml.
proximity_zone = config[DOMAIN].get('zone', DEFAULT_PROXIMITY_ZONE)
entity_id = DOMAIN + '.' + proximity_zone
proximity_zone = 'zone.' + proximity_zone
state = hass.states.get(proximity_zone)
zone_friendly_name = (state.name).lower()
# Set the default values.
dist_to_zone = 'not set'
dir_of_travel = 'not set'
nearest = 'not set'
proximity = Proximity(hass, zone_friendly_name, dist_to_zone,
dir_of_travel, nearest, ignored_zones,
proximity_devices, tolerance, proximity_zone)
proximity.entity_id = entity_id
proximity.update_ha_state()
# Main command to monitor proximity of devices.
track_state_change(hass, proximity_devices,
proximity.check_proximity_state_change)
return True
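# Hedged configuration.yaml sketch matching the keys read above (entity names
# and values are illustrative):
#   proximity:
#     zone: home
#     ignored_zones:
#       - work
#     devices:
#       - device_tracker.car1
#     tolerance: 50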
class Proximity(Entity): # pylint: disable=too-many-instance-attributes
"""Representation of a Proximity."""
# pylint: disable=too-many-arguments
    def __init__(self, hass, zone_friendly_name, dist_to, dir_of_travel,
nearest, ignored_zones, proximity_devices, tolerance,
proximity_zone):
"""Initialize the proximity."""
self.hass = hass
self.friendly_name = zone_friendly_name
self.dist_to = dist_to
self.dir_of_travel = dir_of_travel
self.nearest = nearest
self.ignored_zones = ignored_zones
self.proximity_devices = proximity_devices
        self.tolerance = tolerance
self.proximity_zone = proximity_zone
@property
def name(self):
"""Return the name of the entity."""
return self.friendly_name
@property
def state(self):
"""Return the state."""
return self.dist_to
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return "km"
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_DIR_OF_TRAVEL: self.dir_of_travel,
ATTR_NEAREST: self.nearest,
}
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
def check_proximity_state_change(self, entity, old_state, new_state):
"""Function to perform the proximity checking."""
entity_name = new_state.name
devices_to_calculate = False
devices_in_zone = ''
zone_state = self.hass.states.get(self.proximity_zone)
proximity_latitude = zone_state.attributes.get('latitude')
proximity_longitude = zone_state.attributes.get('longitude')
# Check for devices in the monitored zone.
for device in self.proximity_devices:
device_state = self.hass.states.get(device)
if device_state.state not in self.ignored_zones:
devices_to_calculate = True
# Check the location of all devices.
if (device_state.state).lower() == (self.friendly_name).lower():
device_friendly = device_state.name
if devices_in_zone != '':
devices_in_zone = devices_in_zone + ', '
devices_in_zone = devices_in_zone + device_friendly
# No-one to track so reset the entity.
if not devices_to_calculate:
self.dist_to = 'not set'
self.dir_of_travel = 'not set'
self.nearest = 'not set'
self.update_ha_state()
return
# At least one device is in the monitored zone so update the entity.
if devices_in_zone != '':
self.dist_to = 0
self.dir_of_travel = 'arrived'
self.nearest = devices_in_zone
self.update_ha_state()
return
# We can't check proximity because latitude and longitude don't exist.
if 'latitude' not in new_state.attributes:
return
# Collect distances to the zone for all devices.
distances_to_zone = {}
for device in self.proximity_devices:
# Ignore devices in an ignored zone.
device_state = self.hass.states.get(device)
if device_state.state in self.ignored_zones:
continue
# Ignore devices if proximity cannot be calculated.
if 'latitude' not in device_state.attributes:
continue
# Calculate the distance to the proximity zone.
dist_to_zone = distance(proximity_latitude,
proximity_longitude,
device_state.attributes['latitude'],
device_state.attributes['longitude'])
# Add the device and distance to a dictionary.
distances_to_zone[device] = round(dist_to_zone / 1000, 1)
# Loop through each of the distances collected and work out the
# closest.
closest_device = ''
dist_to_zone = 1000000
for device in distances_to_zone:
if distances_to_zone[device] < dist_to_zone:
closest_device = device
dist_to_zone = distances_to_zone[device]
# If the closest device is one of the other devices.
if closest_device != entity:
self.dist_to = round(distances_to_zone[closest_device])
self.dir_of_travel = 'unknown'
device_state = self.hass.states.get(closest_device)
self.nearest = device_state.name
self.update_ha_state()
return
# Stop if we cannot calculate the direction of travel (i.e. we don't
# have a previous state and a current LAT and LONG).
if old_state is None or 'latitude' not in old_state.attributes:
self.dist_to = round(distances_to_zone[entity])
self.dir_of_travel = 'unknown'
self.nearest = entity_name
self.update_ha_state()
return
# Reset the variables
distance_travelled = 0
# Calculate the distance travelled.
old_distance = distance(proximity_latitude, proximity_longitude,
old_state.attributes['latitude'],
old_state.attributes['longitude'])
new_distance = distance(proximity_latitude, proximity_longitude,
new_state.attributes['latitude'],
new_state.attributes['longitude'])
distance_travelled = round(new_distance - old_distance, 1)
# Check for tolerance
if distance_travelled < self.tolerance * -
|
BishopFox/SpoofcheckSelfTest
|
handlers/HomePageHandler.py
|
Python
|
apache-2.0
| 154
| 0
|
from handlers.BaseHandlers import BaseHandler
class HomePageHandler(BaseHandler):
def get(self, *args, **kwargs):
        self.render('home.html')
|
mgaitan/moin2git
|
wikiconfig.py
|
Python
|
bsd-3-clause
| 2,368
| 0.00549
|
# -*- coding: iso-8859-1 -*-
"""MoinMoin Desktop Edition (MMDE) - Configuration
ONLY to be used for MMDE - if you run a personal wiki on your notebook or PC.
This is NOT intended for internet or server or multiuser use due to relaxed security settings!
"""
import sys, os
from MoinMoin.config import multiconfig, url_prefix_static
class LocalConfig(multiconfig.DefaultConfig):
# vvv DON'T TOUCH THIS EXCEPT IF YOU KNOW WHAT YOU DO vvv
# Directory containing THIS wikiconfig:
wikiconfig_dir = os.path.abspath(os.path.dirname(__file__))
# We assume this structure for a simple "unpack and run" scenario:
# wikiconfig.py
# wiki/
# data/
# underlay/
    # If that's not true, feel free to just set instance_dir to the real path
# where data/ and underlay/ is located:
#instance_dir = '/where/ever/your/instance/is'
instance_dir = os.path.join(wikiconfig_dir, 'wiki')
# Where your own wiki pages are (make regular backups of this directory):
data_dir = os.path.join(instance_dir, 'data', '') # path with trailing /
# Where system and help pages are (you may exclude this from backup):
data_underlay_dir = os.path.join(instance_dir, 'underlay', '') # path with trailing /
DesktopEdition = True # give all local users full powers
acl_rights_default = u"All:read,write,delete,revert,admin"
surge_action_limits = None # no surge protection
sitename = u'MoinMoin DesktopEdition'
logo_string = u'<img src="%s/common/moinmoin.png" alt="MoinMoin Logo">' % url_prefix_static
# ^^^ DON'T TOUCH THIS EXCEPT IF YOU KNOW WHAT YOU DO ^^^
#page_front_page = u'FrontPage' # change to some better value
# Add your configuration items here.
secrets = 'This string is NOT a secret, please make up your own, long, random secret string!'
# Set this to your default front page, default is a blank page.
page_front_page = u"MyStartingPage"
# DEVELOPERS! Do not add your configuration items there,
# you could accidentally commit them! Instead, create a
# wikiconfig_local.py file containing this:
#
# from wikiconfig import LocalConfig
#
# class Config(LocalConfig):
# configuration_item_1 = 'value1'
#
try:
from wikiconfig_local import Config
except ImportError, err:
if not str(err).endswith('wikiconfig_local'):
raise
Config = LocalConfig
|
kartikp1995/gr-bokehgui
|
python/qa_waterfall_sink_f.py
|
Python
|
gpl-3.0
| 1,883
| 0.001062
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2013,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from bokehgui_swig import waterfall_sink_f_proc
from gnuradio import blocks, filter, gr, gr_unittest
class qa_waterfall_sink_f(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test_001_t(self):
original = (1,) * 100 + (-1,) * 100 + (0,) * 50 + (10,) + (0,) * 49
expected_result = [(-200,) * 50 + (0,) + (-200,) * 49, (-20,) * 100]
src = blocks.vector_source_f(original, False, 1, [])
dst = waterfall_sink_f_proc(100, filter.firdes.WIN_RECTANGULAR, 0,
15000, 'Test')
self.tb.connect(src, dst)
self.tb.run()
result_data = dst.get_plot_data()
result_data1 = dst.get_plot_data()
self.assertEqual(expected_result[0], tuple(result_data[0]))
self.assertEqual(expected_result[0], tuple(result_data1[0]))
self.tb.stop()
self.tb.wait()
if __name__ == '__main__':
gr_unittest.run(qa_waterfall_sink_f, "qa_waterfall_sink_f.xml")
|
artofai/neural-network
|
layer.py
|
Python
|
mit
| 4,245
| 0.008009
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The Art of an Artificial Intelligence
http://art-of-ai.com
https://github.com/artofai
"""
__author__ = 'xevaquor'
__license__ = 'MIT'
import numpy as np
import util
class LayerBase(object):
def __init__(self):
self.size = None
self.W = np.zeros((0,0))
self.a = None
self.v = None
def random_init(self):
raise NotImplementedError()
class SigmoidOutputLayer(LayerBase):
def __init__(self, neurons_count, previous_layer_count):
self.size = neurons_count
self.prev_layer_size = previous_layer_count
self.W = np.zeros((self.prev_layer_size + 1, self.size))
def random_init(self):
self.W = np.random.normal(size=self.W.shape)
def phi(self, v):
return 1. / (1 + np.exp(-v))
def phi_prime(self, v):
return np.exp(-v)/((1 + np.exp(-v))**2)
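    # Note: equivalently, phi'(v) = phi(v) * (1 - phi(v)) for the logistic sigmoid.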
class SigmoidInputLayer(LayerBase):
def __init__(self, input_size):
self.size = input_size
self.W = np.zeros((0,0))
def random_init(self):
pass
class SigmoidHiddenLayer(LayerBase):
def __init__(self, layer_size, prev_layer_size):
self.size = layer_size
self.prev_layer_size = prev_layer_size
self.W = np.zeros((self.prev_layer_size + 1, self.size))
def phi(self, v):
return 1. / (1 + np.exp(-v))
def phi_prime(self, v):
return np.exp(-v)/((1 + np.exp(-v))**2)
def random_init(self):
self.W = np.random.normal(size=self.W.shape)
class NN(object):
def __init__(self, input_size, hidden_sizes, output_size):
self.layers = [SigmoidInputLayer(input_size)]
for size in hidden_sizes:
self.layers.append(SigmoidHiddenLayer(size, self.layers[-1].size))
self.layers.append(SigmoidOutputLayer(output_size, self.layers[-1].size))
def set_wages(self, wages):
shapes = list([l.W.shape for l in self.layers[1:]])
packed = list(util.wrap_matrix(wages, shapes))
assert len(packed) == len(self.layers) - 1
for i, layer in enumerate(self.layers[1:]):
layer.W = packed[i]
def get_wages(self):
        all_wages = [layer.W for layer in self.layers]
return util.unwrap_matrix(all_wages)
def random_init(self):
for layer in self.layers:
layer.random_init()
def forward(self, X):
        # hidden layer
m, n = X.shape
# examples, features
assert n == self.layers[0].size
self.layers[0].a = X
for i in range(1, len(self.layers)):
source_layer = self.layers[i-1]
dest_layer = self.layers[i]
            bias = np.ones((source_layer.a.shape[0], 1))
source_layer.a = np.hstack((bias, source_layer.a))
dest_layer.v = np.dot(source_layer.a, dest_layer.W)
dest_layer.a = dest_layer.phi(dest_layer.v)
self.y_hat = self.layers[-1].a
return self.y_hat
def cost(self, X, y):
self.y_hat = self.forward(X)
J = 0.5*np.sum((y-self.y_hat)**2)
return J
def nabla_cost(self, X, y):
self.forward(X)
return (self.y_hat - y)
def cost_prime(self, X, y):
# output layer delta
deltas = [None] * len(self.layers)
differentials = [None] * len(self.layers)
nabla_cost = self.nabla_cost(X, y)
deltas[-1] = np.multiply(nabla_cost, self.layers[-1].phi_prime(self.layers[-1].v))
differentials[2] = np.dot(self.layers[1].a.T, deltas[2])
truncatedW = self.layers[-1].W[1:, :]
#truncatedW = self.layers[-1].W
deltas[1] = np.multiply(np.dot(deltas[2], truncatedW.T), self.layers[1].phi_prime(self.layers[1].v))
bias = np.ones((X.shape[0], 1))
biased = np.hstack((bias, X))
differentials[1] = np.dot(self.layers[0].a.T, deltas[1])
return differentials[1:]
if __name__ == '__main__':
dd = NN(5, [2,3,4], 3)
dd.random_init()
X = np.array([[1,2,3,4,5],
[10,20,30,40,50],
[8,6,4,2,4]],dtype=float)
Y = np.array([[1,0,1],
[1,10,3],
[1,-4,4]], dtype=float)
yyy = dd.forward(X)
#print(yyy)
|
devops-alpha-s17/customers
|
customers/__init__.py
|
Python
|
apache-2.0
| 184
| 0.005435
|
'''
@author: Team Alpha, <aa5186@nyu.edu>
Name: Customer Model
Purpose: This library is part of the customer REST API for the ecommerce website
'''
from customer import Customer
|
concentricsky/badgr-server
|
apps/composition/__init__.py
|
Python
|
agpl-3.0
| 77
| 0
|
# this app has been deprecated but sticks around for migrations dependencies
|
thrisp/flarf
|
setup.py
|
Python
|
mit
| 1,046
| 0
|
"""
Flarf: Flask Request Filter
-------------
Configurable request filters
"""
from setuptools import setup
setup(
name='Flask-Flarf',
version='0.0.5',
url='https://github.com/thrisp/flarf',
license='MIT',
author='Thrisp/Hurrata',
author_email='blueblank@gmail.com',
description='Flask request filtering',
long_description=__doc__,
packages=['flask_flarf'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.9'
],
test_suite='nose.collector',
tests_require=[
'nose',
'blinker'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
        'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
jarrodmcc/OpenFermion
|
src/openfermion/utils/__init__.py
|
Python
|
apache-2.0
| 5,551
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._bch_expansion import bch_expand
from ._channel_state import (amplitude_damping_channel, dephasing_channel,
depolarizing_channel)
from ._commutators import anticommutator, commutator, double_commutator
from ._grid import Grid
from ._lattice import (HubbardSquareLattice, SpinPairs, Spin)
from ._lcu_util import (lambda_norm,
preprocess_lcu_coefficients_for_reversible_sampling)
from ._operator_utils import (chemist_ordered, count_qubits,
eigenspectrum, fourier_transform,
freeze_orbitals, get_file_path,
hermitian_conjugated, inline_sum,
inverse_fourier_transform,
is_hermitian, is_identity,
normal_ordered, prune_unused_indices,
reorder, up_then_down,
load_operator, save_operator,
group_into_tensor_product_basis_sets)
from ._rdm_mapping_functions import (kronecker_delta,
map_two_pdm_to_two_hole_dm,
map_two_pdm_to_one_pdm,
                                     map_one_pdm_to_one_hole_dm,
                                     map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm,
map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm,
map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
from ._slater_determinants import (gaussian_state_preparation_circuit,
slater_determinant_preparation_circuit)
from ._special_operators import (majorana_operator, number_operator,
s_minus_operator, s_plus_operator,
s_squared_operator,
sx_operator, sy_operator, sz_operator)
from ._testing_utils import (haar_random_vector,
random_antisymmetric_matrix,
random_diagonal_coulomb_hamiltonian,
random_hermitian_matrix,
random_interaction_operator,
random_quadratic_hamiltonian,
random_qubit_operator,
random_unitary_matrix,
module_importable)
from ._trotter_error import error_bound, error_operator
from ._trotter_exp_to_qgates import (pauli_exp_to_qasm,
trotterize_exp_qubop_to_qasm,
trotter_operator_grouping)
from ._unitary_cc import (uccsd_convert_amplitude_format,
uccsd_generator,
uccsd_singlet_generator,
uccsd_singlet_get_packed_amplitudes,
uccsd_singlet_paramsize)
# Imports out of alphabetical order to avoid circular dependency.
from ._jellium_hf_state import hartree_fock_state_jellium
from ._low_rank import (get_chemist_two_body_coefficients,
low_rank_two_body_decomposition,
prepare_one_body_squared_evolution)
from ._low_depth_trotter_error import (
low_depth_second_order_trotter_error_bound,
low_depth_second_order_trotter_error_operator)
from ._sparse_tools import (boson_ladder_sparse,
boson_operator_sparse,
expectation,
expectation_computational_basis_state,
get_density_matrix,
get_gap,
get_ground_state,
get_linear_qubit_operator_diagonal,
inner_product,
jordan_wigner_sparse,
jw_configuration_state,
jw_hartree_fock_state,
jw_get_gaussian_state,
jw_get_ground_state_at_particle_number,
jw_number_restrict_operator,
jw_number_restrict_state,
jw_slater_determinant,
jw_sz_restrict_operator,
jw_sz_restrict_state,
qubit_operator_sparse,
sparse_eigenspectrum,
variance)
from ._davidson import Davidson, DavidsonOptions, QubitDavidson, SparseDavidson
from ._linear_qubit_operator import (
LinearQubitOperator,
LinearQubitOperatorOptions,
ParallelLinearQubitOperator,
generate_linear_qubit_operator,
)
from ._pubchem import geometry_from_pubchem
|
pauljxtan/pystuff
|
pycompvis/compvis/feature/detectors.py
|
Python
|
mit
| 6,800
| 0.003676
|
"""
Feature detection (Szeliski 4.1.1)
"""
import numpy as np
import scipy.signal as sig
import scipy.ndimage as ndi
from compvis.utils import get_patch
def sum_sq_diff(img_0, img_1, u, x, y, x_len, y_len):
"""
Returns the summed square difference between two image patches, using even
weighting across the patch.
Parameters :
img_0, img_1 : two images being compared
u : displacement vector between patches
x, y : coordinates of top-left corner of first patch
x_len, y_len : dimensions of patch
"""
patch_0 = get_patch(img_0, x, y, x_len, y_len)
patch_1 = get_patch(img_1, x + u[0], y + u[1], x_len, y_len)
return ((patch_1 - patch_0)**2).sum()
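    # i.e. E(u) = sum over the patch of (img_1(x + u) - img_0(x))**2, the
    # constant-weight case of the weighted SSD in Szeliski 4.1.1.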
def autocorr(img, u, x, y, x_len, y_len):
"""
Returns the auto-correlation function for an image patch with a
displacement of u. Uses even weighting across the patch.
(This function simply calls sum_sq_diff() with both images the same, and is
left here for reference/convenience.)
Parameters :
img : image
u : displacement vector between patches
x, y : coordinates of top-left corner of patch
x_len, y_len : dimensions of patch
"""
return sum_sq_diff(img, img, u, x, y, x_len, y_len)
def autocorr_surface(img, u_x_range, u_y_range, x, y, x_len, y_len):
"""
Returns an auto-correlation surface for an image patch with a given range
of displacements.
Parameters :
img : image
u_x_range, u_y_range : ranges of displacements (tuples)
x, y : coordinates of top-left corner of patch
x_len, y_len : dimensions of patch
Returns :
surface : auto-correlation values
X, Y : grid mesh
"""
# Grid mesh
X, Y = np.meshgrid(range(u_x_range[0], u_x_range[1]),
range(u_y_range[0], u_y_range[1]))
    # Auto-correlation surface
s = np.array([autocorr(img, (u_x, u_y), x, y, x_len, y_len)
for u_x, u_y in zip(np.ravel(X), np.ravel(Y))])
surface = s.reshape(X.shape)
return surface, X, Y
def harris(img, sigma_d=1, sigma_i=2, alpha=0.06, filter_type='gaussian'):
"""
Returns the Harris interest scores for corner detection.
Parameters :
img : image
sigma_d : width of derivative Gaussian
sigma_i : width of integration Gaussian
alpha : parameter in Harris-Stephens (1988) score
filter_type : 'gaussian' or 'sobel'
(Default values for sigma_d and sigma_i from Szeliski pp. 190)
(Default value for alpha from Szeliski pp. 189)
"""
#--- Gradients in x and y
# Derivative of Gaussian
    if filter_type == 'gaussian':
I_x = ndi.gaussian_filter(img, sigma_d, (1, 0))#, mode='nearest')
I_y = ndi.gaussian_filter(img, sigma_d, (0, 1))#, mode='nearest')
# Sobel
    elif filter_type == 'sobel':
I_x = ndi.sobel(img, 0)
I_y = ndi.sobel(img, 1)
#---
# Outer products
I_xx = I_x**2
I_yy = I_y**2
I_xy = I_x * I_y
# Convolve with Gaussian to get auto-correlation matrix
A_xx = ndi.gaussian_filter(I_xx, sigma_i)
A_yy = ndi.gaussian_filter(I_yy, sigma_i)
A_xy = ndi.gaussian_filter(I_xy, sigma_i)
# Harris scores
A_det = A_xx * A_yy - A_xy**2
A_tr = A_xx + A_yy
# (Harris-Stephens 1988)
#return A_det - alpha * A_tr**2
# Harmonic mean (Brown-Szeliski-Winder 2005)
return A_det / A_tr
def select_scores(scores, n_points, border=10):
"""
Selects the best scores from a given map.
Parameters :
scores : 2D score array
n_points : number of points to select
border : minimum distance from image boundaries
"""
# Mask out points too close to boundary
mask = np.zeros(scores.shape)
mask[border:-border, border:-border] = 1
scores *= mask
# Sort coordinates by response strength
coords_sorted_score = np.array(np.unravel_index(np.argsort(scores, axis=None), scores.shape)).T
#scores_sorted = scores[coords_sorted]
# Get highest scores
best_coords = coords_sorted_score[-n_points:]
best_scores = [scores[coord[0], coord[1]] for coord in best_coords]
return np.array(best_coords), np.array(best_scores)
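# Minimal usage sketch (img is any 2D grayscale array; the copy matters because
# select_scores mutates its scores argument in place):
#   scores = harris(img)
#   coords, vals = select_scores(scores.copy(), n_points=50)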
def select_scores_anms(scores, n_points, c_robust=0.9, border=10):
"""
Selects the best scores from a given map, applying adaptive non-maximal
    suppression.
Parameters :
scores : 2D score array
n_points : number of points to select
c_robust : robustifying parameter
border : minimum distance from image boundaries
"""
# Mask out points too close to boundary
mask = np.zeros(scores.shape)
mask[border:-border, border:-border] = 1
scores *= mask
# Apply ANMS selection (prevent dense clusters)
supp_radii = get_suppression_radii(scores, c_robust)
    # Sort coordinates by suppression radii
coords_sorted_supp = np.array(np.unravel_index(np.argsort(supp_radii, axis=None), supp_radii.shape)).T
    # Get scores with highest suppression radii
best_coords = coords_sorted_supp[-n_points:]
best_scores = [scores[coord[0], coord[1]] for coord in best_coords]
return np.array(best_coords), np.array(best_scores)
def get_suppression_radii(scores, c_robust):
supp_radii = np.zeros(scores.shape)
# Coordinate with highest score
coord_max = np.unravel_index(scores.argmax(), scores.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
score = scores[i, j]
            if score == 0:
                continue
# Skip the highest score (infinite suppression radius)
if (i, j) == coord_max:
continue
# Find suppression radius
r = 0
r_found = False
            while not r_found:
r += 1
# Keep the candidate "window" within the image
x0 = i-r if i-r >= 0 else 0
x1 = i+r+1 if i+r+1 < scores.shape[0] else scores.shape[0]-1
y0 = j-r if j-r >= 0 else 0
y1 = j+r+1 if j+r+1 < scores.shape[1] else scores.shape[1]-1
candidates = scores[x0:x1, y0:y1]
# If a significantly stronger neighbour is found
if np.count_nonzero(score < c_robust*candidates):
r_found = True
break
supp_radii[i][j] = r
    # Set the highest score to have the largest suppression radius
supp_radii[coord_max] = supp_radii.max() + 1
return supp_radii
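# --- Hedged usage sketch (editor's addition, not part of the original
# module): ties the Harris detector and the ANMS selector together on a
# synthetic image. Assumes numpy (np) and scipy.ndimage (ndi) are imported
# at module top, as the functions above already require.
def _demo_harris_anms():
    rng = np.random.RandomState(0)
    img = rng.rand(64, 64)
    img[20:40, 20:40] += 1.0  # a bright square contributes four strong corners
    scores = harris(img, sigma_d=1, sigma_i=2)
    coords, vals = select_scores_anms(scores, n_points=4, border=10)
    return coords, vals  # (4, 2) corner coordinates and their Harris scores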
|
jriehl/numba
|
numba/targets/dictimpl.py
|
Python
|
bsd-2-clause
| 504
| 0
|
"""
This file implements the lowering for `dict()`
"""
from numba.targets.imputils import lower_builtin
@lower_builtin(dict)
def impl_dict(context, builder, sig, args):
"""
The `dict()` implementation simply forwards the work to `Dict.empty()`.
"""
from numba.typed import Dict
dicttype = sig.return_type
kt, vt = dicttype.key_type, dicttype.value_type
def call_ctor():
        return Dict.empty(kt, vt)

    return context.compile_internal(builder, call_ctor, sig, args)
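# --- Hedged usage sketch (editor's addition, not part of the original file):
# with this lowering registered, `dict()` inside a jitted function builds a
# numba typed dict whose key/value types are inferred at compile time.
def _demo_dict_lowering():
    import numpy as np
    from numba import njit

    @njit
    def count_keys(keys):
        d = dict()                  # handled by impl_dict above
        for k in keys:
            d[k] = d.get(k, 0) + 1  # typed-dict .get with a default
        return d

    return count_keys(np.array([1, 2, 2, 3]))  # {1: 1, 2: 2, 3: 1}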
|
pattarapol-iamngamsup/projecteuler_python
|
problem_006.py
|
Python
|
gpl-3.0
| 1,747
| 0.034345
|
""" Copyright 2012, July 31
Written by Pattarapol (Cheer) Iamngamsup
E-mail: IAM.PATTARAPOL@GMAIL.COM
Sum square difference
Problem 6
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of
the first ten natural numbers and the square of the sum is
    3025 - 385 = 2640.
Find the difference between the sum of the squares of
    the first one hundred natural numbers and the square of the sum.
"""
#################################################
# Importing libraries & modules
import datetime
#################################################
# Global variables
#################################################
# Functions
#################################################
# Classes
#################################################
# Main function
def main():
    squareOfSum = ( ( ( 1+100 ) * 100 ) // 2)**2
sumOfSquare = 0
for i in range( 1, 101 ):
sumOfSquare += i*i
print( 'answer = {0}'.format( squareOfSum - sumOfSquare ) )
#################################################
# Main execution
if __name__ == '__main__':
# get starting date time
startingDateTime = datetime.datetime.utcnow()
print( 'startingDateTime = {0} UTC'.format( startingDateTime ) )
# call main function
main()
# get ending date time
endingdateTime = datetime.datetime.utcnow()
print( 'endingdateTime = {0} UTC'.format( endingdateTime ) )
# compute delta date time
deltaDateTime = endingdateTime - startingDateTime
print( 'deltaDateTime = {0}'.format( deltaDateTime ) )
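# --- Hedged alternative (editor's addition, not in the original): both sums
# have closed forms, so the difference is O(1):
#     1 + 2 + ... + n = n*(n+1)/2
#     1^2 + 2^2 + ... + n^2 = n*(n+1)*(2n+1)/6
def difference_closed_form( n=100 ):
    squareOfSum = ( n * ( n+1 ) // 2 )**2
    sumOfSquare = n * ( n+1 ) * ( 2*n+1 ) // 6
    return squareOfSum - sumOfSquare  # 25164150 for n = 100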
|
kevinpdavies/pycsw
|
pycsw/plugins/profiles/apiso/apiso.py
|
Python
|
mit
| 50,869
| 0.00692
|
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
from lxml import etree
from pycsw import config, util
from pycsw.plugins.profiles import profile
CODELIST = 'http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml'
CODESPACE = 'ISOTC211/19115'
class APISO(profile.Profile):
''' APISO class '''
def __init__(self, model, namespaces, context):
self.context = context
self.namespaces = {
'apiso': 'http://www.opengis.net/cat/csw/apiso/1.0',
'gco': 'http://www.isotc211.org/2005/gco',
'gmd': 'http://www.isotc211.org/2005/gmd',
'srv': 'http://www.isotc211.org/2005/srv',
'xlink': 'http://www.w3.org/1999/xlink'
}
self.inspire_namespaces = {
'inspire_ds': 'http://inspire.ec.europa.eu/schemas/inspire_ds/1.0',
'inspire_common': 'http://inspire.ec.europa.eu/schemas/common/1.0'
}
self.repository = {
'gmd:MD_Metadata': {
'outputschema': 'http://www.isotc211.org/2005/gmd',
'queryables': {
'SupportedISOQueryables': {
'apiso:Subject': {'xpath': 'gmd:identificationInfo/gmd:MD_Identification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:keyword/gco:CharacterString|gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:Keywords']},
'apiso:Title': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Title']},
'apiso:Abstract': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:abstract/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Abstract']},
'apiso:Format': {'xpath': 'gmd:distributionInfo/gmd:MD_Distribution/gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Format']},
'apiso:Identifier': {'xpath': 'gmd:fileIdentifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Identifier']},
'apiso:Modified': {'xpath': 'gmd:dateStamp/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:Modified']},
'apiso:Type': {'xpath': 'gmd:hierarchyLevel/gmd:MD_ScopeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:Type']},
'apiso:BoundingBox': {'xpath': 'apiso:BoundingBox', 'dbcol': self.context.md_core_model['mappings']['pycsw:BoundingBox']},
'apiso:CRS': {'xpath': 'concat("urn:ogc:def:crs:","gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString",":","gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:version/gco:CharacterString",":","gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString")', 'dbcol': self.context.md_core_model['mappings']['pycsw:CRS']},
                        'apiso:AlternateTitle': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:AlternateTitle']},
'apiso:RevisionDate': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue="revision"]/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:RevisionDate']},
'apiso:CreationDate': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue="creation"]/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:CreationDate']},
'apiso:PublicationDate': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue="publication"]/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:PublicationDate']},
'apiso:OrganisationName': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OrganizationName']},
'apiso:HasSecurityConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_SecurityConstraints', 'dbcol': self.context.md_core_model['mappings']['pycsw:SecurityConstraints']},
'apiso:Language': {'xpath': 'gmd:language/gmd:LanguageCode|gmd:language/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Language']},
'apiso:ParentIdentifier': {'xpath': 'gmd:parentIdentifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ParentIdentifier']},
'apiso:KeywordType': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:KeywordType']},
'apiso:TopicCategory': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:TopicCategory']},
'apiso:ResourceLanguage': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:code/gmd:MD_LanguageTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:ResourceLanguage']},
'apiso:GeographicDescriptionCode': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:GeographicDescriptionCode']},
'apiso:Denominator': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', 'dbcol': self.context.md_core_model['mappings']['pycsw:Denominator']},
'apiso:DistanceValue': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIde
|
monodokimes/pythonmon
|
core/menu.py
|
Python
|
gpl-3.0
| 1,737
| 0
|
import sys
from core import loop
from util import jsonmanager, debug
def make_console_menu(name):
menu_data_file_path = '_Resources/Data/MenuData/'
path = menu_data_file_path + name + '.json'
data = jsonmanager.get_data(path)
title = data['Title']
item_data = data['Items']
args = []
for item_datum in item_data:
args.append((item_datum['Text'], item_datum['Action']))
return ConsoleMenu(title, args)
class ConsoleMenuItem:
def __init__(self, text, action):
self.text = text
self.action = action
def invoke(self):
try:
getattr(sys.modules[__name__], self.action)()
except AttributeError as error:
debug.log('Something went wrong :(')
debug.log(error.args)
raise error
class ConsoleMenu:
def __init__(self, title, args):
self.title = title
self.menu_items = []
for argument in args:
self.add_menu_item(argument[0], argument[1])
def add_menu_item(self, text, action):
self.menu_items.append(ConsoleMenuItem(text, action))
def get_menu_item(self, index):
return self.menu_items[index]
def display_menu_item(self, index):
menu_item = self.get_menu_item(index)
        print('[' + str(index) + '] - ' + menu_item.text)
def run(self):
for index in range(0, len(self.menu_items)):
self.display_menu_item(index)
result = input('Choose an option: ')
self.get_menu_item(int(result)).invoke()
def run_loop(game_loop):
game_loop.set_scene('pallet-town')
game_loop.run()
def run_editor():
run_loop(loop.EditorLoop())
def run_game():
run_loop(loop.DefaultGameLoop())
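# --- Hedged usage sketch (editor's addition, not part of the original
# module): builds a menu directly instead of loading it from JSON. Action
# strings must name callables in this module, e.g. run_game/run_editor above,
# because ConsoleMenuItem.invoke resolves them with getattr on this module.
def demo_menu():
    menu = ConsoleMenu('Pythonmon', [('Play', 'run_game'),
                                     ('Editor', 'run_editor')])
    menu.run()  # prints the items, reads an index from stdin, invokes it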
|
drf24/labutils
|
utils_gui.py
|
Python
|
gpl-3.0
| 7,383
| 0.006772
|
#!/usr/bin/env python3
import idmaker
import utils
from tkinter import *
from PIL import Image, ImageTk
top = Tk()
top.wm_title("Voice Research Laboratory")
top.iconbitmap('icons/favicon.ico')
top.state('zoomed')
class MainWindow:
def __init__(self, top):
self.top = top
self.banner()
#self.makebuttons()
self.mainmenu()
def banner(self):
self.image = Image.open("icons/logo.png")
self.photo = ImageTk.PhotoImage(self.image)
self.bannerlabel = Label(self.top, image=self.photo)
self.bannerlabel.pack()
def mainmenu(self):
self.menubar = Menu(self.top)
#file menu
self.filemenu = Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label='Renamer', command=self.renamerCallBack)
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=self.exitCallback)
self.menubar.add_cascade(label="File", menu=self.filemenu)
        #experiment menu
self.experimentmenu = Menu(self.menubar, tearoff=0)
self.experimentmenu.add_command(label='ID Encryption', command=idmaker.makeid)
self.menubar.add_cascade(label='Experiments', menu=self.experimentmenu)
#stimuli menu
self.stimulimenu = Menu(self.menubar, tearoff=0)
self.stimulimenu.add_command(label='Manipulate Pitch', command=self.manipulatevoicescallback)
self.stimulimenu.add_command(label='Measure Duration', command=self.measuredurationcallback)
self.stimulimenu.add_command(label='Measure Pitch', command=self.measurepitchcallback)
self.menubar.add_cascade(label='Voice Stimuli', menu=self.stimulimenu)
#images menu
self.imagesmenu = Menu(self.menubar, tearoff=0)
self.imagesmenu.add_command(label='Flip horizontal', command=utils.Imagez.flip_horizontal)
self.imagesmenu.add_command(label='Flip vertical', command=utils.Imagez.flip_vertical)
self.imagesmenu.add_command(label='Invert colour', command=utils.Imagez.invert_colour)
self.imagesmenu.add_command(label='Convert to .jpg', command=utils.Imagez.convert2jpg)
self.menubar.add_cascade(label='Images', menu=self.imagesmenu)
#help menu
self.helpmenu = Menu(self.menubar, tearoff=0)
self.helpmenu.add_command(label="Help", command=self.helpCallBack)
self.menubar.add_cascade(label="Help", menu=self.helpmenu)
# display the menu
self.top.config(menu=self.menubar)
def makebuttons(self):
self.renamerbutton = Button(self.top, text="Renamer", font=("Arial", 32), command=self.renamerCallBack)
self.renamerbutton.pack()
self.emailbutton = Button(self.top, text='Email', font=("Arial", 32), command=self.emailCallBack)
self.emailbutton.pack()
self.manipulatevoicebutton = Button(self.top,
text = 'Manipulate Voices',
font=("Arial", 32),
command=self.manipulatevoicescallback)
self.manipulatevoicebutton.pack()
self.helpbutton = Button(self.top, text='Help', font=("Arial", 32), command=self.helpCallBack)
self.helpbutton.pack()
self.exitbutton = Button(self.top, text='Exit', font=("Arial", 32), command=self.exitCallback)
self.exitbutton.pack()
def manipulatevoicessubmitcallback(self):
self.sex = self.var.get()
if self.sex == 0:
utils.Praat.manipulateFo(sex='female')
self.manipulatewindow.destroy()
elif self.sex == 1:
utils.Praat.manipulateFo(sex='male')
self.manipulatewindow.destroy()
def manipulatevoicescallback(self):
self.manipulatewindow = Toplevel()
self.var = IntVar()
        self.radiolabel = Label(self.manipulatewindow, text='Choose sex of voices', font=('Arial', 32), justify=LEFT)
self.radiolabel.pack(anchor=W)
        self.R1 = Radiobutton(self.manipulatewindow, text="female", font=('Arial', 32), variable=self.var, value=0)
self.R1.pack(anchor=W)
self.R2 = Radiobutton(self.manipulatewindow, text="male", font=('Arial', 32), variable=self.var, value=1)
self.R2.pack(anchor=W)
self.radiosubmitbutton = Button(self.manipulatewindow, text = 'Submit', font=('Arial', 32), command=self.manipulatevoicessubmitcallback)
self.radiosubmitbutton.pack(anchor=W)
self.instructlabel = Label(self.manipulatewindow,
text='When you press submit, two windows will pop up.\n'
'First select the directory where your unmanipulated .wav '
'files are.\nThen select the directory you want to save '
'the manipulated .wav files to.', font=('Arial', 18), justify=LEFT)
self.instructlabel.pack(anchor=W)
def measuredurationcallback(self):
utils.Praat.measureDuration()
def measurepitchcallback(self):
utils.Praat.measureF0()
def exitCallback(self):
self.top.destroy()
def getReplacement(self):
self.pattern = self.patterns.get()
self.replacement = self.replacements.get()
utils.Filez.renamer(self.pattern, self.replacement)
self.renamerwindow.destroy()
def renamerCallBack(self):
self.renamerwindow = Toplevel()
self.entrytextLabel = Label(self.renamerwindow, text="Input search Pattern")
self.entrytextLabel.pack()
self.patterns = Entry(self.renamerwindow)
self.patterns.pack()
self.textLabel2 = Label(self.renamerwindow, text="Input replacement text")
self.textLabel2.pack()
self.replacements = Entry(self.renamerwindow)
self.replacements.pack()
self.submit = Button(self.renamerwindow, text='Submit', font=("Arial", 32), command=self.getReplacement)
self.submit.pack()
def getEmailtext(self):
self.body = self.e1.get()
utils.Emailz.send_email(self.body)
self.emailwindow.destroy()
def emailCallBack(self):
self.emailwindow = Toplevel()
self.emailwindow.title('Emailer')
self.emailwindow.focus_set()
self.emailLabel = Label(self.emailwindow, text="Enter email text", font=("Arial", 32)).pack()
self.e1 = Entry(self.emailwindow)
self.e1.pack()
self.submit = Button(self.emailwindow, text='Submit', font=("Arial", 32), command=self.getEmailtext)
self.submit.pack()
def closehelp(self):
self.helpLabel.destroy()
self.helpclosebutton.destroy()
def helpCallBack(self):
#self.helpwindow = Toplevel()
self.helpLabel = Label(self.top, text = utils.help_message, font=("Arial", 12))
self.helpLabel.pack()
self.helpclosebutton = Button(self.top, text='Close', font=("Arial", 18), command=self.closehelp)
self.helpclosebutton.pack()
def search4soundsCallback(self):
pass
def search4soundsSubmitCallback(self):
pass
topwindow = MainWindow(top)
top.mainloop()
|
frictionlessdata/tabulator-py
|
tabulator/parsers/xlsx.py
|
Python
|
mit
| 15,006
| 0.001066
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import io
import six
import shutil
import atexit
import openpyxl
import datetime
import re
from itertools import chain
from tempfile import NamedTemporaryFile
from ..parser import Parser
from .. import exceptions
from .. import helpers
# Module API
class XLSXParser(Parser):
"""Parser to parse Excel modern `xlsx` data format.
"""
# Public
options = [
"sheet",
"workbook_cache",
"fill_merged_cells",
"preserve_formatting",
"adjust_floating_point_error",
]
def __init__(
self,
loader,
force_parse=False,
sheet=1,
workbook_cache=None,
fill_merged_cells=False,
preserve_formatting=False,
adjust_floating_point_error=False,
):
self.__loader = loader
self.__sheet_pointer = sheet
self.__workbook_cache = workbook_cache
self.__fill_merged_cells = fill_merged_cells
self.__preserve_formatting = preserve_formatting
self.__adjust_floating_point_error = adjust_floating_point_error
self.__extended_rows = None
self.__encoding = None
self.__fragment = None
self.__force_parse = force_parse
self.__bytes = None
@property
def closed(self):
return self.__bytes is None or self.__bytes.closed
def open(self, source, encoding=None):
self.close()
self.__encoding = encoding
# Remote
# Create copy for remote source
# For remote stream we need local copy (will be deleted on close by Python)
# https://docs.python.org/3.5/library/tempfile.html#tempfile.TemporaryFile
if getattr(self.__loader, "remote", False):
# Cached
if self.__workbook_cache is not None and source in self.__workbook_cache:
self.__bytes = io.open(self.__workbook_cache[source], "rb")
# Not cached
else:
prefix = "tabulator-"
delete = self.__workbook_cache is None
source_bytes = self.__loader.load(source, mode="b", encoding=encoding)
target_bytes = NamedTemporaryFile(prefix=prefix, delete=delete)
shutil.copyfileobj(source_bytes, target_bytes)
source_bytes.close()
target_bytes.seek(0)
self.__bytes = target_bytes
if self.__workbook_cache is not None:
self.__workbook_cache[source] = target_bytes.name
atexit.register(os.remove, target_bytes.name)
# Local
else:
self.__bytes = self.__loader.load(source, mode="b", encoding=encoding)
# Get book
# To fill merged cells we can't use read-only because
# `sheet.merged_cell_ranges` is not available in this mode
self.__book = openpyxl.load_workbook(
self.__bytes, read_only=not self.__fill_merged_cells, data_only=True
)
# Get sheet
try:
if isinstance(self.__sheet_pointer, six.string_types):
self.__sheet = self.__book[self.__sheet_pointer]
else:
self.__sheet = self.__book.worksheets[self.__sheet_pointer - 1]
except (KeyError, IndexError):
message = 'Excel document "%s" doesn\'t have a sheet "%s"'
raise exceptions.SourceError(message % (source, self.__sheet_pointer))
self.__fragment = self.__sheet.title
self.__process_merged_cells()
# Reset parser
self.reset()
def close(self):
if not self.closed:
self.__bytes.close()
def reset(self):
helpers.reset_stream(self.__bytes)
self.__extended_rows = self.__iter_extended_rows()
@property
def encoding(self):
return self.__encoding
@property
def fragment(self):
return self.__fragment
@property
def extended_rows(self):
return self.__extended_rows
# Private
def __iter_extended_rows(self):
for row_number, row in enumerate(self.__sheet.iter_rows(), start=1):
yield (
row_number,
None,
extract_row_values(
row, self.__preserve_formatting, self.__adjust_floating_point_error,
),
)
def __process_merged_cells(self):
if self.__fill_merged_cells:
for merged_cell_range in list(self.__sheet.merged_cells.ranges):
merged_cell_range = str(merged_cell_range)
self.__sheet.unmerge_cells(merged_cell_range)
                merged_rows = openpyxl.utils.rows_from_range(merged_cell_range)
coordinates = list(chain.from_iterable(merged_rows))
value = self.__sheet[coordinates[0]].value
                for coordinate in coordinates:
                    cell = self.__sheet[coordinate]
cell.value = value
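# --- Hedged usage sketch (editor's addition, not part of the original
# module): these parser options are normally reached through tabulator's
# Stream facade; the workbook name below is a placeholder.
def _demo_fill_merged_cells(path='workbook.xlsx'):
    from tabulator import Stream
    with Stream(path, sheet=1, fill_merged_cells=True) as stream:
        # each merged region's value is repeated into every covered cell
        return stream.read()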
# Internal
EXCEL_CODES = {
"yyyy": "%Y",
"yy": "%y",
"dddd": "%A",
"ddd": "%a",
"dd": "%d",
"d": "%-d",
# Different from excel as there is no J-D in strftime
"mmmmmm": "%b",
"mmmm": "%B",
"mmm": "%b",
"hh": "%H",
"h": "%-H",
"ss": "%S",
"s": "%-S",
# Possibly different from excel as there is no am/pm in strftime
"am/pm": "%p",
# Different from excel as there is no A/P or a/p in strftime
"a/p": "%p",
}
EXCEL_MINUTE_CODES = {
"mm": "%M",
"m": "%-M",
}
EXCEL_MONTH_CODES = {
"mm": "%m",
"m": "%-m",
}
EXCEL_MISC_CHARS = [
"$",
"+",
"(",
":",
"^",
"'",
"{",
"<",
"=",
"-",
"/",
")",
"!",
"&",
"~",
"}",
">",
" ",
]
EXCEL_ESCAPE_CHAR = "\\"
EXCEL_SECTION_DIVIDER = ";"
def convert_excel_date_format_string(excel_date):
"""
Created using documentation here:
https://support.office.com/en-us/article/review-guidelines-for-customizing-a-number-format-c0a1d1fa-d3f4-4018-96b7-9c9354dd99f5
"""
# The python date string that is being built
python_date = ""
# The excel code currently being parsed
excel_code = ""
prev_code = ""
# If the previous character was the escape character
char_escaped = False
# If we are in a quotation block (surrounded by "")
quotation_block = False
# Variables used for checking if a code should be a minute or a month
checking_minute_or_month = False
minute_or_month_buffer = ""
for c in excel_date:
ec = excel_code.lower()
# The previous character was an escape, the next character should be added normally
if char_escaped:
if checking_minute_or_month:
minute_or_month_buffer += c
else:
python_date += c
char_escaped = False
continue
# Inside a quotation block
if quotation_block:
if c == '"':
# Quotation block should now end
quotation_block = False
elif checking_minute_or_month:
minute_or_month_buffer += c
else:
python_date += c
continue
# The start of a quotation block
if c == '"':
quotation_block = True
continue
if c == EXCEL_SECTION_DIVIDER:
# We ignore excel sections for datetimes
break
is_escape_char = c == EXCEL_ESCAPE_CHAR
# The am/pm and a/p code add some complications, need to make sure we are not that code
is_misc_char = c in EXCEL_MISC_CHARS and (
c != "/" or (ec != "am" and ec != "a")
)
new_excel_code = False
        # Handle a new code without a different character in between
if (
ec
and not is_escape_char
and not is_misc_char
# If the code does not start with c, we are in a new code
and not ec.startswith(c.lower())
# ot
|
google/deluca-lung
|
deluca/lung/experimental/controllers/_pid_correction.py
|
Python
|
apache-2.0
| 874
| 0.004577
|
import torch
from deluca.lung.core import Controller, LungEnv
class PIDCorrection(Controller):
def __init__(self, base_controller: Controller, sim: LungEnv, pid_K=[0.0, 0.0], decay=0.1, **kwargs):
self.base_controller = base_controller
self.sim = sim
self.I = 0.0
self.K = pid_K
self.decay = decay
self.reset()
def reset(self):
self.base_controller.reset()
self.sim.reset()
self.I = 0.0
    def compute_action(self, state, t):
u_in_base, u_out = self.base_controller(state, t)
err = self.sim.pressure - state
self.I = self.I * (1 - self.decay) + err * self.decay
pid_correction = self.K[0] * err + self.K[1] * self.I
u_in = torch.clamp(u_in_base + pid_correction, min=0.0, max=100.0)
        self.sim(u_in, u_out, t)
return u_in, u_out
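# --- Hedged usage note (editor's addition, not part of the original file):
# the correction is a discounted PI law on the sim-vs-measured residual,
#     I_t = (1 - decay) * I_{t-1} + decay * err_t
#     u_in = clamp(u_base + K[0] * err_t + K[1] * I_t, 0, 100)
# A sketch, assuming hypothetical `my_base` (Controller) and `my_sim`
# (LungEnv) objects satisfying those interfaces:
#     ctrl = PIDCorrection(base_controller=my_base, sim=my_sim, pid_K=[0.5, 0.1])
#     u_in, u_out = ctrl.compute_action(measured_pressure, t)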
|
bmedx/modulestore
|
xmodule/modulestore/tests/test_split_copy_from_template.py
|
Python
|
apache-2.0
| 7,859
| 0.002927
|
"""
Tests for split's copy_from_template method.
Currently it is only used for content libraries.
However for these tests, we make sure it also works when copying from course to course.
"""
import ddt
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.utils import MixedSplitTestCase
'''
TODO: Update
@ddt.ddt
class TestSplitCopyTemplate(MixedSplitTestCase):
"""
Test for split's copy_from_template method.
"""
@ddt.data(
        LibraryFactory,
        CourseFactory,
)
def test_copy_from_template(self, source_type):
"""
Test that the behavior of copy_from_template() matches its docstring
"""
        source_container = source_type.create(modulestore=self.store)  # Either a library or a course
course = CourseFactory.create(modulestore=self.store)
# Add a vertical with a capa child to the source library/course:
vertical_block = self.make_block("vertical", source_container)
html_library_display_name = "HTML Display Name"
html_block = self.make_block("html", vertical_block, display_name=html_library_display_name)
if source_type == LibraryFactory:
source_container = self.store.get_library(
source_container.location.library_key, remove_version=False, remove_branch=False
)
else:
source_container = self.store.get_course(
source_container.location.course_key, remove_version=False, remove_branch=False
)
# Inherit the vertical and the problem from the library into the course:
source_keys = [source_container.children[0]]
new_blocks = self.store.copy_from_template(source_keys, dest_key=course.location, user_id=self.user_id)
self.assertEqual(len(new_blocks), 1)
course = self.store.get_course(course.location.course_key) # Reload from modulestore
self.assertEqual(len(course.children), 1)
vertical_block_course = self.store.get_item(course.children[0])
self.assertEqual(new_blocks[0], vertical_block_course.location)
html_block_course = self.store.get_item(vertical_block_course.children[0])
self.assertEqual(html_block_course.display_name, html_library_display_name)
# Override the display_name:
new_display_name = "The Trouble with Tribbles"
html_block_course.display_name = new_display_name
self.store.update_item(html_block_course, self.user_id)
# Test that "Any previously existing children of `dest_usage`
# that haven't been replaced/updated by this copy_from_template operation will be deleted."
extra_block = self.make_block("html", vertical_block_course)
# Repeat the copy_from_template():
new_blocks2 = self.store.copy_from_template(source_keys, dest_key=course.location, user_id=self.user_id)
self.assertEqual(new_blocks, new_blocks2)
# Reload problem_block_course:
html_block_course = self.store.get_item(html_block_course.location)
self.assertEqual(html_block_course.display_name, new_display_name)
# Ensure that extra_block was deleted:
vertical_block_course = self.store.get_item(new_blocks2[0])
self.assertEqual(len(vertical_block_course.children), 1)
with self.assertRaises(ItemNotFoundError):
self.store.get_item(extra_block.location)
def test_copy_from_template_publish(self):
"""
Test that copy_from_template's "defaults" data is not lost
when blocks are published.
"""
# Create a library with an html:
source_library = LibraryFactory.create(modulestore=self.store)
display_name_expected = "CUSTOM Library Display Name"
self.make_block("html", source_library, display_name=display_name_expected)
# Reload source_library since we need its branch and version to use copy_from_template:
source_library = self.store.get_library(
source_library.location.library_key, remove_version=False, remove_branch=False
)
# And a course with a vertical:
course = CourseFactory.create(modulestore=self.store)
self.make_block("vertical", course)
html_key_in_course = self.store.copy_from_template(
source_library.children, dest_key=course.location, user_id=self.user_id
)[0]
# We do the following twice because different methods get used inside
# split modulestore on first vs. subsequent publish
for __ in range(2):
# Publish:
self.store.publish(html_key_in_course, self.user_id)
# Test that the defaults values are there.
problem_published = self.store.get_item(
html_key_in_course.for_branch(ModuleStoreEnum.BranchName.published)
)
self.assertEqual(problem_published.display_name, display_name_expected)
def test_copy_from_template_auto_publish(self):
"""
Make sure that copy_from_template works with things like 'chapter' that
are always auto-published.
"""
source_course = CourseFactory.create(modulestore=self.store)
course = CourseFactory.create(modulestore=self.store)
# Populate the course:
about = self.make_block("about", source_course)
chapter = self.make_block("chapter", source_course)
sequential = self.make_block("sequential", chapter)
# And three blocks that are NOT auto-published:
vertical = self.make_block("vertical", sequential)
html = self.make_block("html", source_course)
# Reload source_course since we need its branch and version to use copy_from_template:
source_course = self.store.get_course(
source_course.location.course_key, remove_version=False, remove_branch=False
)
# Inherit the vertical and the html from the library into the course:
source_keys = [block.location for block in [about, chapter, html]]
block_keys = self.store.copy_from_template(source_keys, dest_key=course.location, user_id=self.user_id)
self.assertEqual(len(block_keys), len(source_keys))
# Build dict of the new blocks in 'course', keyed by category (which is a unique key in our case)
new_blocks = {}
block_keys = set(block_keys)
while block_keys:
key = block_keys.pop()
block = self.store.get_item(key)
new_blocks[block.category] = block
block_keys.update(set(getattr(block, "children", [])))
# Check that auto-publish blocks with no children are indeed published:
def published_version_exists(block):
""" Does a published version of block exist? """
try:
self.store.get_item(block.location.for_branch(ModuleStoreEnum.BranchName.published))
return True
except ItemNotFoundError:
return False
# Check that the auto-publish blocks have been published:
self.assertFalse(self.store.has_changes(new_blocks["about"]))
# We can't use has_changes because it includes descendants
self.assertTrue(published_version_exists(new_blocks["chapter"]))
self.assertTrue(published_version_exists(new_blocks["sequential"])) # Ditto
# Check that non-auto-publish blocks and blocks with non-auto-publish descendants show changes:
self.assertTrue(self.store.has_changes(new_blocks["html"]))
# Will have changes since a child block has changes.
self.assertTrue(self.store.has_changes(new_blocks["chapter"]))
# Verify that our published_version_exists works
self.assertFalse(published_version_exists(new_blocks["vertical"]))
'''
|
ema/conpaas
|
conpaas-services/src/conpaas/services/htc/manager/get_run_time.py
|
Python
|
bsd-3-clause
| 6,391
| 0.015334
|
#import os
import sys
import time
import xmltodict
import pprint
pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
testing = False
# def poll_condor(jonbr, bagnr):
def poll_condor(filename):
# filename = "hist-%d-%d.xml" % ( jobnr, bagnr )
# command = "condor_history -constraint 'HtcJob == %d && HtcBag == %d' -xml > %s" % ( jobnr, bagnr, filename )
# os.system( command )
tries = 0
poll_dict = {}
while tries < 4:
tries += 1
_trystr = "Try %d (%s) :" % (tries, filename)
xml = open(filename).read()
xmldict = xmltodict.parse(xml)
print >> sys.stderr, "type(xmldict) = ", type(xmldict)
if not ( type(xmldict) == dict and xmldict.has_key('classads') ):
print >> sys.stderr, _trystr, "No classads, wait a little until the first results come in"
time.sleep(2)
continue
print >> sys.stderr, "type(xmldict['classads']) = ", type(xmldict['classads'])
if not ( type(xmldict['classads']) == dict and xmldict['classads'].has_key('c') ) :
print >> sys.stderr, _trystr, "No classads <c> entries, wait a little until the first results come in"
time.sleep(2)
continue
print >> sys.stderr, "type(xmldict['classads']['c']) = ", type(xmldict['classads']['c'])
if not ( type(xmldict['classads']['c']) == list and xmldict['classads']['c'][0].has_key('a') ) :
print >> sys.stderr, _trystr, "No classads attributes, wait a little until the first results come in"
time.sleep(2)
continue
poll_dict = get_poll_dict(xmldict)
break
# if poll_dict['CompletedTasks'] == poll_dict['TotalTask']:
#pp.pprint(xmldict)
return poll_dict
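# --- Hedged mini-example (editor's addition, not part of the original file):
# the shape xmltodict produces, which the guards above probe for. A single
# <c> element parses to a dict; repeated <c> elements parse to a list:
#     xmltodict.parse('<classads><c><a n="HtcJob"><i>1</i></a></c></classads>')
#     # -> {'classads': {'c': {'a': {'@n': 'HtcJob', 'i': '1'}}}}
# hence the type checks before indexing xmldict['classads']['c'][0].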
def get_poll_dict(xmldict):
if testing:
print >> sys.stderr, "selecting info from file %s, job %s, bag %s" % (filename, jobnr, bagnr)
res_dict = {}
# print >> sys.stderr, xml
# print "----"
# jobid = 0
for c in xmldict['classads']['c']:
tempdict = {}
# pp.pprint(c)
attrs=c['a']
# pp.pprint(attrs)
for d in attrs:
v = None
k = d['@n'].encode('ascii', 'ignore') # get rid of unicode from xmltodict
# handle float
if d.has_key('r'):
v=float( d['r'].encode('ascii', 'ignore') ) # get rid of unicode from xmltodict
# handle int
if d.has_key('i'):
v=int( d['i'].encode('ascii', 'ignore') ) # get rid of unicode from xmltodict
# handle string
if d.has_key('s'):
# pp.pprint(d)
if d['s'] == None:
v = 'None'
else:
v= d['s'].encode('ascii', 'ignore') # get rid of unicode from xmltodict
# handle boolean
if d.has_key('b'):
# pp.pprint(d)
v= 'True' if d['b']['@v'] == 't' else 'False'
# handle expression
if d.has_key('e'):
v= d['e'].encode('ascii', 'ignore') # get rid of unicode from xmltodict
if v != None:
tempdict[k] = v
else:
print "unknown datatype in "
pp.pprint(d)
attrdict = {}
for k in [ 'HtcJob', 'HtcBag', 'HtcTask',
'RemoteWallClockTime', 'Cmd',
'MATCH_EXP_MachineCloudMachineType' ]:
if tempdict.has_key(k):
attrdict[k] = tempdict[k]
#print kl
# cur_jobnr = "%(HtcJob)s" % tempdict
# if not ( jobnr == None or jobnr == cur_jobnr):
# continue
# cur_bagnr = "%(HtcBag)s" % tempdict
# if not ( bagnr == None or bagnr == cur_bagnr):
# continue
# tasknr = "%(HtcTask)s" % taskdict
taskid = "%(HtcJob)s.%(HtcBag)s.%(HtcTask)s" % tempdict
#jobid += 1
# print "----"
if res_dict.has_key(taskid):
res_dict[taskid].append ( attrdict )
else:
res_dict[taskid] = [ attrdict ]
if testing:
print >> sys.stderr, "====== res_dict ======"
pp.pprint(res_dict)
print >> sys.stderr, "------ res_dict ------"
return res_dict
"""
{ 'tasks':
{
taskid:
[
{
attr1: val1,
          attrn: valn,
        },
{
attr1: val1,
attrn: valn,
}
]
}
}
"""
def do_test(filename):
poll_dict = poll_condor(filename)
completed_tasks = 0
for _ in poll_dict.keys():
        completed_tasks += len(poll_dict[_])
    completed_task_sets = len(poll_dict)
print >> sys.stderr, "Found %d completed tasks in %d sets" % (completed_tasks, completed_task_sets)
if False:
pp.pprint(poll_dict)
if __name__ == "__main__":
pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
testing = True
usage = "usage : %s ClassAd_XML_file [ jobnr [ bagnr ] ]" % sys.argv[0]
argc = len(sys.argv)
jobnr = None
bagnr = None
print "%d args" % argc
if argc <= 1:
print usage
filename = "test3.xml"
if argc >= 2:
filename = sys.argv[1]
print "file = %s" % filename
if argc >= 3:
jobnr = sys.argv[2]
print "job = %s" % jobnr
if argc >= 4:
bagnr = sys.argv[3]
print "bag = %s" % bagnr
for _ in [ "test1.xml", "test2.xml", "test3.xml", "test4.xml" ] :
do_test( _ )
|
edouard-lopez/parlr
|
config.py
|
Python
|
apache-2.0
| 394
| 0.002538
|
import sys
import logging
logger = logging.getLogger(__name__)
def configure_logging():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(name)12s %(levelname)7s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
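# --- Hedged usage sketch (editor's addition, not part of the original
# module): configure_logging() installs a stdout handler on the root logger,
# so module-level loggers like the one above propagate to it.
if __name__ == '__main__':
    configure_logging()
    logger.debug('logging configured')  # -> timestamped line on stdout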
|
cackharot/geosnap-server
|
src/geosnap/service/UserService.py
|
Python
|
apache-2.0
| 2,611
| 0.000766
|
from datetime import datetime
import random
import string
from bson import ObjectId
class DuplicateUserException(Exception):
    def __init__(self, message='User name/email already exists'):
Exception.__init__(self, message)
pass
class UserServiceException(Exception):
def __init__(self, message=None):
Exception.__init__(self, message)
@classmethod
def cannot_delete_super_admin(cls):
return UserServiceException("Cannot delete super admin user!")
class UserService(object):
def __init__(self, db):
self.db = db
self.users = self.db.user_collection
def generate_api_key(self):
        s = string.ascii_letters + string.digits
return ''.join(random.sample(s, 20))
def create(self, item):
if self.user_exists(item['email']):
raise DuplicateUserException()
item.pop('_id', None)
item['created_at'] = datetime.now()
item['status'] = True
if 'api_key' not in item:
            item['api_key'] = self.generate_api_key()
if 'roles' not in item or item['roles'] is None or len(item['roles']) == 0:
item['roles'] = ['member']
return self.users.insert(item)
def get_by_email(self, email):
return self.users.find_one({"email": email})
def validate_user(self, username, password):
query = {'email': username, 'password': password}
return self.users.find(query).count() > 0
def search(self, email=None):
query = {}
if email is not None:
query['email'] = email
return [x for x in self.users.find(query)]
def delete(self, id):
item = self.get_by_id(id)
if item and 'roles' in item and item['roles'] is not None and 'super_admin' in item['roles']:
raise UserServiceException.cannot_delete_super_admin()
return self.users.remove({"_id": ObjectId(id)})
def get_by_id(self, id):
return self.users.find_one({"_id": ObjectId(id)})
def get_by_api_key(self, api_key):
return self.users.find_one({"api_key": api_key})
def update(self, item):
if item['_id'] is None:
return item
if self.user_exists(item['email'], str(item['_id'])):
raise DuplicateUserException()
item['updated_at'] = datetime.now()
self.users.save(item)
return item
def user_exists(self, email, id=None):
query = {}
if id is not None:
query = {"_id": {"$ne": ObjectId(id)}}
query['email'] = email
return self.users.find(query).count() > 0
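# --- Hedged usage sketch (editor's addition, not part of the original
# module): assumes a pymongo-style database object whose `user_collection`
# attribute is a collection, and an older pymongo release that still provides
# the insert()/count() cursor API used above.
#     from pymongo import MongoClient
#     svc = UserService(MongoClient().geosnap)
#     uid = svc.create({'email': 'a@example.com', 'password': 'secret'})
#     svc.validate_user('a@example.com', 'secret')  # -> True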
|
kikocorreoso/brython
|
www/tests/compression/huffman.py
|
Python
|
bsd-3-clause
| 8,832
| 0.001472
|
class ResizeError(Exception):
pass
def codelengths_from_frequencies(freqs):
freqs = sorted(freqs.items(),
key=lambda item: (item[1], -item[0]), reverse=True)
nodes = [Node(char=key, weight=value) for (key, value) in freqs]
while len(nodes) > 1:
right, left = nodes.pop(), nodes.pop()
node = Node(weight=right.weight + left.weight)
node.add([left, right])
if not nodes:
nodes.append(node)
else:
pos = 0
while pos < len(nodes) and nodes[pos].weight > node.weight:
pos += 1
nodes.insert(pos, node)
top = nodes[0]
tree = Tree(top)
tree.reduce(15)
codes = tree.codes()
code_items = list(codes.items())
code_items.sort(key=lambda item:(len(item[1]), item[0]))
return [(car, len(value)) for car, value in code_items]
def normalized(codelengths):
car, codelength = codelengths[0]
value = 0
codes = {car: "0" * codelength}
for (newcar, nbits) in codelengths[1:]:
value += 1
bvalue = str(bin(value))[2:]
bvalue = "0" * (codelength - len(bvalue)) + bvalue
if nbits > codelength:
codelength = nbits
bvalue += "0" * (codelength - len(bvalue))
value = int(bvalue, 2)
assert len(bvalue) == nbits
codes[newcar] = bvalue
return codes
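# --- Hedged worked example (editor's addition, not part of the original
# module): mirrors the first steps of Compresser.__init__ on a small payload.
def _demo_canonical_codes():
    freqs = {}
    for car in b"abracadabra":  # a:5, b:2, r:2, c:1, d:1
        freqs[car] = freqs.get(car, 0) + 1
    lengths = codelengths_from_frequencies(freqs)  # [(97, 1), (98, 3), ...]
    return normalized(lengths)  # canonical codes, e.g. {97: '0', 98: '100', ...}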
class Tree:
def __init__(self, root):
self.root = root
self.nb_levels = 0
def length(self):
self.root.level = 0
node = self.root
nb_levels = 0
def set_level(node):
nonlocal nb_levels
for child in node.children:
child.level = node.level + 1
nb_levels = max(nb_levels, child.level)
if not child.is_leaf:
set_level(child)
set_level(self.root)
return nb_levels
def reduce_tree(self):
"""Change the tree to reduce the number of levels.
Uses the algorithm described in
http://compressions.sourceforge.net/Huffman.html#3
"""
currentlen = self.length()
deepest = self.nodes_at(currentlen)
deepest_leaves = [node for node in deepest if node.is_leaf]
rightmost_leaf = deepest_leaves[-1]
sibling = rightmost_leaf.parent.children[0]
# replace rightmost_leaf's parent by rightmost_leaf
parent = rightmost_leaf.parent
grand_parent = parent.parent
rank = grand_parent.children.index(parent)
children = grand_parent.children
children[rank] = rightmost_leaf
grand_parent.add(children)
# find first upper level with leaves
up_level = rightmost_leaf.level - 2
while up_level > 0:
nodes = self.nodes_at(up_level)
leaf_nodes = [node for node in nodes if node.is_leaf]
if leaf_nodes:
leftmost_leaf = leaf_nodes[0]
# replace by node with leaves = [sibling, leftmost_leaf]
parent = leftmost_leaf.parent
rank = parent.children.index(leftmost_leaf)
new_node = Node()
new_node.level = leftmost_leaf.level
children = [sibling, leftmost_leaf]
new_node.add(children)
parent.children[rank] = new_node
new_node.parent = parent
break
else:
up_level -= 1
if up_level == 0:
raise ResizeError
def nodes_at(self, level, top=None):
"""Return list of all the nodes below top at specified level."""
res = []
if top is None:
top = self.root
if top.level == level:
res = [top]
elif not top.is_leaf:
for child in top.children:
res += self.nodes_at(level, child)
return res
def reduce(self, maxlevels):
"""Reduce number of levels to maxlevels, if possible."""
while self.length() > maxlevels:
self.reduce_tree()
def codes(self, node=None, code=''):
"""Returns a dictionary mapping leaf characters to the Huffman code
of the node, as a string of 0's and 1's."""
if node is None:
self.dic = {}
node = self.root
if node.is_leaf:
self.dic[node.char] = code
else:
for i, child in enumerate(node.children):
self.codes(child, code + str(i))
return self.dic
class Node:
def __init__(self, char=None, weight=0, level=0):
self.char = char
self.is_leaf = char is not None
self.level = level
self.weight = weight
self.height = 0
def add(self, children):
self.children = children
for child in self.children:
child.parent = self
child.level = self.level + 1
self.height = max(self.height, children[0].height + 1,
children[1].height + 1)
node = self
while hasattr(node, "parent"):
node.parent.height = max(node.parent.height, node.height + 1)
node = node.parent
def __repr__(self):
if self.is_leaf:
return f'{chr(self.char)!r}'
else:
return f'{self.children}'
class Compresser:
def __init__(self, text):
if not isinstance(text, (bytes, bytearray, memoryview)):
raise TypeError("a bytes-like object is required, not '" +
type(text).__name__ + "'")
self.text = text
freqs = {}
for car in self.text:
freqs[car] = freqs.get(car, 0) + 1
self.codelengths = codelengths_from_frequencies(freqs)
self.codes = normalized(self.codelengths)
self.max_codelength = max(len(v) for v in self.codes.values())
def compressed_bytes(self):
compressed = self.compressed_str() + self.codes[256]
out = bytearray()
pos = 0
while pos < len(compressed):
bits = compressed[pos:pos + 8]
byte = int(bits, 2)
if len(bits) < 8:
byte <<= (8 - len(bits))
out.append(byte)
pos += 8
return out
def compressed_str(self):
return ''.join(self.codes[car] for car in self.text)
class Decompresser:
def __init__(self, compressed, codelengths):
self.compressed = compressed
codes = normalized(codelengths)
self.codes = {value : key for key, value in codes.items()}
self.root = Node()
self.make_tree(self.root)
def make_tree(self, node):
if node is self.root:
node.code = ''
children = []
for bit in '01':
next_code = node.code + bit
if next_code in self.codes:
child = Node(char=self.codes[next_code])
else:
child = Node()
child.code = next_code
children.append(child)
node.add(children)
for child in children:
            if not child.is_leaf:
                self.make_tree(child)
def decompress(self):
source = self.compressed
if isinstance(source, (bytes, bytearray)):
return self.decompress_bytes()
pos = 0
node = self.root
res = bytearray()
while pos < len(source):
code = int(source[pos])
child = node.children[code]
if child.is_leaf:
                res.append(child.char)
node = self.root
else:
node = child
pos += 1
return bytes(res)
def decompress_bytes(self):
source = self.compressed
pos = 0
node = self.root
res = bytearray()
while pos < len(source):
byte = source[pos]
mask = 128
while mask > 0:
code = bool(byte & mask)
child = node.children[code]
if child.is_leaf:
if child.char == 256:
break # end of block
res.append(child.char)
|
bigoldboy/repository.bigoldboy
|
plugin.video.VADER/categorySelectDialog.py
|
Python
|
gpl-3.0
| 3,306
| 0.005445
|
# -*- coding: utf-8 -*-
# Licence: GPL v.3 http://www.gnu.org/licenses/gpl.html
# This is an XBMC addon for demonstrating the capabilities
# and usage of PyXBMCt framework.
import os
import xbmc
import xbmcaddon
import pyxbmct
from lib import utils
import plugintools
from itertools import tee, islice, chain, izip
_addon = xbmcaddon.Addon()
_addon_path = _addon.getAddonInfo('path')
# Enable or disable Estuary-based design explicitly
# pyxbmct.skin.estuary = True
def previous_and_next(some_iterable):
prevs, items, nexts = tee(some_iterable, 3)
prevs = chain([None], prevs)
nexts = chain(islice(nexts, 1, None), [None])
return izip(prevs, items, nexts)
class categorySelectDialog(pyxbmct.AddonDialogWindow):
def __init__(self, title='', categories=None):
super(categorySelectDialog, self).__init__(title)
self.categories = categories
self.listOfRadioButtons = []
self.radioMap = {}
maxRows = len(categories)
self.setGeometry(400, 600, maxRows+1, 1)
self.set_active_controls()
self.set_navigation()
# Connect a key action (Backspace) to close the window.
self.connect(pyxbmct.ACTION_NAV_BACK, self.close)
def set_active_controls(self):
row = 0
for category in self.categories:
for catId in category:
catName = category[catId]
radiobutton = pyxbmct.RadioButton(catName)
catSetting = plugintools.get_setting(catName)
self.placeControl(radiobutton, row, 0)
self.connect(radiobutton, self.radio_update)
if catSetting == True:
radiobutton.setSelected(True)
else:
radiobutton.setSelected(False)
self.listOfRadioButtons.append(radiobutton)
                self.radioMap[catName] = radiobutton
                row = row + 1
self.close_button = pyxbmct.Button('Close')
self.placeControl(self.close_button, row, 0)
self.connect(self.close_button, self.close)
def set_navigation(self):
for previous, item, nextItem in previous_and_next(self.listOfRadioButtons):
if previous != None:
item.controlUp(previous)
if nextItem != None:
item.controlDown(nextItem)
if nextItem == None:
item.controlDown(self.close_button)
self.close_button.controlUp(item)
# length = len(self.listOfRadioButtons)
# obj = self.listOfRadioButtons[length-1]
# item.controlDown(self.close_button)
self.setFocus(self.listOfRadioButtons[0])
def radio_update(self):
# Update radiobutton caption on toggle
# utils.log('entered radio update ' + str(listPos))
# radioButton = self.listOfRadioButtons[listPos]
radioButton = self.getFocus()
for catName, radioButtonItem in self.radioMap.iteritems():
if radioButton == radioButtonItem:
label = catName
if radioButton.isSelected():
plugintools.set_setting(label, 'True')
else:
plugintools.set_setting(label, 'False')
|
OddBloke/jenkins-job-linter
|
tests/__init__.py
|
Python
|
apache-2.0
| 612
| 0
|
# Copyright (C) 2017 Daniel Watkins <daniel@daniel-watkins.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
flowdas/meta
|
tests/ex/bytes.py
|
Python
|
mpl-2.0
| 50
| 0
|
class Image(meta.Entity):
    data = meta.Bytes()
|
txt/evil
|
diapers1.py
|
Python
|
unlicense
| 1,288
| 0.041149
|
from __future__ import print_function,division
# duner. using numbers and sample.
"""
q +-----+ r +-----+
---->| C |---->| D |--> s
^ +-----+ +-+---+
| |
+-----------------+
C = stock of clean diapers
D = stock of dirty diapers
q = inflow of clean diapers
r = flow of clean diapers to dirty diapers
s = out-flow of dirty diapers
"""
class o:
def has(i) : return i.__dict__
def __init__(i,**d) : i.has().update(d)
    def copy(i) : return o(**i.has().copy())
def __getitem__(i,k): return i.has()[k]
def __setitem__(i,k,v): i.has()[k] = v
def __repr__(i) : return 'o'+str(i.has())
def sim(state0,life=100,spy=False,dt=1):
t= 0
while t < life:
t += dt
state1 = state0.copy()
yield dt, t,state0,state1
state0 = state1
for key in state1.has().keys():
if state1[key] < 0:
state1[key] = 0
if spy:
print(t,state1)
def diapers():
def saturday(x): return int(x) % 7 == 6
world = o(C=20, D=0,q=0, r=8, s=0)
for dt,t,u,v in sim(world,life=60,spy=True,dt=0.5):
v.C += dt*(u.q - u.r)
v.D += dt*(u.r - u.s)
v.q = 70 if saturday(t) else 0
v.s = u.D if saturday(t) else 0
if t == 27: # special case (the day i forget)
v.s = 0
diapers()
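# --- Hedged second model (editor's addition, not part of the original file):
# the same sim() loop drives any stock-and-flow system; here a tank draining
# at a constant rate, clamped at empty by sim() itself.
def tank():
    world = o(level=100.0, out=5.0)
    for dt,t,u,v in sim(world,life=10,spy=True,dt=1):
        v.level += dt*(-u.out) # stock falls by the out-flow each step
tank()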
|
blckshrk/Weboob
|
weboob/capabilities/recipe.py
|
Python
|
agpl-3.0
| 6,695
| 0.002091
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, StringField, IntField, Field, empty
import lxml.etree as ET
import base64
import re
import urllib
__all__ = ['Recipe', 'ICapRecipe']
class Comment():
def __init__(self, author=None, rate=None, text=None):
self.author = author
self.rate = rate
self.text = text
def __str__(self):
result = u''
if self.author:
result += 'author: %s, ' % self.author
if self.rate:
result += 'note: %s, ' % self.rate
if self.text:
result += 'comment: %s' % self.text
return result
class Recipe(CapBaseObject):
"""
Recipe object.
"""
title = StringField('Title of the recipe')
author = StringField('Author name of the recipe')
thumbnail_url = StringField('Direct url to recipe thumbnail')
picture_url = StringField('Direct url to recipe picture')
short_description = StringField('Short description of a recipe')
nb_person = Field('The recipe was made for this amount of persons', list)
preparation_time = IntField('Preparation time of the recipe in minutes')
cooking_time = IntField('Cooking time of the recipe in minutes')
ingredients = Field('Ingredient list necessary for the recipe', list)
instructions = StringField('Instruction step list of the recipe')
comments = Field('User comments about the recipe', list)
def __init__(self, id, title):
CapBaseObject.__init__(self, id)
self.title = title
def toKrecipesXml(self, author=None):
"""
Export recipe to KRecipes XML string
"""
sauthor = u''
if not empty(self.author):
sauthor += '%s@' % self.author
if author is None:
sauthor += 'Cookboob'
else:
sauthor += author
header = u'<?xml version="1.0" encoding="UTF-8" ?>\n'
initial_xml = '''\
<krecipes version='2.0-beta2' lang='fr' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:noNamespaceSchemaLocation='krecipes.xsd'>
<krecipes-recipe id='1'>
</krecipes-recipe>
</krecipes>'''
doc = ET.fromstring(initial_xml)
recipe = doc.find('krecipes-recipe')
desc = ET.SubElement(recipe, 'krecipes-description')
title = ET.SubElement(desc, 'title')
title.text = self.title
authors = ET.SubElement(desc, 'author')
authors.text = sauthor
eyield = ET.SubElement(desc, 'yield')
if not empty(self.nb_person):
amount = ET.SubElement(eyield, 'amount')
if len(self.nb_person) == 1:
amount.text = '%s' % self.nb_person[0]
else:
mini = ET.SubElement(amount, 'min')
mini.text = u'%s' % self.nb_person[0]
maxi = ET.SubElement(amount, 'max')
maxi.text = u'%s' % self.nb_person[1]
etype = ET.SubElement(eyield, 'type')
etype.text = 'persons'
if not empty(self.preparation_time):
preptime = ET.SubElement(desc, 'preparation-time')
preptime.text = '%02d:%02d' % (self.preparation_time / 60, self.preparation_time % 60)
if not empty(self.picture_url):
data = urllib.urlopen(self.picture_url).read()
datab64 = base64.encodestring(data)[:-1]
pictures = ET.SubElement(desc, 'pictures')
pic = ET.SubElement(pictures, 'pic', {'format': 'JPEG', 'id': '1'})
pic.text = ET.CDATA(datab64)
if not empty(self.ingredients):
ings = ET.SubElement(recipe, 'krecipes-ingredients')
pat = re.compile('^[0-9]*')
for i in self.ingredients:
sname = u'%s' % i
samount = ''
sunit = ''
first_nums = pat.match(i).group()
if first_nums != '':
samount = first_nums
sname = i.lstrip('0123456789 ')
ing = ET.SubElement(ings, 'ingredient')
am = ET.SubElement(ing, 'amount')
am.text = samount
unit = ET.SubElement(ing, 'unit')
unit.text = sunit
name = ET.SubElement(ing, 'name')
name.text = sname
if not empty(self.instructions):
instructions = ET.SubElement(recipe, 'krecipes-instructions')
instructions.text = self.instructions
if not empty(self.comments):
ratings = ET.SubElement(recipe, 'krecipes-ratings')
for c in self.comments:
rating = ET.SubElement(ratings, 'rating')
if c.author:
rater = ET.SubElement(rating, 'rater')
rater.text = c.author
if c.text:
com = ET.SubElement(rating, 'comment')
com.text = c.text
crits = ET.SubElement(rating, 'criterion')
if c.rate:
crit = ET.SubElement(crits, 'criteria')
critname = ET.SubElement(crit, 'name')
critname.text = 'Overall'
critstars = ET.SubElement(crit, 'stars')
critstars.text = c.rate.split('/')[0]
return header + ET.tostring(doc, encoding='UTF-8', pretty_print=True).decode('utf-8')
class ICapRecipe(IBaseCap):
"""
Recipe providers.
"""
def iter_recipes(self, pattern):
"""
Search recipes and iterate on results.
:param pattern: pattern to search
:type pattern: str
:rtype: iter[:class:`Recipe`]
"""
raise NotImplementedError()
def get_recipe(self, _id):
"""
Get a recipe object from an ID.
:param _id: ID of recipe
:type _id: str
:rtype: :class:`Recipe`
"""
raise NotImplementedError()
|
flavour/ssf
|
models/00_tables.py
|
Python
|
mit
| 23,677
| 0.007011
|
# -*- coding: utf-8 -*-
"""
Global tables and re-usable fields
"""
# =============================================================================
# Import models
#
from s3.s3model import S3Model
import eden as models
current.models = models
current.s3db = s3db = S3Model()
# Explicit import statements to have them reload automatically in debug mode
import eden.asset
import eden.auth
import eden.cms
import eden.delphi
import eden.doc
import eden.dvi
import eden.event
import eden.fire
import eden.gis
import eden.hms
import eden.hrm
import eden.inv
import eden.irs
import eden.msg
import eden.ocr
import eden.org
import eden.patient
import eden.pr
import eden.sit
import eden.proc
import eden.project
import eden.req
import eden.scenario
import eden.supply
import eden.support
import eden.survey
import eden.sync
import eden.vehicle
# =============================================================================
# Import S3 meta fields into global namespace
#
from s3.s3fields import *
# =============================================================================
# Record authorship meta-fields
# Author of a record
s3_meta_created_by = S3ReusableField("created_by", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
represent=s3_user_represent,
ondelete="RESTRICT")
# Last author of a record
s3_meta_modified_by = S3ReusableField("modified_by", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
update=session.auth.user.id
if auth.is_logged_in()
else None,
represent=s3_user_represent,
ondelete="RESTRICT")
def s3_authorstamp():
return (s3_meta_created_by(),
s3_meta_modified_by())
# =============================================================================
# Record ownership meta-fields
# Individual user who owns the record
s3_meta_owned_by_user = S3ReusableField("owned_by_user", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
represent=lambda id: \
id and s3_user_represent(id) or UNKNOWN_OPT,
ondelete="RESTRICT")
# Role of users who collectively own the record
s3_meta_owned_by_role = S3ReusableField("owned_by_role", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_role_represent)
# Role of the Organisation the record belongs to
s3_meta_owned_by_organisation = S3ReusableField("owned_by_organisation", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_role_represent)
# Role of the Facility the record belongs to
s3_meta_owned_by_facility = S3ReusableField("owned_by_facility", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_role_represent)
def s3_ownerstamp():
return (s3_meta_owned_by_user(),
s3_meta_owned_by_role(),
s3_meta_owned_by_organisation(),
s3_meta_owned_by_facility())
# Make available for S3Models
s3.ownerstamp = s3_ownerstamp
# =============================================================================
def s3_timestamp():
return (s3_meta_created_on(),
s3_meta_modified_on(),
)
# Make available for S3Models
s3.timestamp = s3_timestamp
# =============================================================================
# Common meta-fields
# @todo: can this be moved into s3fields.py?
#
def s3_meta_fields():
fields = (s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_created_by(),
s3_meta_modified_by(),
s3_meta_owned_by_user(),
s3_meta_owned_by_role(),
s3_meta_owned_by_organisation(),
s3_meta_owned_by_facility())
return fields
# Make available for S3Models
s3.meta_fields = s3_meta_fields
# =============================================================================
response.s3.all_meta_field_names = [field.name for field in
[s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_created_by(),
s3_meta_modified_by(),
s3_meta_owned_by_user(),
s3_meta_owned_by_role(),
s3_meta_owned_by_organisation(),
s3_meta_owned_by_facility()
]]
# =============================================================================
# Reusable field for scheduler task links
#
scheduler_task_id = S3ReusableField("scheduler_task_id",
"reference %s" % s3base.S3Task.TASK_TABLENAME,
ondelete="CASCADE")
s3.scheduler_task_id = scheduler_task_id
# =============================================================================
# Reusable roles fields for map layer permissions management (GIS)
role_required = S3ReusableField("role_required", db.auth_group,
sortby="role",
requires = IS_NULL_OR(IS_ONE_OF(db,
"auth_group.id",
"%(role)s",
zero=T("Public"))),
widget = S3AutocompleteWidget(
"auth",
"group",
fieldname="role"),
represent = s3_role_represent,
label = T("Role Required"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Role Required"),
T("If this record should be restricted then select which role is required to access the record here."))),
ondelete = "RESTRICT")
roles_permitted = S3ReusableField("roles_permitted", 'list:reference auth_group',
sortby="role",
|
igor-sfdc/qt-wk
|
doc/src/diagrams/contentspropagation/customwidget.py
|
Python
|
lgpl-2.1
| 6,222
| 0.007232
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the documentation of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## No Commercial Usage
## This file contains pre-release code and may not be distributed.
## You may use this file in accordance with the terms and conditions
## contained in the Technology Preview License Agreement accompanying
## this package.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
##
##
##
##
##
##
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class CustomWidget(QWidget):
    def __init__(self, parent, fake = False):
|
QWidget.__init__(self, parent)
gradient = QLinearGradient(QPointF(0, 0), QPointF(100.0, 100.0))
baseColor = QColor(0xa6, 0xce, 0x39, 0x7f)
|
        gradient.setColorAt(0.0, baseColor.light(150))
gradient.setColorAt(0.75, baseColor.light(75))
self.brush = QBrush(gradient)
self.fake = fake
self.fakeBrush = QBrush(Qt.red, Qt.DiagCrossPattern)
qtPath = QPainterPath()
qtPath.setFillRule(Qt.OddEvenFill)
qtPath.moveTo(-45.0, -20.0)
qtPath.lineTo(0.0, -45.0)
qtPath.lineTo(45.0, -20.0)
qtPath.lineTo(45.0, 45.0)
qtPath.lineTo(-45.0, 45.0)
qtPath.lineTo(-45.0, -20.0)
qtPath.closeSubpath()
qtPath.moveTo(15.0, 5.0)
qtPath.lineTo(35.0, 5.0)
qtPath.lineTo(35.0, 40.0)
qtPath.lineTo(15.0, 40.0)
qtPath.lineTo(15.0, 5.0)
        qtPath.closeSubpath()
        qtPath.moveTo(-35.0, -15.0)
qtPath.lineTo(-10.0, -15.0)
qtPath.lineTo(-10.0, 10.0)
qtPath.lineTo(-35.0, 10.0)
qtPath.lineTo(-35.0, -15.0)
qtPath.closeSubpath()
self.path = qtPath
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
if self.fake:
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.fillRect(event.rect(), self.fakeBrush)
painter.setBrush(self.brush)
painter.translate(60, 60)
painter.drawPath(self.path)
painter.end()
def sizeHint(self):
return QSize(120, 120)
def minimumSizeHint(self):
return QSize(120, 120)
if __name__ == "__main__":
try:
qt = sys.argv[1]
except IndexError:
qt = "4.1"
if qt != "4.0" and qt != "4.1":
sys.stderr.write("Usage: %s [4.0|4.1]\n" % sys.argv[0])
sys.exit(1)
app = QApplication(sys.argv)
exec_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
label = QLabel()
label.setPixmap(QPixmap(os.path.join(exec_dir, "background.png")))
layout = QGridLayout()
label.setLayout(layout)
if qt == "4.0":
layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Opaque (Default)", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Contents Propagated (Default)", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
contentsWidget = CustomWidget(label)
contentsWidget.setAttribute(Qt.WA_ContentsPropagated, True)
layout.addWidget(contentsWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With WA_ContentsPropagated set", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
autoFillWidget = CustomWidget(label)
autoFillWidget.setAutoFillBackground(True)
layout.addWidget(autoFillWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With autoFillBackground set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
noBackgroundWidget = CustomWidget(label, fake = True)
noBackgroundWidget.setAttribute(Qt.WA_NoBackground, True)
layout.addWidget(noBackgroundWidget, 0, 2, Qt.AlignCenter)
caption = QLabel("With WA_NoBackground set", label)
caption.setWordWrap(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
opaqueWidget = CustomWidget(label, fake = True)
opaqueWidget.setAttribute(Qt.WA_OpaquePaintEvent, True)
layout.addWidget(opaqueWidget, 0, 2, Qt.AlignCenter)
caption = QLabel("With WA_OpaquePaintEvent set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
label.setWindowTitle("Qt 4.0: Painting Custom Widgets")
elif qt == "4.1":
label.setWindowTitle("Qt 4.1: Painting Custom Widgets")
label.resize(404, 160)
label.show()
sys.exit(app.exec_())
|
wizzard/sdk
|
tests/sync_test_megacli.py
|
Python
|
bsd-2-clause
| 5,819
| 0.003781
|
"""
Application for testing syncing algorithm
(c) 2013-2014 by Mega Limited, Wellsford, New Zealand
This file is part of the MEGA SDK - Client Access Engine.
Applications using the MEGA API must present a valid application key
and comply with the rules set forth in the Terms of Service.
The MEGA SDK is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@copyright Simplified (2-clause) BSD License.
You should have received a copy of the license along with this
program.
"""
import sys
import os
import time
import shutil
import unittest
import xmlrunner
import subprocess
import re
from sync_test_app import SyncTestApp
from sync_test import SyncTest
import logging
import argparse
class SyncTestMegaCliApp(SyncTestApp):
"""
operates with megacli application
"""
def __init__(self, local_mount_in, local_mount_out, delete_tmp_files=True, use_large_files=True, check_if_alive=True):
"""
local_mount_in: local upsync folder
local_mount_out: local downsync folder
"""
self.work_dir = os.path.join(".", "work_dir")
SyncTestApp.__init__(self, local_mount_in, local_mount_out, self.work_dir, delete_tmp_files, use_large_files)
self.check_if_alive = check_if_alive
def sync(self):
time.sleep(5)
def start(self):
# try to create work dir
return True
def finish(self):
try:
shutil.rmtree(self.work_dir)
except OSError, e:
logging.error("Failed to remove dir: %s (%s)" % (self.work_dir, e))
def is_alive(self):
"""
return True if application instance is running
"""
if not self.check_if_alive:
return True
s = subprocess.Popen(["ps", "axw"], stdout=subprocess.PIPE)
for x in s.stdout:
if re.search("megacli", x):
return True
return False
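    # Hedged alternative sketch (assumes the third-party psutil package,
    # which this test does not use): the same liveness check without
    # spawning an external `ps` process.
    #
    # import psutil
    # def is_alive(self):
    #     if not self.check_if_alive:
    #         return True
    #     return any('megacli' in (p.name() or '')
    #                for p in psutil.process_iter())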
def pause(self):
"""
pause application
"""
# TODO: implement this !
raise NotImplementedError("Not Implemented !")
def unpause(self):
"""
unpause application
"""
# TODO: implement this !
raise NotImplementedError("Not Implemented !")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--test1", help="test_create_delete_files", action="store_true")
parser.add_argument("--test2", help="test_create_rename_delete_files", action="store_true")
parser.add_argument("--test3", help="test_create_delete_dirs", action="store_true")
parser.add_argument("--test4", help="test_create_rename_delete_dirs", action="store_true")
parser.add_argument("--test5", help="test_sync_files_write", action="store_true")
parser.add_argument("--test6", help="test_local_operations", action="store_true")
parser.add_argument("--test7", help="test_update_mtime", action="store_true")
parser.add_argument("--test8", help="test_create_rename_delete_unicode_files_dirs", action="store_true")
parser.add_argument("-a", "--all", help="run all tests", action="store_true")
parser.add_argument("-b", "--basic", help="run basic, stable tests", action="store_true")
parser.add_argument("-d", "--debug", help="use debug output", action="store_true")
parser.add_argument("-l", "--large", help="use large files for testing", action="store_true")
parser.add_argument("-n", "--nodelete", help="Do not delete work files", action="store_false")
parser.add_argument("-c", "--check", help="Do not check if megacli is running (useful, if other application is used for testing)", action="store_false")
parser.add_argument("upsync_dir", help="local upsync directory")
parser.add_argument("downsync_dir", help="local downsync directory")
args = parser.parse_args()
if args.debug:
lvl = logging.DEBUG
else:
lvl = logging.INFO
if args.all:
args.test1 = args.test2 = args.test3 = args.test4 = args.test5 = args.test6 = args.test7 = args.test8 = True
if args.basic:
|
        args.test1 = args.test2 = args.test3 = args.test4 = True
logging.StreamHandler(sys.stdout)
logging.basicConfig(format='[%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=lvl)
logging.info("")
logging.info("1) Start the first [megacli] and run the following command: sync " + args.upsync_dir + " [remote folder]")
logging.info("2) Start the second [megacli] and run the following command: sync " + args.downsync_dir + " [remote f
|
older]")
logging.info("3) Wait for both folders get fully synced")
logging.info("4) Run: python %s", sys.argv[0])
logging.info("")
time.sleep(5)
with SyncTestMegaCliApp(args.upsync_dir, args.downsync_dir, args.nodelete, args.large, args.check) as app:
suite = unittest.TestSuite()
if args.test1:
suite.addTest(SyncTest("test_create_delete_files", app))
if args.test2:
suite.addTest(SyncTest("test_create_rename_delete_files", app))
if args.test3:
suite.addTest(SyncTest("test_create_delete_dirs", app, ))
if args.test4:
suite.addTest(SyncTest("test_create_rename_delete_dirs", app))
if args.test5:
suite.addTest(SyncTest("test_sync_files_write", app))
if args.test6:
suite.addTest(SyncTest("test_local_operations", app))
if args.test7:
suite.addTest(SyncTest("test_update_mtime", app))
if args.test8:
suite.addTest(SyncTest("test_create_rename_delete_unicode_files_dirs", app))
testRunner = xmlrunner.XMLTestRunner(output='test-reports')
testRunner.run(suite)
|
anselmobd/fo2
|
src/estoque/queries/grade_estoque.py
|
Python
|
mit
| 7,421
| 0.000404
|
from utils.functions.models import rows_to_dict_list_lower, GradeQtd
def grade_estoque(
cursor, ref=None, dep=None, data_ini=None, tipo_grade=None,
modelo=None, referencia=None):
filtro_modelo = ''
filtro_modelo_mask = ''
if modelo is not None:
filtro_modelo_mask = f'''--
AND
TRIM(
LEADING '0' FROM (
REGEXP_REPLACE(
{{}},
'^[abAB]?([0-9]+)[a-zA-Z]*$',
'\\1'
)
)
) = '{modelo}'
'''
filtro_referencia = ''
filtro_referencia_mask = ''
if referencia is not None:
filtro_referencia_mask = f'''--
AND {{}} = '{referencia}'
'''
teste_dep = ''
if type(dep) is tuple:
teste_dep = ",".join(map(str, dep))
teste_dep = f" IN ({teste_dep})"
else:
teste_dep = f" = '{dep}'"
filtro_data_ini = ''
if data_ini is not None:
filtro_data_ini = (
"AND ee.DATA_MOVIMENTO >= "
f"TO_DATE('{data_ini}', 'yyyy-mm-dd')"
)
if tipo_grade is None:
tipo_grade = {
't': 'c', # tamanho como cadastrado
'c': 'e', # cores com estoque
}
# Grade de OP
grade = GradeQtd(cursor)
# tamanhos
if tipo_grade['t'] == 'm': # com movimento
filtro_ref = ''
if ref is not None:
filtro_ref = f"AND ee.GRUPO_ESTRUTURA = '{ref}'"
if modelo is not None:
filtro_modelo = filtro_modelo_mask.format('ee.GRUPO_ESTRUTURA')
if referencia is not None:
filtro_referencia = filtro_referencia_mask.format('ee.GRUPO_ESTRUTURA')
sql = f'''
SELECT DISTINCT
ee.SUBGRUPO_ESTRUTURA TAMANHO
, tam.ORDEM_TAMANHO SEQUENCIA_TAMANHO
FROM ESTQ_300_ESTQ_310 ee -- mov.de estoque em aberto e fechado
LEFT JOIN BASI_220 tam
ON tam.TAMANHO_REF = ee.SUBGRUPO_ESTRUTURA
WHERE ee.NIVEL_ESTRUTURA = 1
{filtro_ref} -- filtro_ref
{filtro_modelo} -- filtro_modelo
{filtro_referencia} -- filtro_referencia
AND ee.CODIGO_DEPOSITO {teste_dep}
{filtro_data_ini} -- filtro_data_ini
ORDER BY
2
'''
elif tipo_grade['t'] == 'e': # com estoque
filtro_ref = ''
if ref is not None:
filtro_ref = f"AND e.CDITEM_GRUPO = '{ref}'"
if modelo is not None:
filtro_modelo = filtro_modelo_mask.format('e.CDITEM_GRUPO')
if referencia is not None:
filtro_referencia = filtro_referencia_mask.format('e.CDITEM_GRUPO')
sql = f'''
SELECT DISTINCT
e.CDITEM_SUBGRUPO TAMANHO
, tam.ORDEM_TAMANHO SEQUENCIA_TAMANHO
FROM ESTQ_040 e
LEFT JOIN BASI_220 tam
ON tam.TAMANHO_REF = e.CDITEM_SUBGRUPO
WHERE e.CDITEM_NIVEL99 = 1
{filtro_ref} -- filtro_ref
{filtro_modelo} -- filtro_modelo
{filtro_referencia} -- filtro_referencia
AND e.DEPOSITO {teste_dep}
AND e.QTDE_ESTOQUE_ATU <> 0
ORDER BY
2
'''
elif tipo_grade['t'] == 'c': # como cadastrado
filtro_ref = ''
if ref is not None:
filtro_ref = f"AND t.BASI030_REFERENC = '{ref}'"
if modelo is not None:
filtro_modelo = filtro_modelo_mask.format('t.BASI030_REFERENC')
if referencia is not None:
filtro_referencia = filtro_referencia_mask.format('t.BASI030_REFERENC')
sql = f'''
SELECT DISTINCT
t.TAMANHO_REF TAMANHO
, tam.ORDEM_TAMANHO SEQUENCIA_TAMANHO
FROM basi_020 t
LEFT JOIN BASI_220 tam
ON tam.TAMANHO_REF = t.TAMANHO_REF
WHERE t.BASI030_NIVEL030 = 1
{filtro_ref} -- filtro_ref
{filtro_modelo} -- filtro_modelo
{filtro_referencia} -- filtro_referencia
ORDER BY
2
'''
|
    grade.col(
id='TAMANHO',
name='Tamanho',
total='Total',
forca_total=True,
sql=sql,
)
# cores
if tipo_grade['c'] == 'm': # com movimento
filtro_ref = ''
if ref is not None:
filtro_ref = f"AND ee.GRUPO_ESTRUTURA = '{ref}'"
if modelo is not None:
|
            filtro_modelo = filtro_modelo_mask.format('ee.GRUPO_ESTRUTURA')
if referencia is not None:
filtro_referencia = filtro_referencia_mask.format('ee.GRUPO_ESTRUTURA')
sql = f'''
SELECT DISTINCT
ee.ITEM_ESTRUTURA SORTIMENTO
FROM ESTQ_300_ESTQ_310 ee -- mov. de estoque em aberto e fechado
WHERE ee.NIVEL_ESTRUTURA = 1
{filtro_ref} -- filtro_ref
{filtro_modelo} -- filtro_modelo
{filtro_referencia} -- filtro_referencia
AND ee.CODIGO_DEPOSITO {teste_dep}
{filtro_data_ini} -- filtro_data_ini
ORDER BY
ee.ITEM_ESTRUTURA
'''
elif tipo_grade['c'] == 'e': # com estoque
filtro_ref = ''
if ref is not None:
filtro_ref = f"AND e.CDITEM_GRUPO = '{ref}'"
if modelo is not None:
filtro_modelo = filtro_modelo_mask.format('e.CDITEM_GRUPO')
if referencia is not None:
filtro_referencia = filtro_referencia_mask.format('e.CDITEM_GRUPO')
sql = f'''
SELECT DISTINCT
e.CDITEM_ITEM SORTIMENTO
FROM ESTQ_040 e
WHERE e.CDITEM_NIVEL99 = 1
{filtro_ref} -- filtro_ref
{filtro_modelo} -- filtro_modelo
{filtro_referencia} -- filtro_referencia
AND e.DEPOSITO {teste_dep}
AND e.QTDE_ESTOQUE_ATU <> 0
ORDER BY
e.CDITEM_ITEM
'''
grade.row(
id='SORTIMENTO',
name='Cor',
name_plural='Cores',
total='Total',
forca_total=True,
sql=sql,
)
# sortimento
filtro_ref = ''
if ref is not None:
filtro_ref = f"AND e.CDITEM_GRUPO = '{ref}'"
if modelo is not None:
filtro_modelo = filtro_modelo_mask.format('e.CDITEM_GRUPO')
if referencia is not None:
filtro_referencia = filtro_referencia_mask.format('e.CDITEM_GRUPO')
sql = f'''
SELECT
e.CDITEM_SUBGRUPO TAMANHO
, e.CDITEM_ITEM SORTIMENTO
, SUM(e.QTDE_ESTOQUE_ATU) QUANTIDADE
FROM ESTQ_040 e
WHERE e.LOTE_ACOMP = 0
AND e.CDITEM_NIVEL99 = 1
{filtro_ref} -- filtro_ref
{filtro_modelo} -- filtro_modelo
{filtro_referencia} -- filtro_referencia
AND e.DEPOSITO {teste_dep}
GROUP BY
e.CDITEM_SUBGRUPO
, e.CDITEM_ITEM
ORDER BY
e.CDITEM_SUBGRUPO
, e.CDITEM_ITEM
'''
grade.value(
id='QUANTIDADE',
sql=sql,
)
fields = grade.table_data['fields']
data = grade.table_data['data']
style = grade.table_data['style']
result = (
grade.table_data['header'],
fields,
data,
style,
grade.total,
)
return result
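# Hedged usage sketch (the cursor object, reference code and deposit codes
# below are illustrative assumptions, not values from the source):
#
# header, fields, data, style, total = grade_estoque(
#     cursor, ref='12345', dep=(101, 102), data_ini='2020-01-01')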
|
ContinuumIO/dask
|
dask/array/tests/test_xarray.py
|
Python
|
bsd-3-clause
| 474
| 0
|
import pytest
import dask.array as da
|
from ..utils import assert_eq
xr = pytest.importorskip("xarray")
def test_mean():
y = da.mean(xr.DataArray([1, 2, 3.0]))
assert isinstance(y, da.Array)
assert_eq(y, y)
def test_asarray():
y = da.asarray(xr.DataArray([1, 2, 3.0]))
assert isinstance(y, da.Array)
assert_eq(y, y)
def test_asanyarray():
y = da.asanyarray(xr.DataArray([1, 2, 3.0]))
assert isinstance(y, da.Array)
|
    assert_eq(y, y)
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.py
|
Python
|
mit
| 1,585
| 0
|
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
http://dlmf.nist.gov/8.20#ii
"""
from __future__ import division, print_function, absolute_import
import os
import warnings
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
A = [Poly(1, x)]
for k in range(K):
A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
return A
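# Hedged illustration (not in the source): generate_A() implements the
# recurrence A_{k+1}(x) = (1 - 2*k*x)*A_k(x) + x*(x + 1)*A_k'(x) with
# A_0 = 1, so the first terms are A_0 = 1, A_1 = 1, A_2 = 1 - 2*x.
# A quick check, assuming sympy imported successfully above:
#
# for k, Ak in enumerate(generate_A(3)):
#     print(k, Ak.as_expr())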
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
print(__doc__)
fn = os.path.join('..', 'cephes', 'expn.h')
K = 12
A = generate_A(K)
with open(fn + '.new', 'w') as f:
f.write(WARNING)
f.write("#define nA {}\n".format(len(A)))
for k, Ak in enumerate(A):
|
            tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
f.write("double A{}[] = {{{}}};\n".format(k, tmp))
tmp = ", ".join(["A{}".format(k) for k in range(K + 1)])
f.write("double *A[] = {{{}}};\n".format(tmp))
tmp = ", ".join([str(Ak.degree()) for Ak in A])
f.write("int Adegs[] = {{{}}}
|
;\n".format(tmp))
os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
|
trosa/forca
|
applications/admin/languages/pl.py
|
Python
|
gpl-2.0
| 15,887
| 0.020455
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyrażeniem postaci "pole1=\'nowawartość\'". Nie możesz uaktualnić lub usunąć wyników z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': 'Wierszy usuniętych: %s',
'%s rows updated': 'Wierszy uaktualnionych: %s',
'(requires internet access)': '(requires internet access)',
'(something like "it-it")': '(coś podobnego do "it-it")',
'A new version of web2py is available': 'Nowa wersja web2py jest dostępna',
'A new version of web2py is available: %s': 'Nowa wersja web2py jest dostępna: %s',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'UWAGA: Wymagane jest bezpieczne (HTTPS) połączenie lub połączenie z lokalnego adresu.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'UWAGA: TESTOWANIE NIE JEST BEZPIECZNE W ŚRODOWISKU WIELOWĄTKOWYM, TAK WIĘC NIE URUCHAMIAJ WIELU TESTÓW JEDNOCZEŚNIE.',
'ATTENTION: you cannot edit the running application!': 'UWAGA: nie można edytować uruchomionych aplikacji!',
'About': 'Informacje o',
'About application': 'Informacje o aplikacji',
'Additional code for your application': 'Additional code for your application',
'Admin is disabled because insecure channel': 'Panel administracyjny wyłączony z powodu braku bezpiecznego połączenia',
'Admin is disabled because unsecure channel': 'Panel administracyjny wyłączony z powodu braku bezpiecznego połączenia',
'Administrator Password:': 'Hasło administratora:',
'Application name:': 'Application name:',
'Are you sure you want to delete file "%s"?': 'Czy na pewno chcesz usunąć plik "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Czy na pewno chcesz usunąć wtyczkę "%s"?',
'Are you sure you want to uninstall application "%s"': 'Czy na pewno chcesz usunąć aplikację "%s"',
'Are you sure you want to uninstall application "%s"?': 'Czy na pewno chcesz usunąć aplikację "%s"?',
'Are you sure you want to upgrade web2py now?': 'Are you sure you want to upgrade web2py now?',
'Available databases and tables': 'Dostępne bazy danych i tabele',
'Cannot be empty': 'Nie może być puste',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Nie można skompilować: w Twojej aplikacji są błędy . Znajdź je, popraw a następnie spróbój ponownie.',
'Cannot compile: there are errors in your app:': 'Cannot compile: there are errors in your app:',
'Check to delete': 'Zaznacz aby usunąć',
'Checking for upgrades...': 'Sprawdzanie aktualizacji...',
'Controllers': 'Kontrolery',
|
'Create new simple application': 'Utwórz nową aplikację',
'Current request': 'Aktualne żądanie',
'Current response': 'Aktualna odpowiedź',
'Current session': 'Aktualna sesja',
'DESIGN': 'PROJEKTUJ',
'Date and Time': 'Data i godzina',
'Delete': 'Usuń',
'Delete:': 'Usuń:',
'Deploy on Google App Engine': 'Umieść na Google App Engine',
'Design for': 'Projekt dla',
'EDIT': 'EDYTUJ',
|
'Edit application': 'Edycja aplikacji',
'Edit current record': 'Edytuj aktualny rekord',
'Editing Language file': 'Edytuj plik tłumaczeń',
'Editing file': 'Edycja pliku',
'Editing file "%s"': 'Edycja pliku "%s"',
'Enterprise Web Framework': 'Enterprise Web Framework',
'Error logs for "%(app)s"': 'Wpisy błędów dla "%(app)s"',
'Exception instance attributes': 'Exception instance attributes',
'Functions with no doctests will result in [passed] tests.': 'Funkcje bez doctestów będą dołączone do [zaliczonych] testów.',
'Hello World': 'Witaj Świecie',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Jeżeli powyższy raport zawiera numer biletu błędu, oznacza to błąd podczas wykonywania kontrolera przez próbą uruchomienia doctestów. Zazwyczaj jest to spowodowane nieprawidłowymi wcięciami linii kodu lub błędami w module poza ciałem funkcji.\nTytuł w kolorze zielonym oznacza, ze wszystkie (zdefiniowane) testy zakończyły się sukcesem. W tej sytuacji ich wyniki nie są pokazane.',
'Import/Export': 'Importuj/eksportuj',
'Installed applications': 'Zainstalowane aplikacje',
'Internal State': 'Stan wewnętrzny',
'Invalid Query': 'Błędne zapytanie',
'Invalid action': 'Błędna akcja',
'Language files (static strings) updated': 'Pliki tłumaczeń (ciągi statyczne) zostały uaktualnione',
'Languages': 'Tłumaczenia',
'Last saved on:': 'Ostatnio zapisany:',
'License for': 'Licencja dla',
'Login': 'Zaloguj',
'Login to the Administrative Interface': 'Logowanie do panelu administracyjnego',
'Models': 'Modele',
'Modules': 'Moduły',
'NO': 'NIE',
'New Record': 'Nowy rekord',
'New application wizard': 'New application wizard',
'New simple application': 'New simple application',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Original/Translation': 'Oryginał/tłumaczenie',
'PAM authenticated user, cannot change password here': 'PAM authenticated user, cannot change password here',
'Peeking at file': 'Podgląd pliku',
'Plugin "%s" in application': 'Wtyczka "%s" w aplikacji',
'Plugins': 'Wtyczki',
'Powered by': 'Zasilane przez',
'Query:': 'Zapytanie:',
'Resolve Conflict file': 'Rozwiąż konflikt plików',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wierszy wybranych',
'Saved file hash:': 'Suma kontrolna zapisanego pliku:',
'Searching:': 'Searching:',
'Static files': 'Pliki statyczne',
'Sure you want to delete this object?': 'Czy na pewno chcesz usunąć ten obiekt?',
'TM': 'TM',
'Testing application': 'Testowanie aplikacji',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'wartość\'". Takie coś jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'There are no controllers': 'Brak kontrolerów',
'There are no models': 'Brak modeli',
'There are no modules': 'Brak modułów',
'There are no plugins': 'There are no plugins',
'There are no static files': 'Brak plików statycznych',
'There are no translators, only default language is supported': 'Brak plików tłumaczeń, wspierany jest tylko domyślny język',
'There are no views': 'Brak widoków',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'This is the %(filename)s template': 'To jest szablon %(filename)s',
'Ticket': 'Bilet',
'To create a plugin, name a file/folder plugin_[name]': 'Aby utworzyć wtyczkę, nazwij plik/katalog plugin_[nazwa]',
'Translation strings for the application': 'Translation strings for the application',
'Unable to check for upgrades': 'Nie można sprawdzić aktualizacji',
'Unable to download': 'Nie można ściągnąć',
'Unable to download app': 'Nie można ściągnąć aplikacji',
'Unable to download app because:': 'Unable to download app because:',
'Unable to download because': 'Unable to download because',
'Update:': 'Uaktualnij:',
'Upload & install packed application': 'Upload & install packed application',
'Upload a package:': 'Upload a package:',
'Upload existing application': 'Wyślij istniejącą aplikację',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Użyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapytań.',
'Use an url:': 'Use an url:',
'Version': 'Wersja',
'Views': 'Widoki',
'Welcome to web2py': 'Witaj w web2py',
'YES': 'TAK',
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/network/f5/bigip_gtm_server.py
|
Python
|
gpl-3.0
| 61,691
| 0.001897
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_server
short_description: Manages F5 BIG-IP GTM servers
description:
- Manage BIG-IP server configuration. This module is able to manipulate the server
definitions in a BIG-IP.
version_added: 2.5
options:
name:
description:
- The name of the server.
required: True
state:
description:
|
      - The server state. If C(absent), an attempt to delete the server will be made.
|
This will only succeed if this server is not in use by a virtual server.
C(present) creates the server and enables it. If C(enabled), enable the server
if it exists. If C(disabled), create the server if needed, and set state to
C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
datacenter:
description:
- Data center the server belongs to. When creating a new GTM server, this value
is required.
devices:
description:
- Lists the self IP addresses and translations for each device. When creating a
new GTM server, this value is required. This list is a complex list that
specifies a number of keys.
- The C(name) key specifies a name for the device. The device name must
be unique per server. This key is required.
- The C(address) key contains an IP address, or list of IP addresses, for the
destination server. This key is required.
- The C(translation) key contains an IP address to translate the C(address)
value above to. This key is optional.
- Specifying duplicate C(name) fields is a supported means of providing device
addresses. In this scenario, the addresses will be assigned to the C(name)'s list
of addresses.
server_type:
description:
- Specifies the server type. The server type determines the metrics that the
system can collect from the server. When creating a new GTM server, the default
value C(bigip) is used.
choices:
- alteon-ace-director
- cisco-css
- cisco-server-load-balancer
- generic-host
- radware-wsd
- windows-nt-4.0
- bigip
- cisco-local-director-v2
- extreme
- generic-load-balancer
- sun-solaris
- cacheflow
- cisco-local-director-v3
- foundry-server-iron
- netapp
- windows-2000-server
aliases:
- product
link_discovery:
description:
- Specifies whether the system auto-discovers the links for this server. When
creating a new GTM server, if this parameter is not specified, the default
value C(disabled) is used.
- If you set this parameter to C(enabled) or C(enabled-no-delete), you must
also ensure that the C(virtual_server_discovery) parameter is also set to
C(enabled) or C(enabled-no-delete).
choices:
- enabled
- disabled
- enabled-no-delete
virtual_server_discovery:
description:
- Specifies whether the system auto-discovers the virtual servers for this server.
When creating a new GTM server, if this parameter is not specified, the default
value C(disabled) is used.
choices:
- enabled
- disabled
- enabled-no-delete
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
iquery_options:
description:
- Specifies whether the Global Traffic Manager uses this BIG-IP
system to conduct a variety of probes before delegating traffic to it.
suboptions:
allow_path:
description:
- Specifies that the system verifies the logical network route between a data
center server and a local DNS server.
type: bool
allow_service_check:
description:
- Specifies that the system verifies that an application on a server is running,
by remotely running the application using an external service checker program.
type: bool
allow_snmp:
description:
- Specifies that the system checks the performance of a server running an SNMP
agent.
type: bool
version_added: 2.7
monitors:
description:
- Specifies the health monitors that the system currently uses to monitor this resource.
- When C(availability_requirements.type) is C(require), you may only have a single monitor in the
C(monitors) list.
version_added: 2.8
availability_requirements:
description:
- Specifies, if you activate more than one health monitor, the number of health
monitors that must receive successful responses in order for the link to be
considered available.
suboptions:
type:
description:
- Monitor rule type when C(monitors) is specified.
- When creating a new pool, if this value is not specified, the default of 'all' will be used.
choices: ['all', 'at_least', 'require']
at_least:
description:
- Specifies the minimum number of active health monitors that must be successful
before the link is considered up.
- This parameter is only relevant when a C(type) of C(at_least) is used.
- This parameter will be ignored if a type of either C(all) or C(require) is used.
number_of_probes:
description:
- Specifies the minimum number of probes that must succeed for this server to be declared up.
- When creating a new virtual server, if this parameter is specified, then the C(number_of_probers)
parameter must also be specified.
- The value of this parameter should always be B(lower) than, or B(equal to), the value of C(number_of_probers).
- This parameter is only relevant when a C(type) of C(require) is used.
- This parameter will be ignored if a type of either C(all) or C(at_least) is used.
number_of_probers:
description:
- Specifies the number of probers that should be used when running probes.
- When creating a new virtual server, if this parameter is specified, then the C(number_of_probes)
parameter must also be specified.
          - The value of this parameter should always be B(higher) than, or B(equal to), the value of C(number_of_probes).
- This parameter is only relevant when a C(type) of C(require) is used.
- This parameter will be ignored if a type of either C(all) or C(at_least) is used.
version_added: 2.8
prober_preference:
description:
- Specifies the type of prober to use to monitor this server's resources.
- This option is ignored in C(TMOS) version C(12.x).
- From C(TMOS) version C(13.x) and up, when prober_preference is set to C(pool)
a C(prober_pool) parameter must be specified.
choices:
- inside-datacenter
- outside-datacenter
- inherit
- pool
version_added: 2.8
prober_fallback:
description:
- Specifies the type of prober to use to monitor this server's resources
when the preferred prober is not available.
- This option is ignored in C(TMOS) version C(12.x).
- From C(TMOS) version C(13.x) and up, when prober_preference is set to C(pool)
a C(prober_pool) parameter must be specified.
- The choices are mutually exclusive with prober_preference parameter,
with the exception of C(any-available) or C(none) option.
choices:
- any
- inside-datacenter
- outside-datacenter
- inherit
- pool
- none
version_added: 2.8
prober_pool:
description:
- Specifies the name of the prober pool to use to monitor this se
|
slarosa/QGIS
|
python/plugins/sextante/algs/ftools/ExtentFromLayer.py
|
Python
|
gpl-2.0
| 5,976
| 0.002677
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentFromLayer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.parameters.ParameterBoolean import ParameterBoolean
from sextante.outputs.OutputVector import OutputVector
class ExtentFromLayer(GeoAlgorithm):
INPUT_LAYER = "INPUT_LAYER"
BY_FEATURE = "BY_FEATURE"
OUTPUT = "OUTPUT"
#===========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/icons/layer_extent.png")
#===========================================================================
def defineCharacteristics(self):
self.name = "Polygon from layer extent"
self.group = "Vector general tools"
self.addParameter(ParameterVector(self.INPUT_LAYER, "Input layer", ParameterVector.VECTOR_TYPE_ANY))
self.addParameter(ParameterBoolean(self.BY_FEATURE, "Calculate extent for each feature separately", False))
self.addOutput(OutputVector(self.OUTPUT, "Output layer"))
def processAlgorithm(self, progress):
layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT_LAYER))
byFeature = self.getParameterValue(self.BY_FEATURE)
fields = [ QgsField("MINX", QVariant.Double),
QgsField("MINY", QVariant.Double),
QgsField("MAXX", QVariant.Double),
QgsField("MAXY", QVariant.Double),
QgsField("CNTX", QVariant.Double),
QgsField("CNTY", QVariant.Double),
QgsField("AREA", QVariant.Double),
QgsField("PERIM", QVariant.Double),
QgsField("HEIGHT", QVariant.Double),
QgsField("WIDTH", QVariant.Double)
]
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
QGis.WKBPolygon, layer.crs())
if byFeature:
self.featureExtent(layer, writer, progress)
else:
self.layerExtent(layer, writer, progress)
del writer
def layerExtent(self, layer, writer, progress):
rect = layer.extent()
minx = rect.xMinimum()
miny = rect.yMinimum()
maxx = rect.xMaximum()
maxy = rect.yMaximum()
height = rect.height()
width = rect.width()
cntx = minx + (width / 2.0)
cnty = miny + (height / 2.0)
area = width * height
perim = (2 * width) + (2 * height)
rect = [QgsPoint(minx, miny),
QgsPoint(minx, maxy),
QgsPoint(maxx, maxy),
QgsPoint(maxx, miny),
QgsPoint(minx, miny)
]
geometry = QgsGeometry().fromPolygon([rect])
feat = QgsFeature()
feat.setGeometry(geometry)
attrs = [QVariant(minx),
QVariant(miny),
QVariant(maxx),
QVariant(maxy),
QVariant(cntx),
QVariant(cnty),
QVariant(area),
QVariant(perim),
QVariant(height),
QVariant(width)
]
feat.setAttributes(attrs)
writer.addFeature(feat)
def featureExtent(self, layer, writer, progress):
current = 0
features = QGisLayers.features(layer)
total = 100.0 / float(len(features))
feat = QgsFeature()
for f in features:
rect = f.geometry().boundingBox()
minx = rect.xMinimum()
miny = rect.yMinimum()
maxx = rect.xMaximum()
maxy = rect.yMaximum()
|
            height = rect.height()
|
            width = rect.width()
cntx = minx + (width / 2.0)
cnty = miny + (height / 2.0)
area = width * height
perim = (2 * width) + (2 * height)
rect = [QgsPoint(minx, miny),
QgsPoint(minx, maxy),
QgsPoint(maxx, maxy),
QgsPoint(maxx, miny),
QgsPoint(minx, miny)
]
geometry = QgsGeometry().fromPolygon([rect])
feat.setGeometry(geometry)
attrs = [QVariant(minx),
QVariant(miny),
QVariant(maxx),
QVariant(maxy),
QVariant(cntx),
QVariant(cnty),
QVariant(area),
QVariant(perim),
QVariant(height),
QVariant(width)
]
feat.setAttributes(attrs)
writer.addFeature(feat)
current += 1
progress.setPercentage(int(current * total))
|
pinellolab/haystack_bio
|
haystack/run_pipeline.py
|
Python
|
agpl-3.0
| 14,360
| 0.005292
|
from __future__ import division
import os
import sys
import glob
import shutil
import argparse
import multiprocessing
import subprocess as sb
from haystack_common import check_file, HAYSTACK_VERSION
import logging
logging.basicConfig(level=logging.INFO,
format='%(levelname)-5s @ %(asctime)s:\n\t %(message)s \n',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info
#from memory_profiler import profile
#f = open('pipeline_memory.txt', 'w+')
def get_args_pipeline():
# mandatory
parser = argparse.ArgumentParser(description='HAYSTACK Parameters')
parser.add_argument('samples_filename_or_bam_folder', type=str,
                        help='A tab delimited file with, in each row, (1) a sample name, (2) the path to the corresponding bam filename, and (3, optional) the path to the corresponding gene expression filename.')
parser.add_argument('genome_name', type=str, help='Genome assembly to use from UCSC (for example hg19, mm9, etc.)')
# optional
parser.add_argument('--name', help='Define a custom output filename for the report', default='')
parser.add_argument('--output_directory', type=str, help='Output directory (default: current directory)',
default='')
parser.add_argument('--bin_size', type=int, help='bin size to use(default: 500bp)', default=500)
parser.add_argument('--do_not_recompute',
help='Keep any file previously precalculated',
action='store_true')
parser.add_argument('--do_not_filter_bams',
help='Use BAM files as provided. Do not remove reads that are unmapped, mate unmapped,'
' not primary aligned or low MAPQ reads, reads failing qc and optical duplicates',
action='store_true')
parser.add_argument('--depleted',
help='Look for cell type specific regions with depletion of signal instead of enrichment',
action='store_true')
parser.add_argument('--input_is_bigwig',
help='Use the bigwig format instead of the bam format for the input. Note: The files must have extension .bw',
action='store_true')
parser.add_argument('--disable_quantile_normalization', help='Disable quantile normalization (default: False)',
action='store_true')
parser.add_argument('--transformation', type=str,
help='Variance stabilizing transformation among: none, log2, angle (default: angle)',
default='angle', choices=['angle', 'log2', 'none'])
parser.add_argument('--z_score_high', type=float, help='z-score value to select the specific regions(default: 1.5)',
default=1.5)
|
    parser.add_argument('--z_score_low', type=float,
help='z-score value to select the not specific regions(default: 0.25)', default=0.25)
parser.add_argument('--th_rpm', type=float,
help='Percentile on the signal intensity to consider for the hotspots (default: 99)',
default=99)
parser.add_argument('--meme_motifs_filename',
|
type=str,
help='Motifs database in MEME format (default JASPAR CORE 2016)')
parser.add_argument('--motif_mapping_filename', type=str,
help='Custom motif to gene mapping file (the default is for JASPAR CORE 2016 database)')
parser.add_argument('--plot_all',
help='Disable the filter on the TF activity and correlation (default z-score TF>0 and rho>0.3)',
action='store_true')
parser.add_argument('--keep_intermediate_files',
help='keep intermediate bedgraph files ',
action='store_true')
parser.add_argument('--n_processes', type=int,
help='Specify the number of processes to use. The default is #cores available.',
default=min(4, multiprocessing.cpu_count()))
parser.add_argument('--blacklist',
type=str,
help='Exclude blacklisted regions. Blacklisted regions are not excluded by default. '
'Use hg19 to blacklist regions for the human genome 19, '
'otherwise provide the filepath for a bed file with blacklisted regions.',
default='none')
parser.add_argument('--chrom_exclude',
type=str,
help='Exclude chromosomes that contain given (regex) string. For example _random|chrX|chrY excludes random, X, and Y chromosome regions',
default='_|chrX|chrY')
parser.add_argument('--read_ext', type=int, help='Read extension in bps (default: 200)', default=200)
parser.add_argument('--temp_directory', help='Directory to store temporary files (default: /tmp)', default='/tmp')
parser.add_argument('--rho_cutoff',
type=float,
default=0.3,
help='The cutoff absolute correlation value (0.0 to 1) for which activity plots are generated (default: 0.3)')
parser.add_argument('--tf_value_cuttoff',
type=float,
default=0.0,
help='The cutoff z-score tf_value for which activity plots are generated (default: 0.0) ')
parser.add_argument('--version', help='Print version and exit.', action='version',
version='Version %s' % HAYSTACK_VERSION)
return parser
#@profile
def main(input_args=None):
print '\n[H A Y S T A C K P I P E L I N E]'
print('\n-SELECTION OF HOTSPOTS OF VARIABILITY AND ENRICHED MOTIFS-\n')
print 'Version %s\n' % HAYSTACK_VERSION
parser = get_args_pipeline()
args = parser.parse_args(input_args)
args_dict = vars(args)
for key, value in args_dict.items():
exec ('%s=%s' % (key, repr(value)))
if meme_motifs_filename:
check_file(meme_motifs_filename)
if motif_mapping_filename:
check_file(motif_mapping_filename)
if not os.path.exists(temp_directory):
error('The folder specified with --temp_directory: %s does not exist!' % temp_directory)
sys.exit(1)
if input_is_bigwig:
extension_to_check = '.bw'
info('Input is set BigWig (.bw)')
else:
extension_to_check = '.bam'
info('Input is set compressed SAM (.bam)')
if name:
directory_name = 'HAYSTACK_PIPELINE_RESULTS_on_%s' % name
else:
directory_name = 'HAYSTACK_PIPELINE_RESULTS'
if output_directory:
output_directory = os.path.join(output_directory, directory_name)
else:
output_directory = directory_name
# check folder or sample filename
USE_GENE_EXPRESSION = True
if not os.path.exists(samples_filename_or_bam_folder):
error("The file or folder %s doesn't exist. Exiting." %
samples_filename_or_bam_folder)
sys.exit(1)
if os.path.isfile(samples_filename_or_bam_folder):
BAM_FOLDER = False
data_filenames = []
gene_expression_filenames = []
sample_names = []
with open(samples_filename_or_bam_folder) as infile:
for line in infile:
if not line.strip():
continue
if line.startswith('#'): # skip optional header line or empty lines
info('Skipping header/comment line:%s' % line)
continue
fields = line.strip().split()
n_fields = len(fields)
if n_fields == 2:
USE_GENE_EXPRESSION = False
sample_names.append(fields[0])
data_filenames.append(fields[1])
elif n_fields == 3:
USE_GENE_EXPRESSION = USE_GEN
|
tagn/plex-stack
|
lib/host.py
|
Python
|
gpl-3.0
| 1,728
| 0.001157
|
"""Module for configuring the host environment"""
import os
import json
import sys
class HostConfig():
"""Sets up the required components on the host environment"""
def __init__(self):
"""Host"""
self.path = None
self.apps = None
def get_path(self):
"""Gets the root path to build folders in"""
self.path = input(
'Enter the root directory, on this host, to store docker \
configurations (ex: /home/user): ')
if not self.path:
print('Path is required. Re-execute script and specify a path.')
sys.exit(1)
if self.path[0] != '/':
self.path = '/{}'.format(self.path)
if self.path[-1] != '/':
self.path = '{}/'.format(self.path)
if not os.path.exists(self.path):
print(
'Specified path does not exist. Please create path and re-execute script.')
sys.exit(1)
|
        return self.path
def get_apps(self):
"""Gets the json file containing app information"""
with open('data/apps.json', 'r') as file:
content = json.load(file)
|
self.apps = [app for app in content.get('apps')]
def build(self):
"""Builds folders using the specified path and apps.json"""
print('Building folders...')
for app in self.apps:
try:
app_path = '{}{}'.format(self.path, app.get('name'))
os.mkdir(app_path)
print('Created {}'.format(app_path))
except OSError:
                print('Folder for {} already exists. Skipping...'.format(app.get('name')))
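    # Hedged example (an assumption, not taken from the repo) of the
    # data/apps.json shape that get_apps() and build() expect:
    #
    # {"apps": [{"name": "plex"}, {"name": "sonarr"}]}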
if __name__ == '__main__':
pass
|
0todd0000/rft1d
|
rft1d/examples/random_fields_broken_1.py
|
Python
|
gpl-3.0
| 862
| 0.018561
|
'''
Broken (piecewise continuous) random field generation using rft1d.randn1d
Note:
When FWHM gets large (2FWHM>nNodes), the data should be padded
using the *pad* keyword.
'''
import numpy as np
from matplotlib import pyplot
import rft1d
#(0) Set parameters:
np.random.seed(12345)
nResponses = 5
nNodes = 101
FWHM = 20.0
### create a boolean mask:
nodes = np.array([True]*nNodes) #nothing masked out
|
nodes[20:30] = False #this region will be masked out
|
nodes[60:80] = False #this region will be masked out
#(1) Generate Gaussian 1D fields:
y = rft1d.randn1d(nResponses, nodes, FWHM)
#(2) Plot:
pyplot.close('all')
pyplot.plot(y.T)
pyplot.plot([0,100], [0,0], 'k:')
pyplot.xlabel('Field position', size=16)
pyplot.ylabel('z', size=20)
pyplot.title('Broken (piecewise continuous) random fields', size=20)
pyplot.show()
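# Hedged follow-up per the module docstring's note (the *pad* keyword is
# named there; the exact call signature is an assumption): when
# 2*FWHM > nNodes, pad the generator, e.g.
#
# y = rft1d.randn1d(nResponses, nodes, FWHM, pad=True)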
|
yunity/foodsaving-frontend
|
cordova/playstoreHelper/publish_to_beta.py
|
Python
|
mit
| 5,755
| 0.001911
|
"""Uploads apk to rollout track with user fraction."""
import sys
import socket
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import subprocess
import xml.etree.ElementTree as ET
import os
from pathlib import Path
TRACK = 'beta'
USER_FRACTION = 1
APK_FILE = '../platforms/android/build/outputs/apk/release/android-release.apk'
CREDENTIALS_JSON = 'playstore-service-account.json'
def main(argv):
package_name = os.environ.get('PACKAGE_NAME')
if package_name:
print('using provided package name', package_name)
else:
# get package name from somewhere
print('finding package name')
package_name = ET.parse('../platforms/android/res/xml/config.xml').getroot().attrib['id']
print('found package name', package_name)
print()
apk_file = os.environ.get('APK_FILE')
if apk_file is None:
print('using default apk file path', APK_FILE)
apk_file = APK_FILE
print('Retrieving release notes from CHANGELOG.md...')
releaseText = subprocess.run('../../scripts/get_newest_release.js', stdout=subprocess.PIPE).stdout.decode()
if len(releaseText) > 500:
releaseText = releaseText[:495] + '\n...'
print()
print(releaseText)
print()
credentials = ServiceAccountCredentials.from_json_keyfile_name(
CREDENTIALS_JSON, scopes=['https://www.googleapis.com/auth/androidpublisher']
)
print('Found credentials, trying to connect...')
socket.setdefaulttimeout(900)
service = build('androidpublisher', 'v3', credentials=credentials)
edit_response = service.edits().insert(body={}, packageName=package_name).execute()
edit_id = edit_response['id']
print('Inserted edit with ID', edit_id)
print('Uploading APK...')
apk_response = service.edits().apks().upload(
editId=edit_id, packageName=package_name, media_body=apk_file
).execute()
print('Version code %d has been uploaded' % apk_response['versionCode'])
track_response = service.edits().tracks().patch(
editId=edit_id,
|
        track=TRACK,
packageName=package_name,
body={
|
'releases': [{
'releaseNotes': [{
'text': releaseText,
'language': 'en-US'
}],
'versionCodes': [apk_response['versionCode']],
'userFraction': USER_FRACTION,
'status': 'inProgress',
}]
}
).execute()
print('Track %s is set with releases: %s' % (track_response['track'], str(track_response['releases'])))
if package_name == 'world.karrot':
assets = Path('../playstoreAssets')
language = 'en-US'
listing = assets / language / 'listing'
with (listing / 'shortDescription.txt').open() as shortDescription, \
(listing / 'fullDescription.txt').open() as fullDescription:
service.edits().listings().update(
editId=edit_id,
packageName=package_name,
language=language,
body={
'title': 'Karrot',
'language': language,
'shortDescription': shortDescription.read(),
'fullDescription': fullDescription.read(),
'video': '',
}
).execute()
print('Listing of %s has been updated' % package_name)
images_path = assets / language / 'images'
imageTypes = (
'featureGraphic',
'icon',
'phoneScreenshots',
'promoGraphic',
'sevenInchScreenshots',
'tenInchScreenshots',
'tvBanner',
'tvScreenshots',
'wearScreenshots',
)
images = [str(p) for p in images_path.iterdir()]
sha1 = subprocess.run(['sha1sum', *images], stdout=subprocess.PIPE).stdout.decode()
sha1_images = {sha1: path for (sha1, path) in [i.split() for i in sha1.splitlines()]}
for imageType in imageTypes:
our_images = {
sha1: path
for (sha1, path) in sha1_images.items() if path.split('/')[-1].startswith(imageType)
}
images_response = service.edits().images().list(
editId=edit_id,
packageName=package_name,
language=language,
imageType=imageType,
).execute()
their_images = images_response.get('images') or []
their_images = {i['sha1']: i['id'] for i in their_images}
to_upload = [our_images.get(k) for k in (our_images.keys() - their_images.keys())]
to_delete = [their_images.get(k) for k in (their_images.keys() - our_images.keys())]
for image_id in to_delete:
service.edits().images().delete(
editId=edit_id,
packageName=package_name,
language=language,
imageType=imageType,
imageId=image_id,
).execute()
print('Deleted', image_id)
for path in to_upload:
service.edits().images().upload(
editId=edit_id,
packageName=package_name,
language=language,
imageType=imageType,
media_body=path,
).execute()
print('Uploaded', path)
commit_request = service.edits().commit(editId=edit_id, packageName=package_name).execute()
print('Edit "%s" has been committed' % (commit_request['id']))
if __name__ == '__main__':
main(sys.argv)
|