repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
kslundberg/pants | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py | Python | apache-2.0 | 5,162 | 0.004262 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from mock import MagicMock
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.exceptions import TaskError
from pants.goal.context import Context
from pants.util.dirutil import safe_rmtree
from pants_test.tasks.task_test_base import TaskTestBase
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.scrooge_gen import ScroogeGen
# TODO (tdesai) Issue-240: Use JvmToolTaskTestBase for ScroogeGenTest
class ScroogeGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ScroogeGen
@property
def alias_groups(self):
return BuildFileAliases(targets={'java_thrift_library': JavaThriftLibrary})
def setUp(self):
super(ScroogeGenTest, self).setUp()
self.task_outdir = os.path.join(self.build_root, 'scrooge', 'gen-java')
def tearDown(self):
super(ScroogeGenTest, self).tearDown()
safe_rmtree(self.task_outdir)
def test_validate_compiler_configs(self):
# Set synthetic defaults for the global scope.
self.set_options_for_scope('thrift-defaults',
compiler='unchecked',
language='uniform',
rpc_style='async')
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='one',
sources=[],
dependencies=[],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='two',
sources=[],
dependencies=[':one'],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='three',
sources=[],
dependencies=[':one'],
rpc_style='finagle',
)
'''))
target = self.target('test_validate:one')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._validate_compiler_configs([self.target('test_validate:one')])
task._validate_compiler_configs([self.target('test_validate:two')])
with self.assertRaises(TaskError):
task._validate_compiler_configs([self.target('test_validate:three')])
def test_scala(self):
build_string = '''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='scala',
rpc_style='finagle'
)
'''
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/example/Example.scala')]
self._test_help(build_string, ScalaLibrary, sources)
def test_android(self):
build_string = '''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='android',
rpc_style='finagle'
)
'''
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/android_example/Example.java')]
self._test_help(build_string, JavaLibrary, sources)
def _test_help(self, build_string, library_type, sources):
contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
struct Example {
1: optional i64 number
}
''')
self.create_file(relpath='test_smoke/a.thrift', contents=contents)
self.add_to_build_file('test_smoke', dedent(build_string))
target = self.target('test_smoke:a')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._declares_service = lambda source: False
task._outdir = MagicMock()
task._outdir.return_value = self.task_outdir
task.gen = MagicMock()
task.gen.return_value = {'test_smoke/a.thrift': sources}
saved_add_new_target = Context.add_new_target
try:
Context.add_new_target = MagicMock()
task.execute()
relative_task_outdir = os.path.relpath(self.task_outdir, get_buildroot())
spec = '{spec_path}:{name}'.format(spec_path=relative_task_outdir, name='test_smoke.a')
address = Address.p | arse(spec=spec)
Context.add_new_target.assert_called_once_with(address,
library_type,
| sources=sources,
excludes=OrderedSet(),
dependencies=OrderedSet(),
provides=None,
derived_from=target)
finally:
Context.add_new_target = saved_add_new_target
|
StartupsPoleEmploi/labonneboite | labonneboite/tests/scripts/test_prepare_mailing_data.py | Python | agpl-3.0 | 761 | 0.001314 | from labonneboite.scripts import prepare_mailing_data as script
from labonneboite.tests.test_base import DatabaseTest
from labonneboite.common.models import Office
class PrepareMailingDataBaseTest(DatabaseTest):
"""
Create Elasticsearch and DB content for the unit tests.
| """
def setUp(self, *args, **kwargs):
super(PrepareMailingDataBaseTest, self).s | etUp(*args, **kwargs)
# We should have 0 offices in the DB.
self.assertEqual(Office.query.count(), 0)
class MinimalisticTest(PrepareMailingDataBaseTest):
"""
Test prepare_mailing_data script.
This test is quite minimalistic as there is no office in DB (nor in ES).
"""
def test_prepare_mailing_data(self):
script.prepare_mailing_data()
|
clusterpy/clusterpy | clusterpy/core/layer.py | Python | bsd-3-clause | 75,262 | 0.006072 | # encoding: latin2
"""Repository of clusterPy's main class "Layer"
"""
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2009-10 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
__all__ = ['Layer']
import copy
import cPickle
import numpy
import os
import re
import time
import itertools
from data import generateSAR
from data import generateSMA
from data import generateCAR
from data import generateSpots
from data import generatePositiveSpots
from data import generateUniform
from data import generateGBinomial
from data import generateLBinomial
from data import dissolveData
from data import fieldOperation
from data import spatialLag
from geometry import dissolveLayer
from geometry import transportLayer
from geometry import expandLayer
from geometry import getBbox
from geometry import getGeometricAreas
from geometry import getCentroids
# Clustering
from toolboxes import execAZP
from toolboxes import execArisel
from toolboxes import execAZPRTabu
from toolboxes import execAZPSA
from toolboxes import execAZPTabu
from toolboxes import execRandom
from toolboxes.cluster.pRegionsExact import execPregionsExact
from toolboxes.cluster.pRegionsExactCP import execPregionsExactCP
from toolboxes.cluster.minpOrder import execMinpOrder
from toolboxes.cluster.minpFlow import execMinpFlow
from toolboxes import execMaxpTabu
from toolboxes import execAMOEBA
from toolboxes import originalSOM
from toolboxes import geoSom
from toolboxes import geoAssociationCoef
from toolboxes import redistributionCoef
from toolboxes import similarityCoef
# Irregular Maps
try:
from toolboxes import topoStatistics
from toolboxes import noFrontiersW
except:
pass
# Spatial statistics
from toolboxes import globalInequalityChanges
from toolboxes import inequalityMultivar
from toolboxes import interregionalInequalityTest
from toolboxes import interregionalInequalityDifferences
from outputs import dbfWriter
from outputs import shpWriterDis
from outputs import csvWriter
# Contiguity function
from contiguity import dict2matrix
from contiguity import dict2gal
from contiguity import dict2csv
# Layer
# Layer.dissolveMap
# Layer.addVariable
# Layer.getVars
# Layer.generateData
# Layer.resetData
# Layer.cluster
# Layer.getVars
# Layer.resetData
# Layer.cluster
# Layer.esda
# Layer.exportArcData
# Layer.save
# Layer.exportDBFY
# Layer.exportCSVY
# Layer.exportGALW
# Layer.exportCSVW
# Layer.exportOutputs
# Layer.transport
# Layer.expand
class Layer():
"""Main class in clusterPy
It is an object that represents an original map and all the
other maps derived from it after running any algorithm.
The layer object can be also represented as an inverse tree
with an upper root representing the original map and the
different branches representing other layers related to the
root.
"""
def __init__(self):
"""
**Attributes**
* Y: dictionary (attribute values of each feature)
* fieldNames: list (fieldNames List of attribute names)
* areas: list (list containing the coordinates of each feature)
* region2areas: list (list of lenght N (number of areas) with the ID of the region to which each area has been assigned during the last algorithm run)
* Wqueen: dictionary (spatial contiguity based on queen criterion)
* Wrook: dictionary (spatial contiguity based on rook criterion)
* Wcustom: dictionary (custom spatial contiguity based on any other criterion)
* type: string (layer's geometry type ('polygons','lines','points'))
* results: list (repository of layer instances from running an algorithm)
* outputCluster: dictionary (information about different characteristics of a solution (time, parameters, OS, among others))
* name: string (layer's name; default is 'root')
* outputDissolve: dictionary (keep information from which the current layer has been created)
* father: Layer (layer from which the current layer has been generated)
* bbox: tuple (bounding box)
"""
# Object Attributes
self.Y = {}
self.fieldNames = []
self.areas = []
self.region2areas = []
self.Wqueen = {}
self.Wrook = {}
self.customW = {}
self.shpType = ''
self.results = []
self.name = ""
self.outputCluster = {}
self.outputCluster['r2a'] = []
self.outputCluster['r2aRoot'] = []
self.outputDissolve = {}
self.outputDissolve['r2a'] = []
self.outputDissolve['r2aRoot'] = []
self.father = []
self.bbox = []
self.tStats = []
def dissolveMap(self, var=None, dataOperations={}):
"""
**Description**
Once you run an aggregation algorithm you can use the dissolve function to create a new map where the new polygons result from dissolving borders between areas assigned to the same region.
The dissolve map is an instance of a layer that is located inside the original layer. The dissolved map is then a "child" layer to which you can apply the same methods available for any layer. It implies that you can easily perform nested aggregation by applying aggregation algorithms to already dissolved maps.
:param var: It is the variable that indicates which areas belong to the same regions. This variable is usually the variable that is saved to a layer once an aggregation algorithm is executed. This variable can also be already included in your map, or it can be added from an external file.
:type var: string
:param dataOperations: Dictionary which maps a variable to a list of operations to run on it. The dissolved layer will contain in it's data all the variables specified in this dictionary. Be sure to check the dissolved layer fieldNames before use it's variables.
:type dataOperations: dictionary
The dictionary structure must be as showed bellow.::
>>> X = {}
>>> X[variableName1] = [function1, function2,....]
>>> X[variableName2] = [function1, function2,....]
Where functions are strings which represent the names of the functions to be used on the given variable (variableName). Functions could be,'sum','mean','min','max','meanDesv','stdDesv','med', 'mode','range','first','last','numberOfAreas'.
If you do not use this structure, the new layer (i.e.., the dissolved
map) will have just the ID field.
**Examples**
Dissolve china using the result from an aggregation algorithm ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.cluster('azpSa', ['Y1990', 'Y991'], 5)
china.dissolveMap()
Dissolve a China layer using a stored result on BELS ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.dissolveMap(var="BELS")
Dissolve c | hina using the result from an aggregation algorithm. It also generates two | new variables in the dissolved map. These new variables are the regional mean and sum of attributes "Y1978" and "Y1979" ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.cluster('azpSa', ['Y1990', 'Y991'], 5)
dataOperations = {'Y1978':['sum', 'mean'],'Y1979':['sum', 'mean']}
china.dissolveMap(dataOperations=dataOperations)
"""
print "Dissolving lines"
sh = Layer()
if var is not None:
if var in self.fieldNames:
region2areas = map(lambda x: x[0],self.getVars(var).values())
dissolveLayer(self, sh, region2areas)
sh.outputDissolve = {"objectiveFunction": "Unknown",\
"runningTime": "Unknown", "aggregationVariables": "Unknown",\
"algorithm":"Unknown", "weightType":"Unknown", \
"regions": len(sh.areas |
xguse/spartan | src/spartan/utils/seqs.py | Python | mit | 1,664 | 0.024639 |
compl_iupacdict = {'A':'T',
'C':'G',
'G':'C',
'T':'A',
'M':'K',
'R':'Y',
'W':'W',
'S':'S',
'Y':'R',
'K':'M',
'V':'B',
'H':'D',
'D':'H',
'B':'V',
'X':'X',
'N':'N'}
def compliment(seq, compl_iupacdict):
compl_seq = ""
| for i in range(0,len(seq)):
letter = seq[i]
compl_seq = compl_seq + compl_iupacdict[letter]
return compl_seq
def reverse(text):
return text[::-1]
def revcomp(seq):
revCompSeq = reverse(compliment(seq, compl_iupacdict))
return revCompSeq
#======================================================================== | =
def iupacList_2_regExList(motifList):
i = 0
while i < len(motifList):
motifList[i] = [motifList[i], iupac2regex(motifList[i])]
i += 1
def iupac2regex(motif):
iupacdict = {'A':'A',
'C':'C',
'G':'G',
'T':'T',
'M':'[AC]',
'R':'[AG]',
'W':'[AT]',
'S':'[CG]',
'Y':'[CT]',
'K':'[GT]',
'V':'[ACG]',
'H':'[ACT]',
'D':'[AGT]',
'B':'[CGT]',
'X':'[ACGT]',
'N':'[ACGT]'}
transl_motif = ""
for i in range(0,len(motif)):
letter = motif[i]
transl_motif = transl_motif + iupacdict[letter]
return transl_motif |
LLNL/spack | var/spack/repos/builtin/packages/r-absseq/package.py | Python | lgpl-2.1 | 1,317 | 0.000759 | # Copyright | 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAbsseq(RPackage):
"""ABSSeq: a new RNA-Seq analysis method based on modelling absolute
expression differences
Inferring differential expression genes by absolute counts difference
between two groups, utilizing Negative binomial distributio | n and
moderating fold-change according to heterogeneity of dispersion across
expression level."""
homepage = "https://bioconductor.org/packages/ABSSeq"
git = "https://git.bioconductor.org/packages/ABSSeq.git"
version('1.44.0', commit='c202b4a059021ed1228ccee7303c69b0aa4ca1ee')
version('1.38.0', commit='b686d92f0f0efdb835982efe761d059bc24b34ce')
version('1.36.0', commit='bd419072432cba4ef58b4b37b3c69c85d78b1c4a')
version('1.34.1', commit='0c3a2514ef644c6e0de3714bc91959a302c9e006')
version('1.32.3', commit='189d81c3d70f957bf50780f76a6ddcee499b4784')
version('1.22.8', commit='a67ba49bc156a4522092519644f3ec83d58ebd6a')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-locfit', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
|
Sound-Colour-Space/sound-colour-space | website/apps/museum/models.py | Python | mit | 11,285 | 0.004342 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import uuid
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.db.models import permalink
from taggit.managers import TaggableManager
from taggit.models import TagBase, GenericTaggedItemBase
from .tasks import generate_thumbnails_task
from common.storage import DataStorage
import logging
log = logging.getLogger(__name__)
# generic data path based on uuid for folder and filename
def generate_data_path(obj, filename):
ext = filename.split('.')[-1]
directory = '{}'.format(obj.uuid)
directory = directory.replace('-', '/')
path = '{}/{}.{}'.format(directory, obj.uuid, ext)
return path
class Base(models.Model):
"""Base model."""
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
created = models.DateTimeField(_('created'), auto_now_add=True)
modified = models.DateTimeField(_('modified'), auto_now=True)
class Meta:
abstract = True
ordering = ('-created',)
get_latest_by = 'created'
class Keyword(Base, TagBase):
description = models.TextField(_('description'), blank=True, null=True)
remote_uuid = models.CharField(_('remote_uuid'), max_length=200, blank=True, null=True)
class Meta:
verbose_name = _("keyword")
verbose_name_plural = _("keywords")
ordering = ('name', )
def get_absolute_url(self):
return 'keywords/%s' % self.slug
class TaggedObject(GenericTaggedItemBase):
tag = models.ForeignKey(Keyword, related_name="museums_tagged_objects")
class License(Base):
'''
madek mapping:
label -> license:label
url -> license: url
usage -> license:copyright_usage
'''
label = models.CharField(_('label'), max_length=200, blank=True, null=True)
url = models.URLField(_('url'), blank=True, null=True)
usage = models.TextField(_('usage'), blank=True, null=True)
remote_uuid = models.CharField(_('remote_uuid'), max_length=200, blank=True, null=True)
remote_href = models.CharField(_('remote_href'), max_length=200, blank=True, null=True)
class Meta:
verbose_name = _('license')
verbose_name_plural = _('licenses')
db_table = 'museum_license'
ordering = ('-label',)
def __unicode__(self):
return u'%s' % self.label
class Source(Base):
ref = models.CharField(_('reference'), max_length=16, unique=True, blank=False, null=False)
title = models.CharField(_('title'), max_length=200, blank=True, null=True)
text = models.TextField(_('text'), blank=True, null=True)
# primary or secondary source
PRIMARY_SOURCE = 'PS'
SECONDARY_SOURCE = 'SS'
SOURCE_CHOICES = (
(PRIMARY_SOURCE, 'Primary Source'),
(SECONDARY_SOURCE, 'Secondary Source'),
)
type = models.CharField(
max_length=2,
choices=SOURCE_CHOICES,
default=PRIMARY_SOURCE,
)
url = models.URLField(_('url'), null=True, blank=True)
attachment = models.FilePathField(_('attachment'), path=settings.ATTACHMENT_PATH,
max_length=2048, null=True, blank=True)
class Meta:
verbose_name = _('source')
verbose_name_plural = _('sources')
db_table = 'museum_source'
ordering = ('ref',)
def __unicode__(self):
return u'%s' % self.ref
def get_absolute_url(self):
return 'sources/%s' % self.ref
class Author(Base):
date_of_birth = models.CharField(_('date of birth'), max_length=200, blank=True, null=True)
date_of_death = models.CharField(_('date of death'), max_length=200, blank=True, null=True)
first_name = models.CharField(_('first name'), max_length=200, blank=True, null=True)
last_name = models.CharField(_('last name'), max_length=200, blank=True, null=True)
pseudonym = models.CharField(_('pseudonym'), max_length=200, blank=True, null=True)
remote_uuid = models.CharField(_('remote_uuid'), max_length=200, blank=True, null=True)
remote_href = models.CharField(_('remote_href'), max_length=200, blank=True, null=True)
class Meta:
verbose_name = _('author')
verbose_name_plural = _('authors')
db_table = 'museum_author'
ordering = ('first_name',)
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
def get_absolute_url(self):
return 'authors/%s/' % self.uuid
def get_full_name(self):
return u'%s %s' % (self.first_name, self.last_name)
class Attachment(Base):
"""Base Attachment Model"""
title = models.CharField(_('title'), max_length=200)
class Meta:
abstract = True
experiment_store = DataStorage(location=settings.EXPERIMENTS_ROOT, base_url=settings.EXPERIMENTS_URL)
class Experiment(Attachment):
# slug for url
slug = models.SlugField(_('slug'), allow_unicode=True)
cover = models.ImageField(_('cover'), upload_to=generate_data_path, storage=experiment_store, null=True,
blank=True)
# iframe url
url = models.URLField(_('url'), null=True, blank=True)
description = models.TextField(_('description'), blank=True, null=True)
class Meta:
verbose_name = _('experiment')
verbose_name_plural = _('experiments')
db_table = 'museum_experiment'
ordering = ('title',)
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return 'virtuallab/%s' % self.slug
exhibition_store = DataStorage(location=settings.EXHIBITIONS_ROOT, base_url=settings.EXHIBITIONS_URL)
class Exhibition(Attachment):
# slug for url
slug = models.SlugField(_('slug'), allow_unicode=True)
cover = models.ImageField(_('cover'), upload_to=generate_data_path, storage=exhibition_store, null=True,
blank=True)
# iframe url
url = models.URLField(_('url'), null=True, blank=True)
description = models.TextField(_('description'), blank=True, null=True)
class Meta:
verbose_name = _('exhibition')
verbose_name_plural = _('exhibition')
db_table = 'museum_exhibition'
ordering = ('title',)
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return 'exhibition/%s' % self.slug
class Link(Attachment):
url = models.URLField(_('url'))
| class Meta:
verbose_name = _('link')
verbose_name_plural = _('links')
db_table = 'museum_link'
ordering = ('-created',)
get_latest_by = 'created'
def __unicode__(self):
return u'%s' % self.title
museum_store = DataStorage(location=settings.DIAGRAMS_ROOT, base_url=settings.DIAGR | AMS_URL)
ACCURACY_CHOICES = (
(1, _("exact")),
(2, _("month")),
(3, _("year")),
(4, _("decennium")),
(5, _("century")),
(6, _("unknown"))
)
class Entry(Base):
"""
Entry model.
"""
author = models.ManyToManyField(Author, related_name='museums_entries',blank=True)
license = models.ManyToManyField(License, related_name='museums_entries', blank=True)
image = models.ImageField(_('image'), max_length=200, upload_to=generate_data_path, storage=museum_store, null=True, blank=True)
image_name = models.CharField(_('image name'), max_length=1024, null=True, blank=True)
portrayed_object_date = models.CharField(_('portrayed_object_date'), max_length=200, blank=True, null=True)
date = models.DateField(_('date'), null=True, blank=True)
date_accuracy = models.IntegerField(choices=ACCURACY_CHOICES, default=3, null=True, blank=True)
title = models.CharField(_('title'), max_length=200, blank=True, null=True)
subtitle = models.CharField(_('subtitle'), max_length=200, blank=True, null=True)
description = models.TextField(_('description'), blank=True, null=True)
tags = TaggableManager(blank=True, through=TaggedObject)
uploader = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, related_name=' |
bithinalangot/ecidadania-dev | src/core/spaces/views.py | Python | gpl-3.0 | 25,152 | 0.006958 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
"""
These are the views that control the spaces, meetings and documents.
"""
import datetime
import itertools
import hashlib
# Generic class-based views
from django.views.generic.base import TemplateView, RedirectView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.views.generic import FormView
# Decorators. the first is a wrapper to convert function-based decorators
# to method decorators that can be put in subclass methods.
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.contrib.admin.views.decorators import staff_member_required
# Response types
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404, redirect
# Some extras
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.template import RequestContext
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.db import connection
from django.core.mail import send_mail
# Function-based views
from django.views.generic.list_detail import object_list, object_detail
from django.views.generic.create_update import create_object, update_object
from django.views.generic.create_update import delete_object
# e-cidadania data models
from core.spaces.models import Space, Entity, Document, Event, Intent
from apps.ecidadania.news.models import Post
from core.spaces.forms import SpaceForm, DocForm, EventForm, \
EntityFormSet, UserRoleForm
from apps.ecidadania.proposals.models import Proposal, ProposalSet
from apps.ecidadania.staticpages.models import StaticPage
from apps.ecidadania.debate.models import Debate
from django.conf import settings
#thirdparty
from apps.thirdparty.userroles import roles
from apps.thirdparty.userroles.models import set_user_role
#
# RSS FEED
#
class SpaceFeed(Feed):
"""
Returns a space feed with the content of various applciations. In the future
this function must detect applications and returns their own feeds.
"""
def get_object(self, request, space_url):
current_space = get_object_or_404(Space, url=space_url)
return current_space
def title(self, obj):
return _("%s feed") % obj.name
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return _("All the recent activity in %s ") % obj.name
def items(self, obj):
results = itertools.chain(
Post.objects.all().filter(space=obj).order_by('-pub_date')[:10],
Proposal.objects.all().filter(space=obj).order_by('-pub_date')[:10],
Event.objects.all().filter(space=obj).order_by('-pub_date')[:10],
)
return sorted(results, key=lambda x: x.pub_date, reverse=True)
#
# INTENT VIEWS
#
@login_required
def add_intent(request, space_url):
"""
Returns a page where the logged in user can click on a "I want to
participate" button, which after sends an email to the administrator of
the space with a link to approve the user to use the space.
:attributes: space, intent, token
:rtype: Multiple entity objects.
:context: space_url, heading
"""
space = get_object_or_404(Space, url=space_url)
try:
intent = Intent.objects.get(user=request.user, space=space)
heading = _("Access has been already authorized")
except Intent.DoesNotExist:
token = hashlib.md5("%s%s%s" % (request.user, space,
datetime.datetime.now())).hexdigest()
intent = Intent(user=request.user, space=space, token=token)
intent.save()
subject = _("New participation request")
body = _("User {0} wants to participate in space {1}.\n \
Plese click on the link below to approve.\n {2}"\
.format(request.user.username, space.name, intent.get_approve_url()))
heading = _("Your request is being processed.")
send_mail(subject=subject, message=body,
from_email="noreply@ecidadania.org",
recipient_list=[space.author.email])
return render_to_response('space_intent.html', \
{'space_name': space.name, 'heading': heading}, \
context_instance=RequestContext(request))
class ValidateIntent(DetailView):
context_object_name = 'space_name'
template_name = 'spaces/validate_intent.html'
heading = _("The requested intent does not exist!")
def get_object(self):
space_object = get_object_or_404(Space, url=self.kwargs['space_url'])
if self.request.user.is_staff:
intent = get_object_or_404(Intent, token=self.kwargs['token'])
intent.user.profile.spaces.add(space_object)
self.heading = _("The user has been authorized to participate in space \"%s\"." % space_object.name)
messages.info(self.request, _("Authorization successful"))
self.template_name = 'validate_intent.html'
return space_object
def get_context_data(self, **kwargs):
context = super(ValidateIntent, self).get_context_data(**kwargs)
context['heading'] = self.heading
| return context
#
# User roles.
#
@user_passes_test(lambda u: u.is_superuser)
def add_role(request):
"""
This function will allow the site admin to assign roles to the users.
"""
userrole_form = UserRoleForm(request.POST or None)
if request.method == 'POST':
if userrole_form.is_valid():
userrole_uncommitted = userrole_form.save(commit=False)
set_user_role(userrole_uncommitted.user, userrole_uncommi | tted.name)
return redirect('/spaces/')
else:
return render_to_response('spaces/space_roles.html', {'form':userrole_form}, context_instance = RequestContext(request))
else:
return render_to_response('spaces/space_roles.html', {'form':userrole_form}, context_instance = RequestContext(request))
# SPACE VIEWS
#
# Please take in mind that the create_space view can't be replaced by a CBV
# (class-based view) since it manipulates two forms at the same time. Apparently
# that creates some trouble in the django API. See this ticket:
# https://code.djangoproject.com/ticket/16256
@permission_required('spaces.add_space')
def create_space(request):
"""
Returns a SpaceForm form to fill with data to create a new space. There
is an attached EntityFormset to save the entities related to the space. Only
site administrators are allowed to create spaces.
:attributes: - space_form: empty SpaceForm instance
- entity_forms: empty EntityFormSet
:rtype: Space object, multiple entity objects.
:context: form, entityformset
"""
space_form = SpaceForm(request.POST or None, request.FILES or None)
entity_forms = EntityFormSet(request.POST or None, request.FILES or None,
queryset=Entity.objects.none())
if request.user.is_staff:
if request.method == 'POST':
if space_form.is_valid() and en |
sayar/mcj2011 | sms/settings.py | Python | apache-2.0 | 5,993 | 0.002169 | # Django settings for mcjsms project.
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
ADMINS = []
MANAGERS = ADMINS
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'data/dev.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mcj_sms', # Or path to database file if using sqlite3.
'USER': 'mcj_sms', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
if DEBUG:
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
else:
MEDIA_ROOT = '/home/ramisayar/public/mcj/mcj2011/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/site_media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
if DEBUG:
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
else:
STATIC_ROOT = '/home/ramisayar/public/mcj/mcj2011/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'global_static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '3+pefpl6rsg&#smr*4$f(18nasrr0u)wp_4q=lkn50n-qz0rjt'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'sms.urls'
TEMPLATE_DIRS = (os.path.join(SITE_ROOT, 'templates'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.admin',
'django.contrib.localflavor',
'django_twilio',
'sms.twilio_sms'
)
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
camilonova/django | django/db/backends/mysql/introspection.py | Python | bsd-3-clause | 9,426 | 0.001485 | import warnings
from collections import namedtuple
from MySQLdb.constants import FIELD_TYPE
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra',))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default')
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Map of MySQLdb FIELD_TYPE codes to the Django field classes used when
    # introspecting existing tables (e.g. by inspectdb).
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'SmallIntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIME: 'TimeField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if 'auto_increment' in description.extra:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # information_schema database gives more accurate results for some figures:
        # - varchar length returned by cursor.description is an internal length,
        # not visible length (#5725)
        # - precision and scale (for decimal fields) (#5014)
        # - auto_increment is not available in cursor.description
        cursor.execute("""
            SELECT column_name, data_type, character_maximum_length, numeric_precision,
                   numeric_scale, extra, column_default
            FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
        # Index the information_schema rows by column name for the merge below.
        field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
        # A one-row SELECT populates cursor.description with the basic
        # DB-API metadata for every column.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        def to_int(i):
            # information_schema returns NULL (None) for non-applicable figures.
            return int(i) if i is not None else i
        fields = []
        for line in cursor.description:
            col_name = force_text(line[0])
            # Merge: prefer the information_schema figures, falling back to
            # the values reported by cursor.description.
            fields.append(
                FieldInfo(*(
                    (col_name,) +
                    line[1:3] +
                    (
                        to_int(field_info[col_name].max_len) or line[3],
                        to_int(field_info[col_name].num_prec) or line[4],
                        to_int(field_info[col_name].num_scale) or line[5],
                        line[6],
                        field_info[col_name].column_default,
                        field_info[col_name].extra,
                    )
                ))
            )
        return fields
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
relations[my_fieldname] = (other_field, other_table)
return relations
    def get_key_columns(self, cursor, table_name):
        """
        Return a list of (column_name, referenced_table_name, referenced_column_name)
        for all key columns in the given table.
        """
        key_columns = []
        # Only rows with a non-NULL referenced table/column are foreign keys;
        # plain indexes and primary keys are filtered out by the WHERE clause.
        cursor.execute("""
            SELECT column_name, referenced_table_name, referenced_column_name
            FROM information_schema.key_column_usage
            WHERE table_name = %s
                AND table_schema = DATABASE()
                AND referenced_table_name IS NOT NULL
                AND referenced_column_name IS NOT NULL""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns
    def get_indexes(self, cursor, table_name):
        """
        Return a dict mapping indexed single columns to
        {'primary_key': bool, 'unique': bool}.

        Deprecated: multi-column indexes are ignored entirely; use
        get_constraints() instead.
        """
        warnings.warn(
            "get_indexes() is deprecated in favor of get_constraints().",
            RemovedInDjango21Warning, stacklevel=2
        )
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        # Do a two-pass search for indexes: on first pass check which indexes
        # are multicolumn, on second pass check which single-column indexes
        # are present.
        # SHOW INDEX row layout (MySQL): row[1]=Non_unique, row[2]=Key_name,
        # row[3]=Seq_in_index, row[4]=Column_name.
        rows = list(cursor.fetchall())
        multicol_indexes = set()
        for row in rows:
            if row[3] > 1:
                multicol_indexes.add(row[2])
        indexes = {}
        for row in rows:
            if row[2] in multicol_indexes:
                continue
            if row[4] not in indexes:
                indexes[row[4]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[2] == 'PRIMARY':
                indexes[row[4]]['primary_key'] = True
            if not row[1]:
                indexes[row[4]]['unique'] = True
        return indexes
def get_storage_engine(self, cursor, table_name):
"""
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
result = cursor.fetchone()
if not result:
return self.connection.features._mysql_storage_engine
return result[0]
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = DATABASE() AND
kc.table_name = %s
"""
cursor.execute(name_query, [table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = DATABASE() AND
c.table_name = %s
"""
cursor.execute(type |
carthach/essentia | test/src/unittests/spectral/test_triangularbarkbands.py | Python | agpl-3.0 | 5,769 | 0.017334 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import numpy as np
class TestTriangularBarkBands(TestCase):
    def InitTriangularBarkBands(self, nbands):
        # Helper: build a TriangularBarkBands instance spanning the full
        # band 0..Nyquist (fs = 44100 Hz) with the requested number of bands.
        return TriangularBarkBands(inputSize=1024,
                                   numberBands=nbands,
                                   lowFrequencyBound=0,
                                   highFrequencyBound=44100*.5)
    def testRegression(self):
        # A flat (all-ones) spectrum must yield unit-valued bands for any
        # band count, with no NaNs or infinities in the output.
        spectrum = [1]*1024
        mbands = self.InitTriangularBarkBands(24)(spectrum)
        self.assertEqual(len(mbands), 24 )
        self.assert_(not any(numpy.isnan(mbands)))
        self.assert_(not any(numpy.isinf(mbands)))
        self.assertAlmostEqualVector(mbands, [1]*24, 1e-5)
        # Repeat with a much finer band resolution.
        mbands = self.InitTriangularBarkBands(128)(spectrum)
        self.assertEqual(len(mbands), 128 )
        self.assert_(not any(numpy.isnan(mbands)))
        self.assert_(not any(numpy.isinf(mbands)))
        self.assertAlmostEqualVector(mbands, [1]*128, 1e-5)
def testRegressionRastaMode(self):
# Test the BFCC extractor compared to Rastamat specifications
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/vignesh.wav'),
sampleRate = 44100)()*2**15
#Expected values generated in Rastamat/MATLAB
expected = [ 20.28919141, 23.80362425, 26.69797305, 27.10461133, 26.64508125,
26.7758322, 27.1787682, 27.10699792, 26.29040982, 25.04243486,
24.24791966, 24.17377063, 24.61976518, 25.29554584, 24.87617598,
23.79018513, 23.04026225, 23.20707811, 23.09716777, 23.33050168,
22.8201923, 21.49477903, 21.63639095, 22.12937291, 22.01981441,
21.70728156]
frameSize = 1102
hopSize = 441
fftsize = 2048
paddingSize = fftsize - frameSize
spectrumSize = int(fftsize/2) + 1
w = Windowing(type = 'hann',
size = frameSize,
zeroPadding = paddingSize,
normalized = False,
zeroPhase = False)
spectrum = Spectrum(size = fftsize)
mbands = TriangularBarkBands(inputSize= spectrumSize,
type = 'power',
highFrequencyBound = 8000,
lowFrequencyBound = 0,
numberBands = 26,
weighting = 'linear',
normalize = 'unit_max')
pool = Pool()
for frame in FrameGenerator(audio, frameSize = frameSize, hopSize = hopSize, startFromZero = True, validFrameThresholdRatio = 1):
pool.add('TriangularBarkBands', mbands(spectrum(w(frame))))
np.savetxt("out.csv", np.mean(np.log(pool['TriangularBarkBands']),0), delimiter=',')
self.assertAlmostEqualVector( np.mean(np.log(pool['TriangularBarkBands']),0), expected,1e-2)
    def testZero(self):
        # Inputting zeros should return zero. Try with different sizes
        size = 1024
        while (size >= 256 ):
            self.assertEqualVector(TriangularBarkBands()(zeros(size)), zeros(24))
            # NOTE(review): under Python 3 `size /= 2` makes size a float,
            # which is then passed to zeros() — confirm when porting.
            size /= 2
    def testInvalidInput(self):
        # mel bands should fail for a spectrum with less than 2 bins
        self.assertComputeFails(TriangularBarkBands(), [])
        self.assertComputeFails(TriangularBarkBands(), [0.5])
def testInvalidParam(self):
self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 0 })
self.assertConfigureFails(TriangularBarkBands(), { | 'numberBands': 1 })
self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': -100 })
self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': 100,
'highFrequencyBound': 50 })
self.assertConfigureFails(TriangularBarkBands(), { 'highFrequencyBound': 30000,
'sampleRate': 22050} | )
def testWrongInputSize(self):
# This test makes sure that even though the inputSize given at
# configure time does not match the input spectrum, the algorithm does
# not crash and correctly resizes internal structures to avoid errors.
spec = [.1,.4,.5,.2,.1,.01,.04]*100
np.savetxt("out.csv", TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), delimiter=',')
self.assertAlmostEqualVector(
TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec),
[0.0460643246769905]*24,
1e-6)
"""
def testNotEnoughSpectrumBins(self):
self.assertConfigureFails(TriangularBarkBands(), {'numberBands': 256,
'inputSize': 1025})
"""
suite = allTests(TestTriangularBarkBands)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
mkuiack/tkp | tkp/sourcefinder/image.py | Python | bsd-2-clause | 37,453 | 0.001762 | """
Some generic utility routines for number handling and
calculating (specific) variances
"""
import logging
import itertools
import numpy
from tkp.utility import containers
from tkp.utility.memoize import Memoize
from tkp.sourcefinder import utils
from tkp.sourcefinder import stats
from tkp.sourcefinder import extract
try:
import ndimage
except ImportError:
from scipy import ndimage
logger = logging.getLogger(__name__)
#
# Hard-coded configuration parameters; not user settable.
#
INTERPOLATE_ORDER = 1 # Spline order for grid interpolation
MEDIAN_FILTER = 0 # If non-zero, apply a median filter of size
# MEDIAN_FILTER to the background and RMS grids prior
# to interpolating.
MF_THRESHOLD = 0 # If MEDIAN_FILTER is non-zero, only use the filtered
# grid when the (absolute) difference between the raw
# and filtered grids is larger than MF_THRESHOLD.
DEBLEND_MINCONT = 0.005 # Min. fraction of island flux in deblended subisland
STRUCTURING_ELEMENT = [[0,1,0], [1,1,1], [0,1,0]] # Island connectiivty
class ImageData(object):
"""Encapsulates an image in terms of a numpy array + meta/headerdata.
This is your primary contact point for interaction with images: it icludes
facilities for source extraction and measurement, etc.
"""
    def __init__(self, data, beam, wcs, margin=0, radius=0, back_size_x=32,
                 back_size_y=32, residuals=True
                 ):
        """Sets up an ImageData object.

        *Args:*
          - data (2D numpy.ndarray): actual image data
          - wcs (utility.coordinates.wcs): world coordinate system
            specification
          - beam (3-tuple): beam shape specification as
            (semimajor, semiminor, theta)
          - margin (int): edge margin (in pixels) to mask out
          - radius: if non-zero, mask out data beyond this distance from
            the image centre
          - back_size_x, back_size_y (int): background grid cell size
          - residuals (bool): whether residual maps should be kept
        """
        # Do data, wcs and beam need deepcopy?
        # Probably not (memory overhead, in particular for data),
        # but then the user shouldn't change them outside ImageData in the
        # mean time
        self.rawdata = data  # a 2D numpy array
        self.wcs = wcs  # a utility.coordinates.wcs instance
        self.beam = beam  # tuple of (semimaj, semimin, theta)
        # Caches filled in during source extraction.
        self.clip = {}
        self.labels = {}
        self.freq_low = 1
        self.freq_high = 1
        self.back_size_x = back_size_x
        self.back_size_y= back_size_y
        self.margin = margin
        self.radius = radius
        self.residuals = residuals
self.residuals = residuals
###########################################################################
# #
# Properties and attributes. #
# | #
# Properties are attributes managed by methods; rather than calling the #
# method directly, the attribute automatically invokes it. We can use #
# this to do cunning transparent caching ("memoizing") etc; see the | #
# Memoize class. #
# #
# clearcache() clears all the memoized data, which can get quite large. #
# It may be wise to call this, for example, in an exception handler #
# dealing with MemoryErrors. #
# #
###########################################################################
    @Memoize
    def _grids(self):
        """Gridded RMS and background data for interpolating"""
        return self.__grids()
    # Memoized property: `del image.grids` clears the cached value.
    grids = property(fget=_grids, fdel=_grids.delete)
    @Memoize
    def _backmap(self):
        """Background map (user-supplied map takes precedence over the
        interpolated grid)."""
        if not hasattr(self, "_user_backmap"):
            return self._interpolate(self.grids['bg'])
        else:
            return self._user_backmap
    def _set_backmap(self, bgmap):
        # Setting a user background invalidates the cached maps derived
        # from it.
        self._user_backmap = bgmap
        del(self.backmap)
        del(self.data_bgsubbed)
    backmap = property(fget=_backmap, fdel=_backmap.delete, fset=_set_backmap)
    @Memoize
    def _get_rm(self):
        """RMS map (user-supplied map takes precedence over the
        interpolated grid)."""
        if not hasattr(self, "_user_noisemap"):
            # roundup=True: interpolation must never underestimate the noise.
            return self._interpolate(self.grids['rms'], roundup=True)
        else:
            return self._user_noisemap
    def _set_rm(self, noisemap):
        self._user_noisemap = noisemap
        del(self.rmsmap)
    rmsmap = property(fget=_get_rm, fdel=_get_rm.delete, fset=_set_rm)
    @Memoize
    def _get_data(self):
        """Masked image data"""
        # We will ignore all the data which is masked for the rest of the
        # sourcefinding process. We build up the mask by stacking ("or-ing
        # together") a number of different effects:
        #
        # * A margin from the edge of the image;
        # * Any data outside a given radius from the centre of the image;
        # * Data which is "obviously" bad (equal to 0 or NaN).
        mask = numpy.zeros((self.xdim, self.ydim))
        if self.margin:
            margin_mask = numpy.ones((self.xdim, self.ydim))
            margin_mask[self.margin:-self.margin, self.margin:-self.margin] = 0
            mask = numpy.logical_or(mask, margin_mask)
        if self.radius:
            radius_mask = utils.circular_mask(self.xdim, self.ydim, self.radius)
            mask = numpy.logical_or(mask, radius_mask)
        mask = numpy.logical_or(mask, numpy.where(self.rawdata == 0, 1, 0))
        mask = numpy.logical_or(mask, numpy.isnan(self.rawdata))
        return numpy.ma.array(self.rawdata, mask=mask)
    data = property(fget=_get_data, fdel=_get_data.delete)
    @Memoize
    def _get_data_bgsubbed(self):
        """Background subtracted masked image data"""
        return self.data - self.backmap
    data_bgsubbed = property(fget=_get_data_bgsubbed,
                             fdel=_get_data_bgsubbed.delete)
    @property
    def xdim(self):
        """X pixel dimension of (unmasked) data"""
        # Axis 0 of the raw array.
        return self.rawdata.shape[0]
    @property
    def ydim(self):
        """Y pixel dimension of (unmasked) data"""
        # Axis 1 of the raw array.
        return self.rawdata.shape[1]
    @property
    def pixmax(self):
        """Maximum pixel value (pre-background subtraction)"""
        # Computed over the masked data, so masked pixels are excluded.
        return self.data.max()
    @property
    def pixmin(self):
        """Minimum pixel value (pre-background subtraction)"""
        return self.data.min()
def clearcache(self):
"""Zap any calculated data stored in this object.
Clear the background and rms maps, labels, clip, and any locally held
data. All of these can be reconstructed from the data accessor.
Note that this *must* be run to pick up any new settings.
"""
self.labels.clear()
self.clip.clear()
del(self.backmap)
del(self.rmsmap)
del(self.data)
del(self.data_bgsubbed)
del(self.grids)
if hasattr(self, 'residuals_from_gauss_fitting'):
del(self.residuals_from_gauss_fitting)
if hasattr(self, 'residuals_from_deblending'):
del(self.residuals_from_deblending)
###########################################################################
# #
# General purpose image handling. #
# #
# Routines for saving and trimming data, and calculating background/RMS #
# maps (in conjuntion with the properties above). #
# #
###########################################################################
# Private "support" methods
def __grids(self):
"""Calculate background and RMS grids of this image.
These grids can be interpolated up to make maps of the original image
dimensions: see _interpolate().
This is called automatically when ImageData.backmap,
Image |
LoLab-VU/pysb | pysb/examples/run_tutorial_a.py | Python | bsd-2-clause | 244 | 0 | from __future__ import print_function
from pysb.simulator import ScipyOdeSimulator
from tutorial_a import model
# Simulate the tutorial model over one minute, sampled every 10 seconds,
# and print the species trajectories.
t = [0, 10, 20, 30, 40, 50, 60]
simulator = ScipyOdeSimulator(model, tspan=t)
simresult = simulator.run()
print(simresult.species)
|
Schamnad/cclib | src/cclib/parser/daltonparser.py | Python | bsd-3-clause | 52,550 | 0.002873 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for DALTON output files"""
from __future__ import print_function
import numpy
from . import logfileparser
from . import utils
class DALTON(logfileparser.Logfile):
"""A DALTON log file."""
    def __init__(self, *args, **kwargs):
        # Call the __init__ method of the superclass
        super(DALTON, self).__init__(logname="DALTON", *args, **kwargs)
    def __str__(self):
        """Return a string representation of the object."""
        return "DALTON log file %s" % (self.filename)
    def __repr__(self):
        """Return a representation of the object."""
        return 'DALTON("%s")' % (self.filename)
    def normalisesym(self, label):
        """Normalise the symmetries used by DALTON.

        No-op: DALTON already emits the labels in the expected form.
        """
        # It appears that DALTON is using the correct labels.
        return label
    def before_parsing(self):
        """Initialise per-file parsing state before extract() is called."""
        # Used to decide whether to wipe the atomcoords clean.
        self.firststdorient = True
        # Use to track which section/program output we are parsing,
        # since some programs print out the same headers, which we
        # would like to use as triggers.
        self.section = None
        # If there is no symmetry, assume this.
        self.symlabels = ['Ag']
        # Is the basis set from a single library file? This is true
        # when the first line is BASIS, false for INTGRL/ATOMBASIS.
        self.basislibrary = True
def parse_geometry(self, lines):
"""Parse DALTON geometry lines into an atomcoords array."""
coords = []
for lin in lines:
# Without symmetry there are simply four columns, and with symmetry
# an extra label is printed after the atom type.
cols = lin.split()
if cols[1][0] == "_":
xyz = cols[2:]
else:
xyz = cols[1:]
# The assumption is that DALTON always print in atomic units.
xyz = [utils.convertor(float(x), 'bohr', 'Angstrom') for x in xyz]
coords.append(xyz)
return coords
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# extract the version number first
if line[4:30] == "This is output from DALTON":
if line.split()[5] == "release" or line.split()[5] == "(Release":
self.metadata["package_version"] = line.split()[6][6:]
else:
self.metadata["package_version"] = line.split()[5]
# Is the basis set from a single library file, or is it
# manually specified? See before_parsing().
if line[:6] == 'INTGRL'or line[:9] == 'ATOMBASIS':
self.basislibrary = False
# This section at the start of geometry optimization jobs gives us information
# about optimization targets (geotargets) and possibly other things as well.
# Notice how the number of criteria required to converge is set to 2 here,
# but this parameter can (probably) be tweaked in the input.
#
# Chosen parameters for *OPTIMI :
# -------------------------------
#
# Default 1st order method will be used: BFGS update.
# Optimization will be performed in redundant internal coordinates (by default).
# Model Hessian will be used as initial Hessian.
# The model Hessian parameters of Roland Lindh will be used.
#
#
# Trust region method will be used to control step (default).
#
# Convergence threshold for gradient set to : 1.00D-04
# Convergence threshold for energy set to : 1.00D-06
# Convergence threshold for step set to : 1.00D-04
# Number of convergence criteria set to : 2
#
if line.strip()[:25] == "Convergence threshold for":
if not hasattr(self, 'geotargets'):
self.geotargets = []
self.geotargets_names = []
target = self.float(line.split()[-1])
name = line.strip()[25:].split()[0]
self.geotargets.append(target)
self.geotargets_names.append(name)
# This is probably the first place where atomic symmetry labels are printed,
# somewhere afer the SYMGRP point group information section. We need to know
# which atom is in which symmetry, since this influences how some things are
# print later on. We can also get some generic attributes along the way.
#
# Isotopic Masses
# ---------------
#
# C _1 12.000000
# C _2 12.000000
# C _1 12.000000
# C _2 12.000000
# ...
#
# Note that when there is no symmetry there are only two columns here.
#
# It is also a good idea to keep in mind that DALTON, with symmetry on, operates
# in a specific point group, so sy | mmetry atoms have no internal representation.
# Therefore only atoms marked as "_1" or "#1" in other places are actually
# represented in the model. The symmetry atoms (h | igher symmetry indices) are
# generated on the fly when writing the output. We will save the symmetry indices
# here for later use.
#
# Additional note: the symmetry labels are printed only for atoms that have
# symmetry images... so assume "_1" if a label is missing. For example, there will
# be no label for atoms on an axes, such as the oxygen in water in C2v:
#
# O 15.994915
# H _1 1.007825
# H _2 1.007825
#
if line.strip() == "Isotopic Masses":
self.skip_lines(inputfile, ['d', 'b'])
# Since some symmetry labels may be missing, read in all lines first.
lines = []
line = next(inputfile)
while line.strip():
lines.append(line)
line = next(inputfile)
# Split lines into columsn and dd any missing symmetry labels, if needed.
lines = [l.split() for l in lines]
if any([len(l) == 3 for l in lines]):
for il, l in enumerate(lines):
if len(l) == 2:
lines[il] = [l[0], "_1", l[1]]
atomnos = []
symmetry_atoms = []
atommasses = []
for cols in lines:
cols0 = ''.join([i for i in cols[0] if not i.isdigit()]) #remove numbers
atomnos.append(self.table.number[cols0])
if len(cols) == 3:
symmetry_atoms.append(int(cols[1][1]))
atommasses.append(float(cols[2]))
else:
atommasses.append(float(cols[1]))
self.set_attribute('atomnos', atomnos)
self.set_attribute('atommasses', atommasses)
self.set_attribute('natom', len(atomnos))
self.set_attribute('natom', len(atommasses))
# Save this for later if there were any labels.
self.symmetry_atoms = symmetry_atoms or None
# This section is close to the beginning of the file, and can be used
# to parse natom, nbasis and atomnos. We also construct atombasis here,
# although that is symmetry-dependent (see inline comments). Note that
# DALTON operates on the idea of atom type, which are not necessarily
# unique element-wise.
#
# Atoms and basis sets
# --------------------
#
# Number of atom types : 6
# Total number of atoms: 20
#
# Basis set used is "STO-3G" from th |
cilcoberlin/panoptes | panoptes/tracking/admin.py | Python | bsd-3-clause | 277 | 0.01444 |
from django.contrib import | admin
from panoptes.tracking.models import AccountFilter
class AccountFilterAdmin(admin.ModelAdmin):
    """Admin changelist for AccountFilter: show the location and its
    include/exclude user lists, sorted by location."""
    list_display = ('location', 'include_users', 'exclude_users')
    ordering = ('location',)
admin.site.register(AccountFilter, AccountFilterAdmin)
|
absoludity/django-cumulus | cumulus/management/commands/container_create.py | Python | bsd-3-clause | 2,713 | 0.00258 | import optparse
import pyrax
import swiftclient
from django.core.management.base import BaseCommand, CommandError
from cumulus.settings import CUMULUS
def cdn_enabled_for_container(container):
    """Return True if *container* reports CDN support, else False.

    pyrax.cf_wrapper.CFClient assumes cdn_connection: the pyrax swift
    client wrapper assumes that if you're using pyrax, you're using the
    CDN support that's only available with the rackspace openstack, so
    non-Rackspace containers have no ``cdn_enabled`` attribute at all.

    This can be removed once the following pull-request lands
    (or is otherwise resolved):
    https://github.com/rackspace/pyrax/pull/254
    """
    # getattr with a default expresses the original
    # try/except AttributeError fallback directly.
    return getattr(container, "cdn_enabled", False)
class Command(BaseCommand):
    """Management command: create a (by default public) storage container."""
    help = "Create a container."
    args = "[container_name]"
    option_list = BaseCommand.option_list + (
        optparse.make_option("-p", "--private", action="store_true", default=False,
                             dest="private", help="Make a private container."),)
    def connect(self):
        """
        Connects using the swiftclient api.
        """
        # All connection parameters come from the CUMULUS settings dict.
        self.conn = swiftclient.Connection(authurl=CUMULUS["AUTH_URL"],
                                           user=CUMULUS["USERNAME"],
                                           key=CUMULUS["API_KEY"],
                                           snet=CUMULUS["SERVICENET"],
                                           auth_version=CUMULUS["AUTH_VERSION"],
                                           tenant_name=CUMULUS["AUTH_TENANT_NAME"])
    def handle(self, *args, **options):
        """Create the named container; unless --private was given, make it
        world-readable and (when pyrax is configured) CDN-enabled."""
        if len(args) != 1:
            raise CommandError("Pass one and only one [container_name] as an argument")
        self.connect()
        container_name = args[0]
        print("Creating container: {0}".format(container_name))
        self.conn.put_container(container_name)
        if not options.get("private"):
            print("Publish container: {0}".format(container_name))
            # ".r:*" grants read access to any referrer, i.e. a public container.
            headers = {"X-Container-Read": ".r:*"}
            self.conn.post_container(container_name, headers=headers)
            if CUMULUS["USE_PYRAX"]:
                if CUMULUS["PYRAX_IDENTITY_TYPE"]:
                    pyrax.set_setting("identity_type", CUMULUS["PYRAX_IDENTITY_TYPE"])
                pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
                # ServiceNet connections must use the internal endpoint.
                public = not CUMULUS["SERVICENET"]
                connection = pyrax.connect_to_cloudfiles(region=CUMULUS["REGION"],
                                                         public=public)
                container = connection.get_container(container_name)
                if cdn_enabled_for_container(container):
                    container.make_public(ttl=CUMULUS["TTL"])
|
futurice/schedule | schedulesite/middleware.py | Python | bsd-3-clause | 659 | 0.013657 | from django.contrib.auth.middleware import RemoteUserMiddleware
from django.conf import settings
import os
#This middleware adds header REMOTE_USER with current REMOTE_USER from settings to every request.
#This is required when running app with uwsgi locally (with runserver this is unneces | sary)
#In production, when FAKE_LOGIN=False, the REMOTE_USER header should be set by sso
class SetUserMiddleware():
def process_request(self, request):
if settings.FAKE_LOGIN:
req | uest.META['REMOTE_USER'] = os.getenv('REMOTE_USER')
class CustomHeaderMiddleware(RemoteUserMiddleware):
header = os.getenv('REMOTE_USER_HEADER', 'REMOTE_USER') |
xingjian-f/Leetcode-solution | 354. Russian Doll Envelopes.py | Python | mit | 123 | 0.065041 | cla | ss Solution(object):
def maxEnvelopes(self, envelopes) | :
"""
:type envelopes: List[List[int]]
:rtype: int
"""
|
jiadaizhao/LeetCode | 0101-0200/0116-Populating Next Right Pointers in Each Node/0116-Populating Next Right Pointers in Each Node.py | Python | mit | 612 | 0.003268 | # Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.righ | t = right
self.next = next
class Solution:
def connect(self, root: 'Node') -> 'Node':
curr = root
while curr:
p = curr
while p:
| if p.left:
p.left.next = p.right
if p.next:
p.right.next = p.next.left
p = p.next
curr = curr.left
return root
|
farr/LIGOHamlet | test.py | Python | gpl-3.0 | 685 | 0.008759 | import numpy as np
import numpy.random as nr
def draw_bg(size=(1,), snr_min=5.5, snr_std=2):
bgs = nr.normal(loc=snr_min, scale=snr_std, size=size)
sel = bgs < snr_min
while np.count_nonzero(sel) > 0:
bgs[ | sel] = nr.normal(loc=snr_min, scale=snr_std, size=np.count_nonzero(sel))
sel = bgs < snr_min
return bgs
def draw_fg(size=(1,), snr_min=5.5):
us = nr.uniform(size=size)
return snr_min/(1 - us)**(1.0/3.0)
def data_set(ncoinc, ffore, nbgs):
bgs = [draw_bg(size=nb) for nb in nbgs]
nf = int(round(ncoinc*ffore))
fgs = [np.concatenate((draw_bg(ncoinc-nf), draw_fg(nf))) for nb in nbgs]
return np.a | rray(fgs), bgs
|
simontakite/sysadmin | pythonscripts/learningPython/lambdas1.py | Python | gpl-2.0 | 587 | 0.003407 | L = [lambda x: x ** 2, # Inline function definition
lambda x: x ** 3,
lambda | x: x ** 4] # A list of 3 callable functions
for f in L:
print(f(2)) # Prints 4, 8, 16
print(L[0](3)) # Prints 9
def f1(x): return x ** 2
def f2(x): return x ** 3 # Define named functions
def | f3(x): return x ** 4
L = [f1, f2, f3] # Reference by name
for f in L:
print(f(2)) # Prints 4, 8, 16
print(L[0](3)) # Prints 9
|
longmen21/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/split.py | Python | agpl-3.0 | 158,905 | 0.003656 | """
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an Obj | ectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick | determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
***** 'source_version': the guid for the structure was copied/published into this block
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
from contracts import contract, new_contract
from importlib import import_module
from mongodb_proxy import autoretry_read
from path import Path as path
from pytz import UTC
from bson.objectid import ObjectId
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.course_module import CourseSummary
from xmodule.errortracker import null_error_tracker
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import (
BlockUsageLocator, DefinitionLocator, CourseLocator, LibraryLocator, VersionTree, LocalId,
)
from ccx_keys.locator import CCXLocator, CCXBlockUsageLocator
from xmodule.modulestore.exceptions import InsufficientSpecificationError, VersionConflictError, DuplicateItemError, \
DuplicateCourseError, MultipleCourseBlocksFound
from xmodule.modulestore import (
inheritance, ModuleStoreWriteBase, ModuleStoreEnum,
BulkOpsRecord, BulkOperationsMixin, SortedAssetList, BlockData
)
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, DuplicateKeyError
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.error_module import ErrorDescriptor
from collections import defaultdict
from types import NoneType
from xmodule.assetstore import AssetMetadata
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix wont' permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When blacklists are this, all children should be excluded
EXCLUDE_ALL = '*'
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
def __init__(self):
super(SplitBulkWriteRecord, self).__init__()
self.initial_index = None
self.index = None
self.structures = {}
self.structures_in_db = set()
# dict(version_guid, dict(BlockKey, module))
self.modules = defaultdict(dict)
self.definitions = {}
self.definitions_in_db = set()
self.course_key = None
# TODO: This needs to track which branches have actually been modified/versioned,
# so that copying one branch to another doesn't update the original branch.
@property
def dirty_branches(self):
"""
Return a list of which branch version ids differ from what was stored
in the database at the beginning of this bulk operation.
"""
# If no course index has been set, then no branches have changed
if self.index is None:
return []
# If there was no index in the database to start with, then all branches
# are dirty by definition
if self.initial_index is None:
return self.index.get('versions', {}).keys()
# Return branches whose ids differ between self.index and self.initial_index
return [
branch
for branch, _id
in self.index.get('versions', {}).items()
if self.initial_index.get('versions', {}).get(branch) != _id
]
def structure_for_branch(self, branch):
return self.structures.get(self.index.get('versions', {}).get(branch))
def set_structure_for_branch(self, branch, structure):
if self.index is not None:
self.index.setdefault('versions', {})[branch] = structure['_id']
self.structures[structure['_id']] = structure
def __repr__(self):
return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
self._active |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractWwwWhitesharkTk.py | Python | bsd-3-clause | 546 | 0.034799 |
def extractWwwWhitesharkTk(item):
'''
Parser for 'www.whiteshark.tk'
'''
vol, chp, | frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
|
return False
|
claudep/pootle | pootle/core/views/translate.py | Python | gpl-3.0 | 3,056 | 0.000327 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
from django.conf import settings
from pootle.core.url_helpers import get_previous_url
from pootle_app.models.permissions import check_permission
from pootle_misc.checks import (
CATEGORY_IDS, check_names,
get_qualitycheck_schema, get_qualitychecks)
from pootle_misc.forms import make_search_form
from pootle_store.constants import AMAGAMA_SOURCE_LANGUAGES
from .base import PootleDetailView
class PootleTranslateView(PootleDetailView):
template_name = "editor/main.html"
page_name | = "translate"
view_name = ""
@property
def check_data(self):
return self.object.data_tool.get_checks()
@property
| def checks(self):
check_data = self.check_data
checks = get_qualitychecks()
schema = {sc["code"]: sc for sc in get_qualitycheck_schema()}
_checks = {}
for check, checkid in checks.items():
if check not in check_data:
continue
_checkid = schema[checkid]["name"]
_checks[_checkid] = _checks.get(
_checkid, dict(checks=[], title=schema[checkid]["title"]))
_checks[_checkid]["checks"].append(
dict(
code=check,
title=check_names[check],
count=check_data[check]))
return OrderedDict(
(k, _checks[k])
for k in CATEGORY_IDS.keys()
if _checks.get(k))
@property
def ctx_path(self):
return self.pootle_path
@property
def vfolder_pk(self):
return ""
@property
def display_vfolder_priority(self):
return False
@property
def chunk_size(self):
return self.request.user.get_unit_rows()
def get_context_data(self, *args, **kwargs):
ctx = super(PootleTranslateView, self).get_context_data(*args, **kwargs)
ctx.update(
{'page': self.page_name,
'chunk_size': self.chunk_size,
'current_vfolder_pk': self.vfolder_pk,
'ctx_path': self.ctx_path,
'display_priority': self.display_vfolder_priority,
'checks': self.checks,
'cantranslate': check_permission("translate", self.request),
'cansuggest': check_permission("suggest", self.request),
'canreview': check_permission("review", self.request),
'search_form': make_search_form(request=self.request),
'previous_url': get_previous_url(self.request),
'POOTLE_MT_BACKENDS': settings.POOTLE_MT_BACKENDS,
'AMAGAMA_URL': settings.AMAGAMA_URL,
'AMAGAMA_SOURCE_LANGUAGES': AMAGAMA_SOURCE_LANGUAGES,
'editor_extends': self.template_extends})
return ctx
|
zestrada/nova-cs498cc | nova/virt/hyperv/volumeops.py | Python | apache-2.0 | 9,291 | 0.000969 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
hyper_volumeops_opts = [
cfg.IntOpt('volume_attach_retry_count',
default=10,
help='The number of times to retry to attach a volume'),
cfg.IntOpt('volume_attach_retry_interval',
default=5,
help='Interval between volume attachment attempts, in seconds'),
cfg.BoolOpt('force_volumeutils_v1',
default=False,
help='Force volumeutils v1'),
]
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts, 'hyperv')
CONF.import_opt('my_ip', 'nova.netconf')
class VolumeOps(object):
"""
Management class for Volume-related tasks
"""
def __init__(self):
self._hostutils = hostutils.HostUtils()
self._vmutils = vmutils.VMUtils()
self._volutils = self._get_volume_utils()
self._initiator = None
self._default_root_device = 'vda'
def _get_volume_utils(self):
if(not CONF.hyperv.force_volumeutils_v1 and
self._hostutils.get_windows_version() >= 6.2):
return volumeutilsv2.VolumeUtilsV2()
else:
return volumeutils.VolumeUtils()
def ebs_root_in_block_devices(self, block_device_info):
return self._volutils.volume_in_mapping(self._default_root_device,
block_device_info)
def attach_volumes(self, block_device_info, instance_name, ebs_root):
mapping = driver.block_device_info_get_mapping(block_device_info)
if ebs_root:
self.attach_volume(mapping[0]['connection_info'],
instance_name, True)
mapping = mapping[1:]
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
def login_storage_targets(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self._login_storage_target(vol['connection_info'])
def _login_storage_target(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
# Check if we already logged in
if self._volutils.get_device_number_for_target(target_iqn, target_lun):
LOG.debug(_("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s") % locals())
else:
LOG.debug(_("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s") % locals())
self._volutils.login_storage_target(target_lun, target_iqn,
target_portal)
# Wait for the target to be mounted
self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""
Attach a volume to the SCSI controller or to the IDE controller if
ebs_root is True
"""
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s")
% locals())
try:
self._login_storage_target(connection_info)
data = connection_i | nfo['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
if ebs_root:
#Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
#Attaching | to the first slot
slot = 0
else:
#Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
mounted_disk_path)
except Exception as exn:
LOG.exception(_('Attach volume failed: %s'), exn)
self._volutils.logout_storage_target(target_iqn)
raise vmutils.HyperVException(_('Unable to attach volume '
'to instance %s') % instance_name)
def _get_free_controller_slot(self, scsi_controller_path):
#Slots starts from 0, so the lenght of the disks gives us the free slot
return self._vmutils.get_attached_disks_count(scsi_controller_path)
def detach_volumes(self, block_device_info, instance_name):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self.detach_volume(vol['connection_info'], instance_name)
def logout_storage_target(self, target_iqn):
LOG.debug(_("Logging off storage target %(target_iqn)s") % locals())
self._volutils.logout_storage_target(target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Dettach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s") % locals())
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
LOG.debug(_("Detaching physical disk from instance: %s"),
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
self.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
'ip': CONF.my_ip,
'initiator': self._initiator,
}
def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
wait_for_device=False):
device_number = self._volutils.get_device_number_for_target(target_iqn,
target_lun)
if device_number is None:
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s') % target_iqn)
LOG.d |
theislab/anndata | anndata/compat/__init__.py | Python | bsd-3-clause | 10,424 | 0.001536 | from copy import deepcopy
from functools import reduce, singledispatch, wraps
from codecs import decode
from inspect import signature, Parameter
from typing import Any, Collection, Union, Mapping, MutableMapping, Optional
from warnings import warn
import h5py
from scipy.sparse import spmatrix
import numpy as np
import pandas as pd
from ._overloaded_dict import _overloaded_uns, OverloadedDict
from .._core.index import _subset
class Empty:
pass
H5Group = Union[h5py.Group, h5py.File]
H5Array = h5py.Dataset
# try importing zarr, dask, and zappy
from packaging import version
try:
from zarr.core import Array as ZarrArray
from zarr.hierarchy import Group as ZarrGroup
except ImportError:
class ZarrArray:
@staticmethod
def __repr__():
return "mock zarr.core.Array"
class ZarrGroup:
@staticmethod
def __repr__():
return "mock zarr.core.Group"
try:
from zappy.base import ZappyArray
except ImportError:
class ZappyArray:
@staticmethod
def __repr__():
return "mock zappy.base.ZappyArray"
try:
from dask.array import Array as DaskArray
except ImportError:
class DaskArray:
@staticmethod
def __repr__():
return "mock dask.array.core.Array"
try:
from typing import Literal
except ImportError:
try:
from typing_extensions import Literal
except ImportError:
class LiteralMeta(type):
def __getitem__(cls, values):
if not isinstance(values, tuple):
values = (values,)
return type("Literal_", (Literal,), dict(__args__=values))
class Literal(metaclass=LiteralMeta):
pass
@singledispatch
def _read_attr(attrs: Mapping, name: str, default: Optional[Any] = Empty):
if default is Empty:
return attrs[name]
else:
return attrs.get(name, default=default)
@_read_attr.register(h5py.AttributeManager)
def _read_attr_hdf5(
attrs: h5py.AttributeManager, name: str, default: Optional[Any] = Empty
):
"""
Read an HDF5 attribute and perform all necessary conversions.
At the moment, this only implements conversions for string attributes, other types
are passed through. String conversion is needed compatibility with other languages.
For example Julia's HDF5.jl writes string attributes as fixed-size strings, which
are read as bytes by h5py.
"""
if name not in attrs and default is not Empty:
return default
attr = attrs[name]
attr_id = attrs.get_id(name)
dtype = h5py.check_string_dtype(attr_id.dtype)
if dtype is None:
return attr
else:
if dtype.length is None: # variable-length string, no problem
return attr
elif len(attr_id.shape) == 0: # Python bytestring
return attr.decode("utf-8")
else: # NumPy array
return [decode(s, "utf-8") for s in attr]
def _from_fixed_length_strings(value):
"""\
Convert from fixed length strings to unicode.
For backwards compatability with older h5ad and zarr files.
"""
new_dtype = []
for dt in value.dtype.descr:
dt_list = list(dt)
dt_type = dt[1]
# could probably match better
is_annotated = isinstance(dt_type, tuple)
if is_annotated:
dt_type = dt_type[0]
# Fixing issue introduced with h5py v2.10.0, see:
# https://github.com/h5py/h5py/issues/1307
if issubclass(np.dtype(dt_type).type, np.string_):
dt_list[1] = f"U{int(dt_type[2:])}"
elif is_annotated or np.issubdtype(np.dtype(dt_type), np.str_):
dt_list[1] = "O" # Assumption that it’s a vlen str
new_dtype.append(tuple(dt_list))
return value.astype(new_dtype)
def _decode_structured_array(
arr: np.ndarray, dtype: Optional[np.dtype] = None, copy: bool = False
) -> np.ndarray:
"""
h5py 3.0 now reads all strings as bytes. There is a helper method which can convert these to strings,
but there isn't anything for fields of structured dtypes.
Params
------
arr
An array with structured dtype
dtype
dtype of the array. This is checked for h5py string data types.
Passing this is allowed for cases where array may have been processed by another function before hand.
"""
if copy:
arr = arr.copy()
if dtype is None:
dtype = arr.dtype
# codecs.decode is 2x slower than this lambda, go figure
decode = np.frompyfunc(lambda x: x.decode("utf-8"), 1, 1)
for k, (dt, _) in dtype.fields.items():
check = h5py.check_string_dtype(dt)
if check is not None and check.encoding == "utf-8":
decode(arr[k], out=arr[k])
return arr
def _to_fixed_length_strings(value: np.ndarray) -> np.ndarray:
"""\
Convert variable length strings to fixed length.
Currently a workaround for
https://github.com/zarr-developers/zarr-python/pull/422
"""
new_dtype = []
for dt_name, (dt_type, dt_offset) in value.dtype.fields.items():
if dt_type.kind == "O":
# Assuming the objects are str
size = max(len(x.encode()) for x in value.getfield("O", dt_offset))
new_dtype.append((dt_name, ("U", size)))
else:
new_dtype.append((dt_name, dt_type))
return value.astype(new_dtype)
#############################
# Dealing with uns
#############################
def _clean_uns(d: Mapping[str, MutableMapping[str, Union[pd.Series, str, int]]]):
"""
Compat function for when categorical keys were stored in uns.
This used to be buggy because when storing categorical columns in obs and var with
the same column name, only one `<colname>_categories` is retained.
"""
k_to_delete = set()
for cats_name, cats in d.get("uns", {}).items():
if not cats_name.endswith("_categories"):
continue
name = cats_name.replace("_categories", "")
# fix categories with a single category
if isinstance(cats, (str, int)):
cats = [cats]
for ann in ["obs", "var"]:
if name | not in d[ann]:
continue
codes: np.ndarray = d[ann][name].values
# hack to maybe find the axis the categories were for
if not np.all(codes < len(cats)):
continue
d[ann][name] = pd.Categorical.from_codes(codes, cats)
k_to_delete.add(cats_name)
for cats_name | in k_to_delete:
del d["uns"][cats_name]
def _move_adj_mtx(d):
"""
Read-time fix for moving adjacency matrices from uns to obsp
"""
n = d.get("uns", {}).get("neighbors", {})
obsp = d.setdefault("obsp", {})
for k in ("distances", "connectivities"):
if (
(k in n)
and isinstance(n[k], (spmatrix, np.ndarray))
and len(n[k].shape) == 2
):
warn(
f"Moving element from .uns['neighbors']['{k}'] to .obsp['{k}'].\n\n"
"This is where adjacency matrices should go now.",
FutureWarning,
)
obsp[k] = n.pop(k)
def _find_sparse_matrices(d: Mapping, n: int, keys: tuple, paths: list):
"""Find paths to sparse matrices with shape (n, n)."""
for k, v in d.items():
if isinstance(v, Mapping):
_find_sparse_matrices(v, n, (*keys, k), paths)
elif isinstance(v, spmatrix) and v.shape == (n, n):
paths.append((*keys, k))
return paths
def _slice_uns_sparse_matrices(uns: MutableMapping, oidx: "Index1d", orig_n_obs: int):
"""slice sparse spatrices of n_obs × n_obs in self.uns"""
if isinstance(oidx, slice) and len(range(*oidx.indices(orig_n_obs))) == orig_n_obs:
return uns # slice of entire dimension is a no-op
paths = _find_sparse_matrices(uns, orig_n_obs, (), [])
if not paths:
return uns
uns = deepcopy(uns)
for path in paths:
str_path = "".join(f"['{key}']" for key in path)
warn(
f"During AnnData slicing, found matrix at .uns{str_path} that |
bharling/django-gevent-socketio | django_socketio_tests/views.py | Python | bsd-3-clause | 127 | 0.015748 | f | rom django.s | hortcuts import render
# Create your views here.
def home(request):
return render(request, 'home.djhtml', {}) |
bpiwowar/lyx | lib/scripts/clean_dvi.py | Python | gpl-2.0 | 3,211 | 0.004983 | '''
file clean_dvi.py
This file is part of LyX, the document processor.
Licence details can be found in the file COPYING
or at http://www.lyx.org/about/licence.php
author Angus Leeming
Full author contact details are available in the file CREDITS
or at http://www.lyx.org/about/credits.php
Usage:
python clean_dvi.py infile.dvi outfile.dvi
c | lean_dvi modifies the input .dvi file so that
dvips and yap (a dvi viewer on Windows) can find
any embedded PostScript files whose names are protected
with "-quotes.
It works by:
1 translating the machine readable .dvi file to human
readable .dtl form,
2 manipulating any references to external files
3 translating the .dtl file back to .dvi format.
It requires dv2dt and dt2dv from the DTL dviware package
http://www.ctan.org/tex-archive/dviware/dtl/
'''
import os, re, subprocess, sys
def usage(prog_ | name):
return 'Usage: %s in.dvi out.dvi\n' \
% os.path.basename(prog_name)
def warning(message):
sys.stderr.write(message + '\n')
def error(message):
sys.stderr.write(message + '\n')
sys.exit(1)
def manipulated_dtl(data):
psfile_re = re.compile(r'(special1 +)([0-9]+)( +\'PSfile=")(.*)(" llx=.*)')
lines = data.split('\n')
for i in range(len(lines)):
line = lines[i]
match = psfile_re.match(line)
if match != None:
file = match.group(4)
filelen = len(file)
file = file.replace('"', '')
# Don't forget to update the length of the string too...
strlen = int(match.group(2)) - (filelen - len(file))
lines[i] = '%s%d%s%s%s' \
% ( match.group(1), strlen, match.group(3),
file, match.group(5) )
return '\n'.join(lines)
def main(argv):
# First establish that the expected information has
# been input on the command line and whether the
# required executables exist.
if len(argv) != 3:
error(usage(argv[0]))
infile = argv[1]
outfile = argv[2]
if not os.path.exists(infile):
error('Unable to read "%s"\n' % infile)
# Convert the input .dvi file to .dtl format.
if os.name == 'nt':
unix = False
else:
unix = True
dv2dt_call = 'dv2dt "%s"' % infile
dv2dt_pipe = subprocess.Popen(dv2dt_call, universal_newlines=True, \
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \
shell=unix, close_fds=unix)
(dv2dt_stdin, dv2dt_stdout, dv2dt_stderr) = \
(dv2dt_pipe.stdin, dv2dt_pipe.stdout, dv2dt_pipe.stderr)
dv2dt_stdin.close()
dv2dt_data = dv2dt_stdout.read()
dv2dt_status = dv2dt_stdout.close()
if dv2dt_status != None or len(dv2dt_data) == 0:
dv2dt_err = dv2dt_stderr.read()
error("Failed: %s\n%s\n" % ( dv2dt_call, dv2dt_err) )
# Manipulate the .dtl file.
dtl_data = manipulated_dtl(dv2dt_data)
if dtl_data == None:
error("Failed to manipulate the dtl file")
# Convert this .dtl file back to .dvi format.
dt2dv_call = 'dt2dv -si "%s"' % outfile
dt2dv_stdin = os.popen(dt2dv_call, 'w')
dt2dv_stdin.write(dtl_data)
if __name__ == "__main__":
main(sys.argv)
|
heuer/segno | tests/test_svg.py | Python | bsd-3-clause | 16,883 | 0.000237 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2022 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
SVG related tests.
"""
from __future__ import absolute_import, unicode_literals
import os
import re
import io
import tempfile
import xml.etree.ElementTree as etree
import pytest
import segno
_SVG_NS = 'http://www.w3.org/2000/svg'
_CSS_CLASS = 'segno'
_PATH_CLASS = 'qrline'
def _get_svg_el(root, name):
return root.find('{%s}%s' % (_SVG_NS, name))
def _get_group(root):
return _get_svg_el(root, 'g')
def _get_first_path(root):
g = _get_group(root)
return _get_svg_el(root if g is None else g, 'path')
def _get_title(root):
return _get_svg_el(root, 'title')
def _get_desc(root):
return _get_svg_el(root, 'desc')
def _parse_xml(buff):
"""\
Parses XML and returns the root element.
"""
buff.seek(0)
return etree.parse(buff).getroot()
def test_write_svg():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg')
xml_str = out.getvalue()
assert xml_str.startswith(b'<?xml')
root = _parse_xml(out)
# No background (and scaling) -> no group
assert _get_group(root) is None
assert 'viewBox' not in root.attrib
assert 'height' in root.attrib
assert 'width' in root.attrib
css_class = root.attrib.get('class')
assert css_class
assert _CSS_CLASS == css_class
path_el = _get_first_path(root)
assert path_el is not None
path_class = path_el.get('class')
assert _PATH_CLASS == path_class
stroke = path_el.get('stroke')
assert stroke == '#000'
title_el = _get_title(root)
assert title_el is None
desc_el = _get_desc(root)
assert desc_el is None
@pytest.mark.parametrize('dark', ['bLack', '#000000', (0, 0, 0)])
def test_write_svg_black(dark):
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark=dark)
xml_str = out.getvalue()
assert xml_str.startswith(b'<?xml')
root = _parse_xml(out)
# No background (and scaling) -> no group
assert _get_group(root) is None
assert 'viewBox' not in root.attrib
assert 'height' in root.attrib
assert 'width' in root.attrib
css_class = root.attrib.get('class')
assert css_class
assert _CSS_CLASS == css_class
path_el = _get_first_path(root)
assert path_el is not None
path_class = path_el.get('class')
assert _PATH_CLASS == path_class
stroke = path_el.get('stroke')
assert stroke == '#000'
title_el = _get_title(root)
assert title_el is None
desc_el = _get_desc(root)
assert desc_el is None
def test_write_svg_background_omitted():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg')
xml_str = out.getvalue()
assert xml_str.startswith(b'<?xml')
root = _parse_xml(out)
# No background (and scaling) -> no group
assert _get_group(root) is None
# Background should be the first path in the doc
path = _get_first_path(root)
assert path is not None
assert not path.attrib.get('fill')
@pytest.mark.parametrize('light', ['wHitE', '#fff', (255, 255, 255), '#ffffff'])
def test_write_svg_background_white(light):
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', light=light)
xml_str = out.getvalue()
assert xml_str.startswith(b'<?xml')
root = _parse_xml(out)
# No scaling -> no group
assert _get_group(root) is None
# Background should be the first path in the doc
path = _get_first_path(root)
assert path is not None
assert '#fff' == path.attrib.get('fill')
assert path.attrib.get('class') is None
d = path.attrib.get('d')
assert d
expected = 'M0 0h{1}v{0}h-{1}z'.format(*qr.symbol_size())
assert expected == d
g = _get_group(root)
assert g is None
def test_scale_background():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='green', light='yellow', scale=10)
root = _parse_xml(out)
g = _get_group(root)
assert g is not None
assert 'scale(10)' == g.attrib.get('transform')
def test_write_svg_color_rgb():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark=(76, 131, 205))
xml_str = out.getvalue()
assert xml_str.startswith(b'<?xml')
root = _parse_xml(out)
assert 'viewBox' not in root.attrib
assert 'height' in root.attrib
assert 'width' in root.attrib
css_class = root.attrib.get('class')
assert css_class
assert _CSS_CLASS == css_class
path_el = _get_first_path(root)
assert path_el is not None
path_class = path_el.get('class')
assert _PATH_CLASS == path_class
stroke = path_el.get('stroke')
assert stroke == '#4c83cd'
title_el = _get_title(root)
assert title_el is None
desc_el = _get_desc(root)
assert desc_el is None
def test_write_svg_color_rgba():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='#0000ffcc')
assert b'stroke-opacity' in out.getvalue()
def test_write_svg_color_rgba_svg2():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', dark='#0000ffcc', svgversion=2.0)
assert b'stroke-opacity' not in out.getvalue()
root = _parse_xml(out)
path = _get_first_path(root)
assert path.attrib['stroke'].startswith('rgba')
def test_write_svg_background_rgba():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', light='#0000ffcc')
assert b'fill-opacity' in out.getvalue()
def test_write_svg_background_rgba_svg2():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', light='#0000ffcc', svgversion=2.0)
assert b'fill-opacity' not in out.getvalue()
root = _parse_xml(out)
path = _get_first_path(root)
assert path.attrib['fill'].startswith('rgba')
def test_write_no_xmldecl():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', xmldecl=False)
xml_str = out.getvalue()
assert xml_str.startswith(b'<svg')
d | ef test_viewbox():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', omitsize=True)
| root = _parse_xml(out)
assert 'viewBox' in root.attrib
assert 'height' not in root.attrib
assert 'width' not in root.attrib
def test_svgid():
qr = segno.make_qr('test')
out = io.BytesIO()
ident = 'svgid'
qr.save(out, kind='svg', svgid=ident)
root = _parse_xml(out)
assert 'id' in root.attrib
assert ident == root.attrib['id']
def test_svgid_default():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg')
root = _parse_xml(out)
assert 'id' not in root.attrib
def test_svgid_empty_string():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', svgid='')
root = _parse_xml(out)
assert 'id' not in root.attrib
def test_svgversion():
qr = segno.make_qr('test')
out = io.BytesIO()
version = 1.0
qr.save(out, kind='svg', svgversion=version)
root = _parse_xml(out)
assert 'version' in root.attrib
assert str(version) == root.attrib['version']
def test_svgversion_default():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg')
root = _parse_xml(out)
assert 'version' not in root.attrib
def test_no_svg_class():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', svgclass=None)
root = _parse_xml(out)
assert 'class' not in root.attrib
def test_no_svg_class_empty_str():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', svgclass='')
root = _parse_xml(out)
assert 'class' not in root.attrib
def test_custom_svg_class():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', svgclass='test-class')
root = _parse_xml(out)
assert 'class' in root.attrib
assert 'test-class' == root.attrib.get('class')
def test_no_line_class():
qr = segno.make_qr('test')
out = io.BytesIO()
qr.save(out, kind='svg', lineclass=None)
root = _parse_xm |
opendatakosovo/kosovolunteer | ve/views/create_event.py | Python | gpl-2.0 | 583 | 0.013722 | from flask import Flask
from flask.views import View
from flask import Response, request
import urllib2
from ve import utils
class CreateEvent(View):
def dispatch_request(self):
api_base_url = utils.get_api_url()
url | = '%s/create/event'%(api_base_url)
data = request.data
r | = urllib2.Request(url, data=data, headers={"Content-Type": "application/json"})
res = urllib2.urlopen(r)
resp = Response(
response=data,
mimetype='application/json')
return resp
#TODO: return json_response
|
arcturusannamalai/open-tamil | solthiruthi/data/tamilvu_wordlist.py | Python | mit | 1,438 | 0.020862 | #!/usr/bin/python
# (C) 2015 Muthiah Annamalai, <ezhillang@gmail.com>
# Ezhil Language Foundation
#
from __future__ import print_function
import sys
import codecs
import tamil
import json
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
class WordList:
@staticmethod
def extract_words(filename):
ht = json.load( codecs.open(filename,'r','utf-8') )
for word in sorted(ht.keys()):
print(word)
return
@staticmethod
def pull_words_from_json():
for itr in range(1,25):
filename = u"v%02d.json"%itr
WordList.extract_words(filename)
return
class WordFilter:
@staticmethod
def filter_and_save(word_size=4):
match_word_length = lambda word: len(tamil.utf8.get_letters(word.strip().replace(' ',''))) == word_size
filename = u'tamilvu_dictionary_words.txt'
matches = []
with codecs.open(filename,'r','utf-8') as fp:
ma | tches = filter( match_word_length, fp.readlines())
with codecs.open('word_filter_%0 | 2d.txt'%word_size,'w','utf-8') as fp:
for word in matches:
fp.write(u'%s\n'%word.replace(' ','').strip())
print(u'we found %d words of length %d\n'%(len(matches),word_size))
return
if __name__ == u"__main__":
# WordList.pull_words_from_json()
for wlen in range(3,20):
WordFilter.filter_and_save( wlen )
|
andrewyoung1991/abjad | setup.py | Python | gpl-3.0 | 2,910 | 0.000688 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import setuptools
import sys
from distutils.version import StrictVersion
version_file_path = os.path.join(
os.path.dirname(__file__),
'abjad',
'_version.py'
)
with open(version_file_path, 'r') as file_pointer:
file_contents_string = file_pointer.read()
local_dict = {}
exec(file_contents_string, None, local_dict)
__version__ = local_dict['__version__']
description = 'Abjad is a Python API for Formalized Score Control.'
long_description = 'Abjad is an interactive software system designed'
long_description += ' to help composers build up complex pieces of'
long_description += ' music notation in an iterative and incremental way.'
long_description += ' Use Abjad to create a symbolic representation of all'
long_description += ' the notes, rests, staves, tuplets, beams and slurs'
long_description += ' in any score.'
long_description += ' Because Abjad extends the Python programming language,'
long_description += ' you can use Abjad to make systematic changes to'
long_description += ' your music as you work.'
long_description += ' And because Abjad wraps the powerful LilyPond music'
long_description += ' notation package, you can use Abjad to control'
long_description += ' the typographic details of all the symbols on the page.'
author = [
'Trevor Bača',
| 'Josiah Wolf Oberholtzer',
'Víctor Adán',
]
author = ', '.join(author)
author_email = [
'trevorbaca@gmail.com',
'josiah.oberholtzer@gmail.com',
'contact@victoradan.net',
]
author_email = ', '.join(author_email)
keywords = [
'music | composition',
'music notation',
'formalized score control',
'lilypond',
]
keywords = ', '.join(keywords)
install_requires = [
'configobj',
'ply',
'six',
]
version = '.'.join(str(x) for x in sys.version_info[:3])
if StrictVersion(version) < StrictVersion('3.4.0'):
install_requires.append('enum34')
extras_require = {
'development': [
'pytest',
'sphinx==1.2.3', # TODO: Remove version once Sphinx fixes https://github.com/sphinx-doc/sphinx/issues/1822
'sphinx_rtd_theme',
'sphinxcontrib-images',
'PyPDF2',
],
'ipython': [
'ipython',
],
}
entry_points = {
'console_scripts': [
'abjad = abjad.tools.systemtools.run_abjad:run_abjad',
'ajv = abjad.tools.developerscripttools.run_ajv:run_ajv',
]
}
setuptools.setup(
author=author,
author_email=author_email,
description=description,
include_package_data=True,
install_requires=install_requires,
extras_require=extras_require,
entry_points=entry_points,
keywords=keywords,
license='GPL',
long_description=long_description,
name='Abjad',
packages=['abjad'],
platforms='Any',
url='http://www.projectabjad.org',
version=__version__,
) |
MarxMustermann/OfMiceAndMechs | src/itemFolder/plants/tree.py | Python | gpl-3.0 | 2,733 | 0.004025 | import src
import random
class Tree(src.items.Item):
"""
ingame item serving as an infinite food source
"""
type = "Tree"
bolted = True
walkable = False
numMaggots = 0
name = "tree"
def __init__(self):
"""
initialise internal state
"""
super().__init__(display=src.canvas.displayChars.tree)
self.maxMaggot = random.randint(75, 150)
try:
self.lastUse = src.gamestate.gamestate.tick
except:
self.lastUse = -100000
self.attributesToStore.extend(["maggot", "maxMaggot", "lastUse"])
def regenerateMaggots(self):
"""
regenerate maggots to account for passed time
"""
self.numMaggots += (src.gamestate.gamestate.tick - self.lastUse) // 100
self.numMaggots = min(self.numMaggots, self.maxMaggot)
def apply(self, character):
"""
handle a character trying to use this item
by dropping some food
Parameters:
character: the character
"""
if not self.container:
character.addMessage("The tree has to be on the outside to be used")
return
self.regenerateMaggots()
self.lastUse = src.gamestate.gamestate.tick
character.addMessage("you harvest a vat maggot")
character.frustration += 1
|
targetFull = False
targetPos = (self.xPosition + 1, self.yPosition, self.zPosition)
items = self.container.getItemByPosition(targetPos)
if len(items) > 15:
targetFull = True
for item in items:
if item.walkable == False:
targetFull = True
if targetFull:
| character.addMessage("the target area is full, the machine does not work")
return
if not self.numMaggots:
character.addMessage("The tree has no maggots left")
return
# spawn new item
self.numMaggots -= 1
new = src.items.itemMap["VatMaggot"]()
new.bolted = False
self.container.addItem(new,(self.xPosition+1,self.yPosition,self.zPosition))
def getLongInfo(self):
"""
returns a longer than normal description text
this recalculates the internal state and has side effects
Returns:
the decription text
"""
self.regenerateMaggots()
self.lastUse = src.gamestate.gamestate.tick
text = super().getLongInfo()
text += """
numMaggots: %s
description:
A tree can be used as a source for vat maggots.
Activate the tree to harvest a vat maggot.
""" % (
self.numMaggots,
)
return text
src.items.addType(Tree)
|
cemsbr/python-openflow | pyof/v0x01/common/utils.py | Python | mit | 5,060 | 0 | """Helper python-openflow functions."""
# System imports
# Third-party imports
# Local source tree imports
# Importing asynchronous messages
from pyof.v0x01.asynchronous.error_msg import ErrorMsg
from pyof.v0x01.asynchronous.flow_removed import FlowRemoved
from pyof.v0x01.asynchronous.packet_in import PacketIn
from pyof.v0x01.asynchronous.port_status import PortStatus
# Importing controller2switch messages
from pyof.v0x01.common.header import Header, Type
from pyof.v0x01.controller2switch.barrier_reply import BarrierReply
from pyof.v0x01.controller2switch.barrier_request import BarrierRequest
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
from pyof.v0x01.controller2switch.features_request import FeaturesRequest
from pyof.v0x01.controller2switch.flow_mod import FlowMod
from pyof.v0x01.controller2switch.get_config_reply import GetConfigReply
from pyof.v0x01.controller2switch.get_config_request import GetConfigRequest
from pyof.v0x01.controller2switch.packet_out import PacketOut
from pyof.v0x01.controller2switch.port_mod import PortMod
from pyof.v0x01.controller2switch.queue_get_config_reply import (
QueueGetConfigReply)
from pyof.v0x01.controller2switch.queue_get_config_request import (
QueueGetConfigRequest)
from pyof.v0x01.controller2switch.set_config import SetConfig
from pyof.v0x01.controller2switch.stats_reply import StatsReply
from pyof.v0x01.controller2switch.stats_request import StatsRequest
# Importing symmetric messages
from pyof.v0x01.symmetric.echo_reply import EchoReply
from pyof.v0x01.symmetric.echo_request import EchoRequest
from pyof.v0x01.symmetric.hello import Hello
from pyof.v0x01.symmetric.vendor_header import VendorHeader
__all__ = ('new_message_from_header', 'new_message_from_message_type',
'unpack_message')
def new_message_from_message_type(message_type):
"""Given an OpenFlow Message Type, return an empty message of that type.
Args:
messageType (:class:`~pyof.v0x01.common.header.Type`):
Python-openflow message.
Returns:
Empty OpenFlow message of the requested message type.
Raises:
KytosUndefinedMessageType: Unkown Message_Type.
"""
message_type = str(message_type)
available_classes = {
str(Type.OFPT_HELLO): Hello,
str(Type.OFPT_ERROR): ErrorMsg,
str(Type.OFPT_ECHO_REQUEST): EchoRequest,
str(Type.OFPT_ECHO_REPLY): EchoReply,
str(Type.OFPT_VENDOR): VendorHeader,
str(Type.OFPT_FEATURES_REQUEST): FeaturesRequest,
str(Type.OFPT_FEATURES_REPLY): FeaturesReply,
str(Type.OFPT_GET_CONFIG_REQUEST): GetConfigRequest,
str(Type.OFPT_GET_CONFIG_REPLY): GetConfigReply,
str(Type.OFPT_SET_CONFIG): SetConfig,
str(Type.OFPT_PACKET_IN): PacketIn,
str(Type.OFPT_FLOW_REMOVED): FlowRemoved,
str(Type.OFPT_PORT_STATUS): PortStatus,
str(Type.OFPT_PACKET_OUT): PacketOut,
str(Type.OFPT_FLOW_MOD): FlowMod,
str(Type.OFPT_PORT_MOD): PortMod,
str(Type.OFPT_STATS_REQUEST): StatsRequest,
str(Type.OFPT_STATS_REPLY): StatsReply,
str(Type.OFPT_BARRIER_REQUEST): BarrierRequest,
str(Type.OFPT_BARRIER_REPLY): BarrierReply,
str(Type.OFPT_QUEUE_GET_CONFIG_REQUEST): QueueGetConfigRequest,
str(Type.OFPT_QUEUE_GET_CONFIG_REPLY): QueueGetConfigReply
}
if message_type not in available_classes:
raise ValueError('"{}" is not known.'.format(message_type))
message_class = available_classes.get(message_type)
message_instance = message_class()
return message_instance
def new_message_from_header(header):
"""Given an OF Header, return an empty message of header's message_type.
Args:
header (~pyof.v0x01.common.header.Header): Unpacked OpenFlow Header.
Returns:
Empty OpenFlow message of the same type of message_type attribute from
the given header.
The header attribute of the message will be populated.
Raises:
KytosUndefinedMessageType: Unkown Message_Type.
"""
message_type = header.message_type
if not isinstance(message_type, Type):
try:
if isinstance(message_type, str):
message_type = Type[message_type]
elif isinstance(message_type, int):
message_type = Type(message_type)
except ValueError:
raise ValueError
message = new_message_from_message_type(message_type)
message.header.xid = header.xid
message.header.length = header.length
return message
def unpack_message(buffer):
"""Unpack the whole | buffer, inclu | ding header pack.
Args:
buffer (bytes): Bytes representation of a openflow message.
Returns:
object: Instance of openflow message.
"""
hdr_size = Header().get_size()
hdr_buff, msg_buff = buffer[:hdr_size], buffer[hdr_size:]
header = Header()
header.unpack(hdr_buff)
message = new_message_from_header(header)
message.unpack(msg_buff)
return message
|
ychenracing/Spiders | flhhkkSpider/flhhkk/items.py | Python | apache-2.0 | 420 | 0 | # -*- coding: utf-8 -*-
# Define here | the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class FlhhkkItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
_id = scrapy.Field()
title = scrapy.Field()
content = scrapy.Field()
url = scrapy.Field()
download_content = scrapy.F | ield()
|
phate89/tvdbsimple | tvdbsimple/__init__.py | Python | gpl-3.0 | 2,681 | 0.00597 | # -*- coding: utf-8 -*-
"""
`tvdbsimple` is a wrapper, written in Python, for TheTVDb.com
API v2. By calling the functions available in `tvdbsimple` you can simplify
your code and easily access a vast amount of tv and cast data. To find
out more about TheTVDb API, check out the [official api page](https://api.thetvdb.com/swagger/)
Features
--------
- Full API implementation. Supports Search, Series, Episode, Updated, User and Languages methods.
- Updated with the latest JSON API.
- Fully tested with automated tests and travis.ci.
- Supports Python 2.7, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8.
- Easy to access data using Python class attributes.
- Easy to experiment with `tvdbsimple` functions inside the Python interpreter.
Installation
------------
`tvdbsimple` is available on the [Python Package Index](https://pypi.python.org/pypi/tvdbsimple).
You can install `tvdbsimple` using one of the following techniques:
- Use pip: `pip install tvdbsimple`
- Download the .zip or .tar.gz file from PyPI and install it yourself
- Download the [source from Github](http://github.com/phate89/tvdbsimple) and install it yourself
If you install it yourself, also install [requests](http://www.python-requests.org/en/latest).
API Key
-------
You will need an API key to TheTVDb to access the API. To obtain a key, follow these steps:
1) Register for and verify an [account](http://thetvdb.com/?tab=register).
2) [Log into](http://thetvdb.com/?tab=login) your account.
3) [Go to this page](http://thetvdb.com/?tab=apiregister) and fill your details to generate a new API key.
Examples
--------
All the functions are fully documented here but you can find several use examples in the [examples page](https://github.com/phate89/tvdbsimple/blob/master/EXAMPLES.rst).
License
-------
The module is distributed with a GPLv3 license, see [LICENSE](https://www.gnu.org/licenses/gpl-3.0.en.html) for more details
copyright (c) 2017 by phate89.
"""
__title__ = 'tvdbsimple'
__version__ = '1.0.6'
__author__ = 'phate89'
__copyright__ = 'Copyright © 2017 phate89'
__license__ = 'GPLv3'
from .base import APIKeyError
from .keys import keys
from .search import Search
from .series import Series, Series_Episodes, Series_Images
from .languages import Languages
from .episode import Episode
from .updates im | port Updates
from .user import User, User_Ratings
KEYS = keys()
"""
Contains `API_KEY` and `API_TOKEN`.
To use the module you have to set at least the `API_KEY` value (THETVDb api key).
You can also provide an `API_TOKEN` if you already have a valid one stored. If the
valid token doesn't work anymore the module will try to retrieve a new one
using the `A | PI_KEY` variable
"""
|
dagothar/gripperz | ext/rbfopt/doc/conf.py | Python | gpl-2.0 | 11,493 | 0.006352 | # -*- coding: utf-8 -*-
#
# RBFOpt documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 11 00:01:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from mock import Mock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src/'))
# -- Mock modules for autodoc
MOCK_MODULES = ['argparse', 'numpy', 'scipy', 'pyDOE']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RBFOpt'
copyright = u'2015, Singapore University of Technology and Design'
author = u'Giacomo Nannicini'
# The versi | on info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.2'
# The language for content a | utogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'RBFOptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RBFOpt.tex', u'RBFOpt Documentation',
u'Giacomo Nannicini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addr |
asurve/arvind-sysml | src/main/python/systemml/project_info.py | Python | apache-2.0 | 1,177 | 0.001699 | #-------- | -----------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for addit | ional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# This file can be used to pass maven project properties to python
# via string substitutions using the maven-resources-plugin
__project_group_id__ = "org.apache.systemml"
__project_artifact_id__ = "systemml"
__project_version__ = "0.13.0-incubating-SNAPSHOT"
|
hackultura/siscult-migration | models/mixins.py | Python | gpl-2.0 | 593 | 0 | # -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, String
from settings import DATABASE_NAMES
class EntesMixin(object):
__table_args__ = {'schema': DATABASE_NAMES.get('entes')}
class ProfileMixin(object):
__table_args__ = {'schem | a': DATABASE_NAMES.get('perfis')}
class AdminMixin(object):
__table_args__ = {'schema': DATABASE_NAMES.get('admin')}
class UserMixin(object):
__table_args__ = {'schema': DATABASE_NAMES.get('usuarios')}
c | lass ClassificacaoArtisticaMixin(object):
id = Column(Integer, primary_key=True)
descricao = Column(String(50))
|
plotly/python-api | packages/python/plotly/plotly/validators/scattercarpet/marker/_gradient.py | Python | mit | 1,063 | 0.000941 | import _plotly_utils.basevalidators
class GradientValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="gradient", parent_name="scattercarpet.marker", **kwargs
):
super(GradientValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Gradient"),
data_docs=kwargs.pop(
"data_docs",
"""
color
Sets the final color of the gradient fill: the
center color for radial, the right for
horizontal, or the bottom for vertical | .
colorsrc
| Sets the source reference on Chart Studio Cloud
for color .
type
Sets the type of gradient used to fill the
markers
typesrc
Sets the source reference on Chart Studio Cloud
for type .
""",
),
**kwargs
)
|
shobhitmishra/CodingProblems | LeetCode/Session3/DeleteNodeBST.py | Python | mit | 2,274 | 0.004837 | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    # Results of the last findNodeAndParent search.  The class-level defaults
    # are overwritten per call; deleteNode resets them so a reused Solution
    # instance never acts on a stale hit from a previous call.
    node = parent = None

    def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
        """Delete ``key`` from the BST rooted at ``root``; return the (possibly new) root."""
        # Reset search state (the original kept stale node/parent between calls).
        self.node = self.parent = None
        if not root:
            # Empty tree: nothing to delete (the original dereferenced root here).
            return None
        # search for the node and its parent
        self.findNodeAndParent(root, key)
        if self.node is root and not root.left and not root.right:
            # Deleting the only node empties the tree.
            return None
        if self.node:
            self.deleteNodeHelper(self.node, self.parent)
        return root

    def deleteNodeHelper(self, node, parent):
        """Remove the already-located ``node``; ``parent`` is None when node is the root."""
        # Case 1: leaf -- unlink it from its parent.
        if not node.left and not node.right:
            if parent:
                if parent.left is node:
                    parent.left = None
                else:
                    parent.right = None
            return
        # Case 2: exactly one child -- copy the child's payload and links up.
        if not node.left or not node.right:
            child = node.left if not node.right else node.right
            node.val = child.val
            node.left = child.left
            node.right = child.right
            return
        # Case 3: two children -- copy the in-order successor's value here,
        # then delete the successor (which has no left child by construction).
        successor, successor_parent = self.getNodeSuccessor(node)
        node.val = successor.val
        self.deleteNodeHelper(successor, successor_parent)

    def getNodeSuccessor(self, node):
        """Return (successor, successor_parent): leftmost node of the right subtree."""
        successor_parent = node
        successor = node.right
        while successor.left:
            successor_parent = successor
            successor = successor.left
        return successor, successor_parent

    def findNodeAndParent(self, root, key):
        """Record the node holding ``key`` and its parent in self.node / self.parent."""
        if not root:
            return
        if root.val == key:
            self.node = root
            return
        self.parent = root
        if key < root.val:
            self.findNodeAndParent(root.left, key)
        else:
            self.findNodeAndParent(root.right, key)
# Ad-hoc manual exercise (runs on import).  Build a sample BST rooted at 10...
root = TreeNode(10)
root.left = TreeNode(3)
root.left.left = TreeNode(2)
root.left.right = TreeNode(8)
root.left.right.left = TreeNode(7)
root.left.right.right = TreeNode(9)
root.right = TreeNode(15)
root.right.left = TreeNode(13)
root.right.right = TreeNode(17)
root.right.right.right = TreeNode(19)
ob = Solution()
# NOTE(review): the tree above is never used -- root is immediately rebound
# to a single-node tree before deleteNode is called.
root = TreeNode(50)
root = ob.deleteNode(root, 50)
print(root)  # deleting the only node yields None
|
pinax/pinax-likes | pinax/likes/templatetags/pinax_likes_tags.py | Python | mit | 4,981 | 0.000602 | from django import template
from django.contrib.contenttypes.models import ContentType
from django.template import loader
from django.template.loader import render_to_string
from ..conf import settings
from ..models import Like
from ..utils import _allowed, widget_context
register = template.Library()
@register.simple_tag
def who_likes(obj):
    """
    Usage:
        {% who_likes obj as var %}
    """
    # All Like rows pointing at this exact object (content type + pk).
    content_type = ContentType.objects.get_for_model(obj)
    return Like.objects.filter(
        receiver_content_type=content_type,
        receiver_object_id=obj.pk,
    )
@register.simple_tag
def likes(user, *models):
    """
    Return all likes by ``user``, optionally restricted to given models.

    Usage:
        {% likes user as var %}
    Or
        {% likes user [model1, model2] as var %}
    """
    content_types = []
    # Fall back to every configured likable model when none are given.
    model_list = models or settings.PINAX_LIKES_LIKABLE_MODELS.keys()
    for model in model_list:
        # Skip models that are not whitelisted as likable.
        if not _allowed(model):
            continue
        app, model = model.split(".")
        content_types.append(
            ContentType.objects.get(app_label=app, model__iexact=model)
        )
    return Like.objects.filter(sender=user, receiver_content_type__in=content_types)
@register.simple_tag
@register.filter
def likes_count(obj):
    """
    Usage:
        {% likes_count obj %}
    or
        {% likes_count obj as var %}
    or
        {{ obj|likes_count }}
    """
    # Count likes addressed to exactly this object.
    ct = ContentType.objects.get_for_model(obj)
    qs = Like.objects.filter(receiver_content_type=ct, receiver_object_id=obj.pk)
    return qs.count()
@register.simple_tag(takes_context=True)
def likes_widget(context, user, obj, template_name="pinax/likes/_widget.html"):
    """
    Usage:
        {% likes_widget request.user post %}
    or
        {% likes_widget request.user post "pinax/likes/_widget_brief.html" %}
    """
    widget_template = loader.get_template(template_name)
    ctx = widget_context(user, obj, context["request"])
    return widget_template.render(ctx)
class LikeRenderer(template.Node):
    """Template node rendering one Like via the most specific template available."""

    def __init__(self, varname):
        self.varname = template.Variable(varname)

    def render(self, context):
        like = self.varname.resolve(context)
        content_type = like.receiver_content_type
        app_name = content_type.app_label
        model_name = content_type.model.lower()
        # Prefer a per-model template, then per-app, then the generic fallback.
        candidates = [
            f"pinax/likes/{app_name}/{model_name}.html",
            f"pinax/likes/{app_name}/like.html",
            "pinax/likes/_like.html",
        ]
        return render_to_string(candidates, {
            "instance": like.receiver,
            "like": like,
        }, context)
@register.tag
def render_like(parser, token):
    """
    {% likes user as like_list %}
    <ul>
        {% for like in like_list %}
            <li>{% render_like like %}</li>
        {% endfor %}
    </ul>
    """
    # token is "render_like <var>"; the variable name is the second piece.
    bits = token.split_contents()
    return LikeRenderer(bits[1])
class ObjectDecorator:
    """Annotates each object with a boolean ``liked`` flag for ``user``.

    Works either on plain model instances or on wrapper items that expose the
    model as ``obj.item`` (detected lazily via the ``_meta`` attribute).
    """
    def __init__(self, user, objects):
        self.user = user
        self._objects = objects
        # Tri-state: None = not yet determined, then True/False.
        self._is_stream = None
    def is_stream(self):
        # A "stream" item lacks Django's _meta, so the model sits in .item.
        # NOTE(review): presumably activity-stream wrappers -- confirm.
        if self._is_stream is None and len(self._objects) > 0:
            self._is_stream = not hasattr(self._objects[0], "_meta")
        return self._is_stream
    def get_id(self, obj):
        # pk of the underlying model, unwrapping stream items when needed.
        return self.is_stream() and obj.item.id or obj.id
    @property
    def indexed(self):
        # Lazily group objects by content type; computed once and cached.
        if not hasattr(self, "_indexed"):
            self._indexed = {}
            for obj in self._objects:
                if hasattr(obj, "cast") and callable(obj.cast):
                    obj = obj.cast()
                ct = ContentType.objects.get_for_model(self.is_stream() and obj.item or obj)
                if ct not in self._indexed.keys():
                    self._indexed[ct] = []
                # Default to not-liked; objects() flips this where a Like exists.
                obj.liked = False
                self._indexed[ct].append(obj)
        return self._indexed
    def objects(self):
        # One Like query per content type, then mark matching objects.
        for ct in self.indexed.keys():
            likes = Like.objects.filter(
                sender=self.user,
                receiver_content_type=ct,
                receiver_object_id__in=[self.get_id(o) for o in self.indexed[ct]]
            )
            for obj in self.indexed[ct]:
                for like in likes:
                    if like.receiver_object_id == self.get_id(obj):
                        obj.liked = True
                yield obj
class LikedObjectsNode(template.Node):
    """Stores an iterator of liked-annotated objects under ``varname``."""

    def __init__(self, objects, user, varname):
        self.objects = template.Variable(objects)
        self.user = template.Variable(user)
        self.varname = varname

    def render(self, context):
        resolved_user = self.user.resolve(context)
        resolved_objects = self.objects.resolve(context)
        decorator = ObjectDecorator(resolved_user, resolved_objects)
        context[self.varname] = decorator.objects()
        # Tags that only set context render to nothing.
        return ""
@register.tag
def liked(parser, token):
    """
    {% liked objects by user as varname %}
    """
    # Unpack: tag name, objects expr, "by", user expr, "as", target name.
    _tag, objects_expr, _by, user_expr, _as, varname = token.split_contents()
    return LikedObjectsNode(objects_expr, user_expr, varname)
|
pymedusa/Medusa | ext/cloudscraper/captcha/__init__.py | Python | gpl-3.0 | 1,511 | 0.004633 | import abc
import logging
import sys
# ``abc.ABC`` exists from Python 3.4 on; for older interpreters (incl.
# Python 2) synthesize an equivalent empty abstract base via ABCMeta.
if sys.version_info >= (3, 4):
    ABC = abc.ABC  # noqa
else:
    ABC = abc.ABCMeta('ABC', (), {})
# ------------------------------------------------------------------------- #

# Registry mapping provider name -> Captcha instance; populated by
# Captcha.__init__ when a provider module is imported.
captchaSolvers = {}

# ------------------------------------------------------------------------- #
class Captcha(ABC):
    """Base class for anti-captcha providers.

    Concrete providers register themselves in ``captchaSolvers`` on
    construction and implement :meth:`getCaptchaAnswer`.
    """

    @abc.abstractmethod
    def __init__(self, name):
        # Register this provider instance under its public name.
        captchaSolvers[name] = self

    # ------------------------------------------------------------------------- #

    @classmethod
    def dynamicImport(cls, name):
        """Import the provider module ``name`` on demand and return its solver."""
        if name not in captchaSolvers:
            try:
                __import__(f'{cls.__module__}.{name}')
                # Importing the module must have registered a Captcha instance.
                if not isinstance(captchaSolvers.get(name), Captcha):
                    raise ImportError('The anti captcha provider was not initialized.')
            except ImportError as e:
                sys.tracebacklimit = 0
                logging.error(f'Unable to load {name} anti captcha provider -> {e}')
                raise
        return captchaSolvers[name]

    # ------------------------------------------------------------------------- #

    @abc.abstractmethod
    def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
        pass

    # ------------------------------------------------------------------------- #

    def solveCaptcha(self, captchaType, url, siteKey, captchaParams):
        """Public entry point; delegates to the provider's getCaptchaAnswer."""
        return self.getCaptchaAnswer(captchaType, url, siteKey, captchaParams)
|
jpn--/larch | larch/util/data_manipulation.py | Python | gpl-3.0 | 2,789 | 0.042668 |
import pandas
import numpy
import operator
from sklearn.preprocessing import OneHotEncoder
from typing import Mapping
def one_hot_encode(vector, dtype='float32', categories=None, index=None):
    """One-hot encode a 1-D vector into a dense DataFrame.

    When ``vector`` is a pandas Series its index is reused for the result
    (overriding any ``index`` argument).  ``categories``, if given, fixes the
    column set instead of inferring it from the data.
    """
    if isinstance(vector, pandas.Series):
        index = vector.index
        vector = vector.values
    encoder = OneHotEncoder(
        categories='auto' if categories is None else [categories,],
        # NOTE(review): ``sparse=`` was renamed ``sparse_output=`` in newer
        # scikit-learn releases -- confirm the pinned sklearn version.
        sparse=False,
        dtype=dtype,
    ).fit(vector.reshape(-1,1))
    return pandas.DataFrame(
        data = encoder.transform(vector.reshape(-1,1)),
        # categories_ is a list of arrays (one per feature); passing it as-is
        # presumably yields a single-level MultiIndex of category labels -- verify.
        columns=encoder.categories_,
        index=index
    )
def periodize(
        values,
        mapping=None,
        default=None,
        right=True,
        left=True,
        **kwargs,
):
    """
    Label sections of a continuous variable.

    This function contrasts with `pandas.cut` in that
    there can be multiple non-contiguous sections of the
    underlying continuous interval that obtain the same
    categorical value.

    Parameters
    ----------
    values : array-like
        The values to label. If given as a pandas.Series,
        the returned values will also be a Series,
        with a categorical dtype.
    mapping : Collection or Mapping
        A mapping, or a collection of 2-tuples giving
        key-value pairs (not necessarily unique keys).
        The keys (or first values) will be the new values,
        and the values (or second values) are 2-tuples
        giving upper and lower bounds.
    default : any, default None
        Keys not inside any labeled interval will get
        this value instead.
    right : bool, default True
        Whether to include the upper bound[s] in the
        intervals for labeling.
    left : bool, default True
        Whether to include the lower bound[s] in the
        intervals for labeling.
    **kwargs :
        Are added to `mapping`.

    Returns
    -------
    array-like

    Example
    -------
    >>> import pandas
    >>> h = pandas.Series(range(1,24))
    >>> periodize(h, default='OP', AM=(6.5, 9), PM=(16, 19))
    0     OP
    1     OP
    2     OP
    3     OP
    4     OP
    5     OP
    6     AM
    7     AM
    8     AM
    9     OP
    10    OP
    11    OP
    12    OP
    13    OP
    14    OP
    15    PM
    16    PM
    17    PM
    18    PM
    19    OP
    20    OP
    21    OP
    22    OP
    dtype: category
    Categories (3, object): ['AM', 'OP', 'PM']
    """
    if mapping is None:
        mapping = []
    if isinstance(mapping, Mapping):
        mapping = list(mapping.items())
    else:
        # Copy so extending with **kwargs never mutates the caller's list
        # (the original extended the argument in place).
        mapping = list(mapping)
    mapping.extend(kwargs.items())
    # Start everything at `default`; labeled intervals overwrite below.
    if isinstance(values, pandas.Series):
        x = pandas.Series(index=values.index, data=default)
    else:
        x = numpy.full(values.shape, default)
    rop = operator.le if right else operator.lt
    lop = operator.ge if left else operator.gt
    for k, (lowerbound, upperbound) in mapping:
        # Open-ended bounds: None means unbounded on that side.
        if lowerbound is None:
            lowerbound = -numpy.inf
        if upperbound is None:
            upperbound = numpy.inf
        x[lop(values, lowerbound) & rop(values, upperbound)] = k
    if isinstance(x, pandas.Series):
        x = x.astype('category')
    return x
|
hebecked/DHT-displayNlog | pc/logNplot.py | Python | gpl-3.0 | 4,651 | 0.037411 | #!/usr/bin/python2.7
import numpy as numpy
import time
import serial
import os, sys
import argparse
from dynamic_plot import live_plots
class serialCOM:
    """A simple class to fetch temperature and humidity information via a arduino micro connected to a DHT22"""
    def __init__(self, port, two, baud=9600, timeout=1):
        # `two` selects dual-sensor mode (a second DHT on the arduino).
        self.ser = serial.Serial(port,baud,timeout=timeout)
        self.two=two
        # Give the arduino a second to reset, then drain any boot noise.
        time.sleep(1)
        self.ser.read(100)
        if(self.ser.isOpen()):
            return
        else:
            print "error opening serial connection!"
            return
    def close(self):
        self.ser.close()
    def getHumidity(self):
        # Protocol: send 'h' (sensor 1) / 'j' (sensor 2), wait, read a float.
        self.ser.write('h')
        self.ser.flush()
        time.sleep(0.25)
        self.latestHumidity=float(self.ser.read(50))
        if(self.two):
            self.ser.write('j')
            self.ser.flush()
            time.sleep(0.25)
            self.latestHumidity2=float(self.ser.read(50))
    def getTemperature(self):
        # Protocol: send 't' (sensor 1) / 'z' (sensor 2), wait, read a float.
        self.ser.write('t')
        self.ser.flush()
        time.sleep(0.25)
        self.latestTemperature=float(self.ser.read(50))
        if(self.two):
            self.ser.write('z')
            self.ser.flush()
            time.sleep(0.25)
            self.latestTemperature2=float(self.ser.read(50))
    def returnLatest(self):
        # Poll both readings and return sensor 1's (temperature, humidity).
        self.getTemperature()
        self.getHumidity()
        return self.latestTemperature, self.latestHumidity
    def returnLatest2(self):
        # Poll both readings and return sensor 2's (temperature, humidity).
        self.getTemperature()
        self.getHumidity()
        return self.latestTemperature2, self.latestHumidity2
    def writeFile(self, filename):
        # Append a human-readable timestamped line; "STDOUT" writes to stdout.
        date=time.asctime()
        if filename == "STDOUT":
            if(self.two):
                sys.stdout.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\t' + str(self.latestTemperature2) + '\t' + str(self.latestHumidity2) +'\n')
            else:
                sys.stdout.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\n')
            return
        f = open(filename, 'a')
        if(self.two):
            f.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\t' + str(self.latestTemperature2) + '\t' + str(self.latestHumidity2) +'\n')
        else:
            f.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\n')
        f.close()
    def ___writeFile(self, filename):
        # Disabled variant using an epoch timestamp.
        # NOTE(review): time.time() returns a float, so the string
        # concatenations below would raise TypeError -- this method appears
        # broken/dead (hence the triple underscore).
        date=time.time()
        if filename == "STDOUT":
            if(self.two):
                sys.stdout.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\t' + str(self.latestTemperature2) + '\t' + str(self.latestHumidity2) +'\n')
            else:
                sys.stdout.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\n')
            return
        f = open(filename, 'a')
        if(self.two):
            f.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\t' + str(self.latestTemperature2) + '\t' + str(self.latestHumidity2) +'\n')
        else:
            f.write(date + '\t' + str(self.latestTemperature) + '\t' + str(self.latestHumidity) + '\n')
        f.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(description='This script is meant to read the humidity and temperature from an arduino connected to a DHT22 or alike. It allows to display the results in a dynamic plot or save them to file')
parser.add_argument('-P', '--plot', dest='SEC', action='store', type=int, help='The last SEC Seconds will be displayed in a dynamic plot. Leave empty for no Plot.')
parser.add_argument('-F', '--file', dest='FILE', action='store', type=str, help='A name for the output file. No output file if not set.')
parser.add_argument('-t', '--two', dest='TWO', action='store_true', default=False, help='Defines whether to read one or two Sensors.')
parser.add_argument('-T', '--time', dest='TIME', action='store', type=int, default=2, help='Defines the time interval between measurements. Measures only one value for -1.')
parser.add_argument('-p', '--port', dest='PORT', action='store', type=str, default="COM3", help='Defi | nes the Port to connect to the arduino.')
args = parser.parse_args()
if(args.SEC):
if((args.SEC < 2 or args.SEC > args.TIME )):
print "Error! Please choose a value greater than 2 seconds and the time interval."
exit
sC=serialCOM(args.PORT ,args.TWO)
if(args.SEC):
lp = live_plots(0,args.SEC,two_plots=True)
if(args.TWO):
lp2 = live_plots(0,args.SEC,two_plots=True)
if args.TIME != -1:
while True:
| time.sleep(args.TIME)
t,h=sC.returnLatest()
if(args.TWO):
t2,h2=sC.returnLatest2()
if(args.SEC):
lp.update_time(args.TIME,t,h)
lp.clean_arrays()
if(args.TWO):
lp2.update_time(args.TIME,t2,h2)
lp2.clean_arrays()
if(args.FILE):
sC.writeFile(args.FILE)
else:
time.sleep(2)
t,h=sC.returnLatest()
if(args.TWO):
t2,h2=sC.returnLatest2()
if(args.FILE):
sC.writeFile(args.FILE)
else:
sC.writeFile("STDOUT")
|
zachdj/ultimate-tic-tac-toe | generate_data.py | Python | mit | 4,760 | 0.001681 | """
Script to generate "labelled" data using random playouts
A lot of this can now be done with the RunExperiment scene
"""
from models.game import *
from models.data import DatabaseConnection as DB, GameDataModel
import timeit
def full_game_experiment(total_games, purge=10):
    """
    Simulates many random games from start to finish and records each in the database
    Board states that have fewer than "purge" records will be removed from the database
    :return: None
    """
    x_wins = 0
    o_wins = 0
    cat_games = 0
    moves_played = 0
    player_x = BogoBot(Board.X)
    player_o = BogoBot(Board.O)
    print("Playing %s games... \n" % total_games)
    start_time = timeit.default_timer()
    for game_number in range(total_games):
        print("Playing game %s..." % (game_number + 1))
        game = Game(player_x, player_o)
        winner = game.finish_game()
        if winner == Board.X:
            x_wins += 1
        elif winner == Board.O:
            o_wins += 1
        else:
            cat_games += 1
        moves_played += len(game.moves)
        GameDataModel(game).save()
    # Drop board states observed fewer than `purge` times.
    if purge > 0:
        DB.purge_boards(purge)
    DB.close()
    elapsed = timeit.default_timer() - start_time
    print("Done in %s s \n" % elapsed)
    print("Player 1 Won %s %% of the games" % round(x_wins*100 / total_games))
    print("Player 2 Won %s %% of the games" % round(o_wins*100 / total_games))
    print("The Cat got %s %% of the games" % round(cat_games*100 / total_games))
    print("Average number of moves: %s" % round(moves_played / total_games))
def mid_game_experiment(starting_boards, games_per_board, purge=10):
    """Generate labelled data from positions reached after 25 random moves.

    :param starting_boards: number of mid-game positions to sample
    :param games_per_board: random playouts recorded from each position
    :param purge: board states with fewer than this many records are removed
    """
    STARTING_BOARDS = starting_boards
    GAMES_PER_BOARD = games_per_board
    MOVE_SEQUENCE_LENGTH = 25
    print("Generating mid-game data for %s boards..." % STARTING_BOARDS)
    p1 = BogoBot(Board.X)
    p2 = BogoBot(Board.O)
    print("Generating move sequences...")
    move_sequences = []
    for i in range(STARTING_BOARDS):
        game = Game(p1, p2)
        # generate a move sequence that will take us to this board
        sequence = []
        for j in range(MOVE_SEQUENCE_LENGTH):
            move = game._take_step()
            sequence.append(move)
        # NOTE(review): unlike late_game_experiment, sequences that already
        # finish the game are not filtered out -- confirm 25 moves can never
        # end a game, or that make_move tolerates finished games.
        move_sequences.append(sequence)
    # we now have several move sequences that will take us to a fixed mid-game
    # state - generate data for each of these
    for idx, sequence in enumerate(move_sequences):
        print("Generating data for move sequence %s" % (idx + 1))
        for experiment in range(GAMES_PER_BOARD):
            game = Game(p1, p2)
            for move in sequence:  # bring the game to its mid-completed state
                game.make_move(move)
            # finish the game randomly and save
            game.finish_game()
            game_data = GameDataModel(game)
            game_data.save()
    # remove all the "junk" data that we don't need - this keeps the database
    # from growing too large when long experiments are run
    DB.purge_boards(purge)
    print("Done!")
    DB.close()
def late_game_experiment(starting_boards, games_per_board, purge=10):
    """Generate labelled data from positions reached after 45 random moves."""
    SEQUENCE_LENGTH = 45
    print("Generating late-game data for %s boards..." % starting_boards)
    player_x = BogoBot(Board.X)
    player_o = BogoBot(Board.O)
    print("Generating move sequences...")
    move_sequences = []
    while len(move_sequences) < starting_boards:
        game = Game(player_x, player_o)
        candidate = [game._take_step() for _ in range(SEQUENCE_LENGTH)]
        # Keep only prefixes that leave the game unfinished.
        if not game.is_game_over():
            move_sequences.append(candidate)
    # Replay each prefix, then finish randomly, once per requested playout.
    for idx, sequence in enumerate(move_sequences):
        print("Generating data for move sequence %s" % (idx + 1))
        for _ in range(games_per_board):
            game = Game(player_x, player_o)
            for move in sequence:
                game.make_move(move)
            game.finish_game()
            GameDataModel(game).save()
    # Trim board states observed fewer than `purge` times.
    DB.purge_boards(purge)
    print("Done!")
    DB.close()
# full_game_experiment(10)
# mid_game_experiment(1, 15)
# late_game_experiment(75, 100) |
geobricks/geobricks_data_scripts | geobricks_data_scripts/dev/storage/data/delete/delete_storage_metadata.py | Python | gpl-2.0 | 217 | 0 | from geobricks_da | ta_scripts.dev.utils.data_manager_util import get_data_manager
# Obtain the shared data-manager facade used by these maintenance scripts.
data_manager = get_data_manager()
# TODO How to handle the fact that is in storage?
# NOTE(review): the three booleans presumably select which backends the layer
# is removed from -- confirm against DataManager.delete's signature.
data_manager.delete("mod13a2", True, False, False)
|
argaghulamahmad/MNK-Game | StartGame.py | Python | mit | 531 | 0.003766 | # Nama Mahasiswa | : Arga Ghulam Ahmad
# Student ID (NPM)   : 1606821601
# Course             : Fundamentals of Programming 1 (Dasar Dasar Pemrograman 1)
# Class              : A
# Assignment         : MNK Game
# Deadline           : 16 November 2016
# Interpreter        : Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:18:55)
from GameMNK import *
from win32api import GetSystemMetrics
"""jalankan file ini di konsol untuk memulai MNK Game"""
try:
    # Use the real screen size when the Win32 API is available.
    width = GetSystemMetrics(0)
    height = GetSystemMetrics(1)
except Exception:
    # Bare `except:` also swallowed KeyboardInterrupt/SystemExit; catching
    # Exception keeps the fallback for missing win32api / non-Windows hosts.
    width = 800
    height = 600
GameMNK(width, height)
adrienpacifico/openfisca-france | openfisca_france/scripts/generate_columns_tree.py | Python | agpl-3.0 | 6,397 | 0.005471 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate the columns tree from flat dictionary of columns.
When tree already exists, don't change location of columns that have already been placed in tree.
"""
import argparse
import collections
import logging
import os
import pprint
import sys
from openfisca_core import formulas
from openfisca_france import init_country, model
try:
from openfisca_france.model.datatrees import columns_name_tree_by_entity
except ImportError:
columns_name_tree_by_entity = collections.OrderedDict()
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
TaxBenefitSystem = init_country()
tax_benefit_system = TaxBenefitSystem()
class PrettyPrinter(pprint.PrettyPrinter):
    """Override pprint PrettyPrinter to correctly handle diacritical characters."""
    def format(self, object, context, maxlevels, level):
        # Render unicode values as UTF-8-encoded u"""...""" literals so that
        # accented labels survive in the generated module (Python 2 code:
        # `unicode` is the py2 builtin).
        if isinstance(object, unicode):
            return ('u"""{}"""'.format(object.encode('utf8')), True, False)
        return pprint.PrettyPrinter.format(self, object, context, maxlevels, level)
def cleanup_tree(entity, tree):
    """Return a pruned copy of ``tree`` keeping only this entity's valid
    input columns, or None when nothing remains.  "Autres" groups are
    always dropped (they are regenerated later)."""
    kept = []
    for child in (tree.get('children') or []):
        if isinstance(child, basestring):
            # Leaf: a column name -- keep it only if still a valid input
            # column belonging to this entity.
            column = tax_benefit_system.column_by_name.get(child)
            if column is not None and column.entity == entity and is_valid_input_column(column):
                kept.append(child)
            continue
        assert isinstance(child, dict), child
        if child.get('label') == u'Autres':
            continue
        cleaned = cleanup_tree(entity, child)
        if cleaned is not None:
            kept.append(cleaned)
    if not kept:
        return None
    pruned = tree.copy()
    pruned['children'] = kept
    return pruned
def is_valid_input_column(column):
    """True for survey-independent, formula-less input columns that are not
    one of the id/role bookkeeping columns."""
    bookkeeping = ('age', 'age_en_mois', 'idfam', 'idfoy', 'idmen', 'quifam', 'quifoy', 'quimen')
    if column.name in bookkeeping:
        return False
    if not issubclass(column.formula_class, formulas.SimpleFormula):
        return False
    return column.formula_class.function is None and not column.survey_only
def iter_placed_tree(tree):
    """Yield every column name already placed somewhere in ``tree``,
    skipping the auto-generated "Autres" groups."""
    assert tree.get('children'), tree
    for child in tree['children']:
        if isinstance(child, basestring):
            # Leaf: a column name.
            yield child
        elif child.get('label') != u'Autres':
            for column_name in iter_placed_tree(child):
                yield column_name
def main():
    """Rebuild datatrees.py: prune stale columns, then append any unplaced
    input column to a per-entity "Autres" group."""
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)

    global columns_name_tree_by_entity
    # Prune each entity tree, dropping entities whose tree becomes empty.
    columns_name_tree_by_entity = collections.OrderedDict(
        (entity, columns_name_tree)
        for entity, columns_name_tree in (
            (entity1, cleanup_tree(entity1, columns_name_tree1))
            for entity1, columns_name_tree1 in columns_name_tree_by_entity.iteritems()
        )
        if columns_name_tree is not None
    )
    placed_columns_name = set(
        column_name
        for columns_name_tree in columns_name_tree_by_entity.itervalues()
        for column_name in iter_placed_tree(columns_name_tree)
    )
    for name, column in tax_benefit_system.column_by_name.iteritems():
        if not is_valid_input_column(column):
            continue
        if name in placed_columns_name:
            continue
        placed_columns_name.add(name)
        # Unplaced column: append it to this entity's trailing "Autres" group,
        # creating the group if needed.
        entity_children = columns_name_tree_by_entity.setdefault(column.entity, collections.OrderedDict()).setdefault(
            'children', [])
        if entity_children and entity_children[-1].get('label') == u'Autres':
            last_entity_child = entity_children[-1]
        else:
            last_entity_child = collections.OrderedDict(label = u'Autres')
            entity_children.append(last_entity_child)
        last_entity_child.setdefault('children', []).append(name)

    # Regenerate the datatrees module next to the model package.
    datatrees_module_path = os.path.join(os.path.dirname(model.__file__), 'datatrees.py')
    with open(datatrees_module_path, 'w') as datatree_file:
        datatree_file.write('''\
# -*- coding: utf-8 -*-


import collections


columns_name_tree_by_entity = collections.OrderedDict([
''')
        for entity in ('ind', 'fam', 'foy', 'men'):
            datatree_file.write('    ({}, '.format(pretty_printer.pformat(entity)))
            write_tree(datatree_file, columns_name_tree_by_entity[entity])
            datatree_file.write('),\n')
        datatree_file.write('    ])\n')
    return 0
def write_tree(tree_file, tree, level = 1):
    """Serialize one tree node as Python source into ``tree_file``.

    Emits a nested ``collections.OrderedDict([...])`` literal; ``level``
    controls the indentation depth of the generated code.
    """
    tree_file.write('collections.OrderedDict([\n')
    label = tree.get('label')
    if label is not None:
        tree_file.write(' ' * (level + 1))
        tree_file.write("('label', {}),\n".format(pretty_printer.pformat(label)))
    children = tree.get('children')
    if children is not None:
        tree_file.write(' ' * (level + 1))
        tree_file.write("('children', [\n".format(pretty_printer.pformat(label)))
        for child in children:
            tree_file.write(' ' * (level + 2))
            if isinstance(child, basestring):
                # Leaf: write the column name, with its label as a trailing
                # comment when it adds information.
                tree_file.write(pretty_printer.pformat(child))
                tree_file.write(',')
                column = tax_benefit_system.column_by_name[child]
                label = column.label
                if label is not None:
                    label = label.strip() or None
                    if label == child:
                        label = None
                if label is not None:
                    tree_file.write(' # ')
                    tree_file.write(column.label.strip().encode('utf-8'))
                tree_file.write('\n')
            else:
                # Subtree: recurse with deeper indentation.
                write_tree(tree_file, child, level = level + 2)
                tree_file.write(',\n')
        tree_file.write(' ' * (level + 2))
        tree_file.write("]),\n")
    tree_file.write(' ' * (level + 1))
    tree_file.write('])')
if __name__ == "__main__":
sys.exit(main())
|
olavurmortensen/gensim | gensim/test/test_doc2vec.py | Python | lgpl-2.1 | 20,905 | 0.003639 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
from __future__ import with_statement
import logging
import unittest
import os
import tempfile
from six.moves import zip as izip
from collections import namedtuple
from testfixtures import log_capture
import numpy as np
from gensim import utils, matutils
from gensim.models import doc2vec
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class DocsLeeCorpus(object):
    """Stream the Lee background corpus as TaggedDocument objects."""

    def __init__(self, string_tags=False):
        # When True, tags are strings of the form '_*<n>'; otherwise plain ints.
        self.string_tags = string_tags

    def _tag(self, i):
        return '_*%d' % i if self.string_tags else i

    def __iter__(self):
        with open(datapath('lee_background.cor')) as f:
            for lineno, text in enumerate(f):
                words = utils.simple_preprocess(text)
                yield doc2vec.TaggedDocument(words, [self._tag(lineno)])
# Materialize the Lee corpus once; several tests reuse it.
list_corpus = list(DocsLeeCorpus())

# Tiny deterministic corpus (the classic "human interface" collection).
raw_sentences = [
    ['human', 'interface', 'computer'],
    ['survey', 'user', 'computer', 'system', 'response', 'time'],
    ['eps', 'user', 'interface', 'system'],
    ['system', 'human', 'system', 'eps'],
    ['user', 'response', 'time'],
    ['trees'],
    ['graph', 'trees'],
    ['graph', 'minors', 'trees'],
    ['graph', 'minors', 'survey']
]

# Tag each toy sentence with its integer position.
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
def testfile():
    # temporary data will be stored to this file
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, 'gensim_doc2vec.tst')
class TestDoc2VecModel(unittest.TestCase):
    def test_persistence(self):
        """Test storing/loading the entire model."""
        # Round trip: a saved model must load back equal (see models_equal).
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
        model.save(testfile())
        self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
    def test_load_mmap(self):
        """Test storing/loading the entire model."""
        model = doc2vec.Doc2Vec(sentences, min_count=1)
        # test storing the internal arrays into separate files
        # (sep_limit=0 forces every large array out of the pickle)
        model.save(testfile(), sep_limit=0)
        self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
        # make sure mmaping the arrays back works, too
        self.models_equal(model, doc2vec.Doc2Vec.load(testfile(), mmap='r'))
    def test_int_doctags(self):
        """Test doc2vec doctag alternatives"""
        corpus = DocsLeeCorpus()
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # Integer tags index doctag_syn0 directly (one row per document).
        self.assertEqual(len(model.docvecs.doctag_syn0), 300)
        self.assertEqual(model.docvecs[0].shape, (100,))
        # String lookup on the *word* vocabulary must fail for a doc tag.
        self.assertRaises(KeyError, model.__getitem__, '_*0')
    def test_missing_string_doctag(self):
        """Test doc2vec doctag alternatives"""
        corpus = list(DocsLeeCorpus(True))
        # force duplicated tags
        corpus = corpus[0:10] + corpus
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # An unknown string tag must raise, not silently return a vector.
        self.assertRaises(KeyError, model.docvecs.__getitem__, 'not_a_tag')
    def test_string_doctags(self):
        """Test doc2vec doctag alternatives"""
        corpus = list(DocsLeeCorpus(True))
        # force duplicated tags
        corpus = corpus[0:10] + corpus
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # Duplicated tags must not create extra rows: still 300 unique tags.
        self.assertEqual(len(model.docvecs.doctag_syn0), 300)
        self.assertEqual(model.docvecs[0].shape, (100,))
        self.assertEqual(model.docvecs['_*0'].shape, (100,))
        # String tag and its positional index resolve to the same vector.
        self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0]))
        # Internal offsets/indexes stay within bounds of the tag tables.
        self.assertTrue(max(d.offset for d in model.docvecs.doctags.values()) < len(model.docvecs.doctags))
        self.assertTrue(max(model.docvecs._int_index(str_key) for str_key in model.docvecs.doctags.keys()) < len(model.docvecs.doctag_syn0))
        # verify docvecs.most_similar() returns string doctags rather than indexes
        self.assertEqual(model.docvecs.offset2doctag[0], model.docvecs.most_similar([model.docvecs[0]])[0][0])
def test_empty_errors(self):
# no input => "RuntimeError: you must first build vocabulary before training the model"
self. | assertRaises(RuntimeError, doc2vec.Doc2Vec, [])
# input not empty, but rather completely filtered out
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000)
def test_simil | arity_unseen_docs(self):
"""Test similarity of out of training sentences"""
rome_str = ['rome', 'italy']
car_str = ['car']
corpus = list(DocsLeeCorpus(True))
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertTrue(model.docvecs.similarity_unseen_docs(model, rome_str, rome_str) > model.docvecs.similarity_unseen_docs(model, rome_str, car_str))
    def model_sanity(self, model):
        """Any non-trivial model on DocsLeeCorpus can pass these sanity checks"""
        fire1 = 0  # doc 0 sydney fires
        fire2 = 8  # doc 8 sydney fires
        tennis1 = 6  # doc 6 tennis
        # inferred vector should be top10 close to bulk-trained one
        doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
        sims_to_infer = model.docvecs.most_similar([doc0_inferred], topn=len(model.docvecs))
        f_rank = [docid for docid, sim in sims_to_infer].index(fire1)
        self.assertLess(f_rank, 10)
        # fire2 should be top30 close to fire1
        sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs))
        f2_rank = [docid for docid, sim in sims].index(fire2)
        self.assertLess(f2_rank, 30)
        # same sims should appear in lookup by vec as by index
        doc0_vec = model.docvecs[fire1]
        sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21)
        sims2 = [(id, sim) for id, sim in sims2 if id != fire1]  # ignore the doc itself
        sims = sims[:20]
        self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0])  # same doc ids
        self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1]))  # close-enough dists
        # sim results should be in clip range if given
        clip_sims = model.docvecs.most_similar(fire1, clip_start=len(model.docvecs) // 2, clip_end=len(model.docvecs) * 2 // 3)
        sims_doc_id = [docid for docid, sim in clip_sims]
        for s_id in sims_doc_id:
            self.assertTrue(len(model.docvecs) // 2 <= s_id <= len(model.docvecs) * 2 // 3)
        # tennis doc should be out-of-place among fire news
        self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1)
        # fire docs should be closer than fire-tennis
        self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1))
    def test_training(self):
        """Test doc2vec training."""
        corpus = DocsLeeCorpus()
        # workers=1 keeps training deterministic enough for models_equal below.
        model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20, workers=1)
        model.build_vocab(corpus)
        self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100))
        model.train(corpus)
        self.model_sanity(model)
        # build vocab and train in one step; must be the same as above
        model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20, workers=1)
        self.models_equal(model, model2)
    def test_dbow_hs(self):
        """Test DBOW doc2vec training."""
        # dm=0 selects distributed bag-of-words; hs=1/negative=0 -> hierarchical softmax.
        model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, iter=20)
        self.model_sanity(model)
    def test_dmm_hs(self):
        """Test DM/mean doc2vec training."""
        # dm=1 with dm_mean=1 averages context vectors; hierarchical softmax.
        model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1, negative=0,
                                alpha=0.05, min_count=2, iter=20)
        self.model_sanity(model)
def test_dms_hs(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1, negative=0,
|
noirbizarre/django-absolute | absolute/context_processors.py | Python | lgpl-3.0 | 703 | 0.004267 | # -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from django.conf import settings
def get_site_url(request, slash=False):
domain = Site.objects.get_current().domain
protocol = 'https' if request.is_secure() else 'http'
root = "%s://%s" % (protocol, domain)
if slash:
root += '/'
return root
def absolute(request):
urls = {
'ABSOLUTE_ROOT' | : request.build_absolute_uri('/')[:-1],
'ABSOLUTE_ROOT_URL': request.build_absolute_uri('/'),
}
if 'django.contrib.sites' in settings.INSTALLED_APPS:
| urls['SITE_ROOT'] = get_site_url(request)
urls['SITE_ROOT_URL'] = get_site_url(request, True)
return urls |
FederatedAI/FATE | python/federatedml/statistic/statics.py | Python | apache-2.0 | 24,470 | 0.002248 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
import math
import numpy as np
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.feature.binning.quantile_summaries import QuantileSummaries
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.statistic import data_overview
# from federatedml.statistic.feature_statistic import feature_statistic
from federatedml.util import LOGGER
from federatedml.util import consts
class SummaryStatistics(object):
def __init__(self, length, abnormal_list=None, stat_order=2, bias=True):
self.abnormal_list = abnormal_list
self.sum = np.zeros(length)
self.sum_square = np.zeros(length)
self.max_value = -np.inf * np.ones(length)
self.min_value = np.inf * np.ones(length)
self.count = np.zeros(length)
self.length = length
self.stat_order = stat_order
self.bias = bias
m = 3
while m <= stat_order:
exp_sum_m = np.zeros(length)
setattr(self, f"exp_sum_{m}", exp_sum_m)
m += 1
    def add_rows(self, rows):
        """
        When getting E(x^n), the formula are:
        .. math::
            (i-1)/i * S_{i-1} + 1/i * x_i
        where i is the current count, and S_i is the current expectation of x
        """
        # Fast path: no abnormal values configured, so every column accepts
        # the incoming value and all accumulators update vectorized.
        #if self.abnormal_list is None:
        if not self.abnormal_list:
            rows = np.array(rows, dtype=float)
            self.count += 1
            self.sum += rows
            self.sum_square += rows ** 2
            self.max_value = np.max([self.max_value, rows], axis=0)
            self.min_value = np.min([self.min_value, rows], axis=0)
            for m in range(3, self.stat_order + 1):
                exp_sum_m = getattr(self, f"exp_sum_{m}")
                # Running mean of x^m (see docstring formula); count was
                # already incremented above, so (count-1)/count weights the
                # previous expectation.
                # exp_sum_m += rows ** m
                exp_sum_m = (self.count - 1) / self.count * exp_sum_m + rows ** m / self.count
                setattr(self, f"exp_sum_{m}", exp_sum_m)
        else:
            # Slow path: drop abnormal/NaN entries per column, then update
            # only the surviving column indices (fancy indexing below).
            filter_rows = []
            filter_idx = []
            for idx, value in enumerate(rows):
                if value in self.abnormal_list or (isinstance(value, float) and np.isnan(value)):
                    continue
                try:
                    value = float(value)
                except ValueError as e:
                    raise ValueError(f"In add func, value should be either a numeric input or be listed in "
                                     f"abnormal list. Error info: {e}")
                filter_rows.append(value)
                filter_idx.append(idx)
            # Every value in this row was filtered out: nothing to update.
            if not filter_idx:
                return
            filter_rows = np.array(filter_rows, dtype=float)
            filter_idx = np.array(filter_idx)
            self.count[filter_idx] += 1
            self.sum[filter_idx] += filter_rows
            self.sum_square[filter_idx] += filter_rows ** 2
            self.max_value[filter_idx] = np.max([self.max_value[filter_idx], filter_rows], axis=0)
            self.min_value[filter_idx] = np.min([self.min_value[filter_idx], filter_rows], axis=0)
            for m in range(3, self.stat_order + 1):
                exp_sum_m = getattr(self, f"exp_sum_{m}")
                # Same running-mean update as above, restricted to the
                # columns that accepted a value this row.
                # exp_sum_m[filter_idx] += filter_rows ** m
                exp_sum_m[filter_idx] = (self.count[filter_idx] - 1) / self.count[filter_idx] * exp_sum_m[filter_idx] + \
                                        filter_rows ** m / self.count[filter_idx]
                setattr(self, f"exp_sum_{m}", exp_sum_m)
        # NOTE(review): the string below is dead code kept from an earlier
        # scalar implementation; it is never executed.
        """
        for idx, value in enumerate(rows):
            if value in self.abnormal_list:
                continue
            try:
                value = float(value)
            except ValueError as e:
                raise ValueError(f"In add func, value should be either a numeric input or be listed in "
                                 f"abnormal list. Error info: {e}")
            self.count[idx] += 1
            self.sum[idx] += value
            self.sum_square[idx] += value ** 2
            self.max_value[idx] = np.max([self.max_value[idx], value])
            self.min_value[idx] = np.min([self.min_value[idx], value])
            for m in range(3, self.stat_order + 1):
                exp_sum_m = getattr(self, f"exp_sum_{m}")
                exp_sum_m[idx] = (self.count[idx] - 1) / self.count[idx] * \
                                 exp_sum_m[idx] + rows[idx] ** m / self.count[idx]
                setattr(self, f"exp_sum_{m}", exp_sum_m)
        """
def merge(self, other):
if self.stat_order != other.stat_order:
raise AssertionError("Two merging summary should have same order.")
self.sum += other.sum
self.sum_square += other.sum_square
self.max_value = np.max([self.max_value, other.max_value], axis=0)
self.min_value = np.min([self.min_value, other.min_value], axis=0)
for m in range(3, self.stat_order + 1):
sum_m_1 = getattr(self, f"exp_sum_{m}")
sum_m_2 = getattr(other, f"exp_sum_{m}")
exp_sum = (sum_m_1 * self.count + sum_m_2 * other.count) / (self.count + other.count)
setattr(self, f"exp_sum_{m}", exp_sum)
self.count += other.count
return self
"""
def summary(self):
for m in range(3, self.stat_order + 1):
exp_sum_m = getattr(self, f"exp_sum_{m}")
for idx, cnt in enumerate(self.count):
if np.abs(cnt) < consts.FLOAT_ZERO:
continue
exp_sum_m[idx] /= cnt
setattr(self, f"exp_sum_{m}", exp_sum_m)
"""
    @property
    def mean(self):
        # Per-column arithmetic mean: running sum over per-column counts.
        return self.sum / self.count
    @property
    def max(self):
        # Per-column running maximum of all accepted values.
        return self.max_value
    @property
    def min(self):
        # Per-column running minimum of all accepted values.
        return self.min_value
@property
def variance(self):
mean = self.mean
variance = self.sum_square / self.count - mean ** 2
variance = np.array([x if math | .fabs(x) >= consts.FLOAT_ZERO else 0.0 for x in variance])
return variance
@property
def coefficient_of_variance(self):
mean = np.array([consts.FLOAT_ZERO if math.fabs(x) < consts.FLOAT_ZERO else x
for x in self.mean])
return np.fabs(self.stddev / mean)
@property
def stddev(self) | :
return np.sqrt(self.variance)
@property
def moment_3(self):
"""
In mathematics, a moment is a specific quantitative measure of the shape of a function.
where the k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
the 3rd central moment is often used to calculate the coefficient of skewness
"""
if self.stat_order < 3:
raise ValueError("The third order of expectation sum has not been statistic.")
exp_sum_2 = self.sum_square / self.count
exp_sum_3 = getattr(self, "exp_sum_3")
mu = self.mean
return exp_sum_3 - 3 * mu * exp_sum_2 + 2 * mu ** 3
@property
def moment_4(self):
"""
In mathematics, a moment is a specific quantitative measure of the shape of a function.
where the k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \ sum_{i = 1}^n (x_i - \bar{x})^k
the 4th central moment |
vasily-v-ryabov/pywinauto | examples/notepad_slow.py | Python | bsd-3-clause | 9,099 | 0.003737 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/cont | ributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Re | distributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run some automations to test things"""
from __future__ import unicode_literals
from __future__ import print_function
import os.path
import sys
import time
try:
from pywinauto import application
except ImportError:
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
sys.path.append(pywinauto_path)
from pywinauto import application
from pywinauto import tests
from pywinauto.findbestmatch import MatchError
from pywinauto.timings import Timings
print("Setting timings to slow settings, may be necessary for")
print("slow applications or slow machines.")
# Switch pywinauto's global Timings profile to its conservative preset
# before any automation below runs.
Timings.slow()
#application.set_timing(3, .5, 10, .5, .4, .2, .2, .1, .2, .5)
def run_notepad():
"""Run notepad and do some small stuff with it"""
start = time.time()
app = application.Application()
## for distribution we don't want to connect to anybodies application
## because we may mess up something they are working on!
#try:
# app.connect_(path = r"c:\windows\system32\notepad.exe")
#except application.ProcessNotFoundError:
# app.start_(r"c:\windows\system32\notepad.exe")
app.start(r"notepad.exe")
app.Notepad.menu_select("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app.PageSetupDlg.SizeComboBox.select(4)
# Select the 'Letter' combobox item or the Letter
try:
app.PageSetupDlg.SizeComboBox.select("Letter")
except ValueError:
app.PageSetupDlg.SizeComboBox.select('Letter (8.5" x 11")')
app.PageSetupDlg.SizeComboBox.select(2)
# run some tests on the Dialog. List of available tests:
# "AllControls",
# "AsianHotkey",
# "ComboBoxDroppedHeight",
# "CompareToRefFont",
# "LeadTrailSpaces",
# "MiscValues",
# "Missalignment",
# "MissingExtraString",
# "Overlapping",
# "RepeatedHotkey",
# "Translation",
# "Truncation",
bugs = app.PageSetupDlg.run_tests('RepeatedHotkey Truncation')
# if there are any bugs they will be printed to the console
# and the controls will be highlighted
tests.print_bugs(bugs)
# ----- Next Page Setup Dialog ----
app.PageSetupDlg.Printer.click()
# do some radio button clicks
# Open the Connect to printer dialog so we can
# try out checking/unchecking a checkbox
app.PageSetupDlg.Network.click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app.ConnectToPrinter.ExpandByDefault.check()
app.ConnectToPrinter.ExpandByDefault.uncheck()
# try doing the same by using click
app.ConnectToPrinter.ExpandByDefault.click()
app.ConnectToPrinter.ExpandByDefault.click()
# close the dialog
app.ConnectToPrinter.Cancel.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.Properties.click()
doc_props = app.window(title_re = ".*Properties$")
doc_props.wait('exists', timeout=40)
# ----- Document Properties Dialog ----
# some tab control selections
# Two ways of selecting tabs with indices...
doc_props.TabCtrl.select(0)
doc_props.TabCtrl.select(1)
try:
doc_props.TabCtrl.select(2)
except IndexError:
# not all users have 3 tabs in this dialog
print('Skip 3rd tab selection...')
# or with text...
doc_props.TabCtrl.select("PaperQuality")
try:
doc_props.TabCtrl.select("JobRetention")
except MatchError:
# some people do not have the "Job Retention" tab
print('Skip "Job Retention" tab...')
# doc_props.TabCtrl.select("Layout")
#
# # do some radio button clicks
# doc_props.RotatedLandscape.click()
# doc_props.BackToFront.click()
# doc_props.FlipOnShortEdge.click()
#
# doc_props.Portrait.click()
# doc_props._None.click()
# doc_props.FrontToBack.click()
#
# # open the Advanced options dialog in two steps
# advbutton = doc_props.Advanced
# advbutton.click()
#
# # close the 4 windows
#
# # ----- Advanced Options Dialog ----
# app.window(title_re = ".* Advanced Options").Ok.click()
# ----- Document Properties Dialog again ----
doc_props.Cancel.close_click()
# for some reason my current printer driver
# window does not close cleanly :(
if doc_props.Cancel.Exists():
doc_props.OK.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.OK.close_click()
# ----- Page Setup Dialog ----
app.PageSetupDlg.Ok.close_click()
# type some text - note that extended characters ARE allowed
app.Notepad.Edit.set_edit_text("I am typing s\xe4me text to Notepad\r\n\r\n"
"And then I am going to quit")
app.Notepad.Edit.right_click()
app.Popup.menu_item("Right To Left Reading Order").click()
#app.PopupMenu.menu_select("Paste", app.Notepad.ctrl_())
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#app.PopupMenu.menu_select("Show unicode control characters", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Insert Unicode control character -> IAFS", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.type_keys("{ESC}")
# the following shows that Sendtext does not accept
# accented characters - but does allow 'control' characters
app.Notepad.Edit.type_keys("{END}{ENTER}SendText d\xf6\xe9s "
u"s\xfcpp\xf4rt \xe0cce\xf1ted characters!!!", with_spaces = True)
# Try and save
app.Notepad.menu_select("File->SaveAs")
app.SaveAs.EncodingComboBox.select("UTF-8")
app.SaveAs.FileNameEdit.set_edit_text("Example-utf8.txt")
app.SaveAs.Save.close_click()
# my machine has a weird problem - when connected to the network
# the SaveAs Dialog appears - but doing anything with it can
# cause a LONG delay - the easiest thing is to just wait
# until the dialog is no longer active
# - Dialog might just b |
snlab/alto-server | plugins/autoird/basicird.py | Python | apache-2.0 | 542 | 0.00369 | #!/usr/bin/env python3
class SimpleIRD():
    """Minimal IRD resource wrapper.

    Metadata is read through the supplied `get_meta` callable; any extra
    keyword arguments are attached to the instance as attributes.
    """
    def __init__(self, get_meta=None, **kargs):
        self.get_meta = get_meta
        for name, method in kargs.items():
            setattr(self, name, method)

    def _lookup(self, key, default=None):
        # All accessors read from the metadata dict returned by get_meta().
        return self.get_meta().get(key, default)

    def get_capabilities(self):
        """Capabilities mapping, or {} when absent."""
        return self._lookup('capabilities', {})

    def get_uses(self):
        """List of used resource ids, or [] when absent."""
        return self._lookup('uses', [])

    def get_resource_id(self):
        """The 'resource-id' entry, or None."""
        return self._lookup('resource-id')

    def get_service(self):
        """The 'output' (service) entry, or None."""
        return self._lookup('output')
|
ahmad88me/PyGithub | tests/Deployment.py | Python | lgpl-3.0 | 3,609 | 0.007204 | ############################ Copyrights and license ############################
# #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2020 Pascal Hofmann <mail@pascalhofmann.de> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# | any later version. #
# #
# PyGithub is distributed in the | hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
from . import Framework
class Deployment(Framework.TestCase):
    """Attribute checks against a recorded PyGithub deployment fixture."""

    def setUp(self):
        super().setUp()
        repo = self.g.get_user().get_repo("PyGithub")
        self.deployment = repo.get_deployment(263877258)

    def testAttributes(self):
        created_at = datetime.datetime(2020, 8, 26, 11, 44, 53)
        # (attribute name, expected value) pairs checked in one sweep.
        expected_attributes = [
            ("id", 263877258),
            ("url", "https://api.github.com/repos/jacquev6/PyGithub/deployments/263877258"),
            ("sha", "743f5a58b0bce91c4eab744ff7e39dfca9e6e8a5"),
            ("task", "deploy"),
            ("payload", {"test": True}),
            ("original_environment", "test"),
            ("environment", "test"),
            ("description", "Test deployment"),
            ("created_at", created_at),
            ("updated_at", created_at),
            ("transient_environment", True),
            ("production_environment", False),
            ("statuses_url", "https://api.github.com/repos/jacquev6/PyGithub/deployments/263877258/statuses"),
            ("repository_url", "https://api.github.com/repos/jacquev6/PyGithub"),
        ]
        for attribute, expected in expected_attributes:
            self.assertEqual(getattr(self.deployment, attribute), expected)
        self.assertEqual(self.deployment.creator.login, "jacquev6")
        self.assertEqual(
            repr(self.deployment),
            'Deployment(url="https://api.github.com/repos/jacquev6/PyGithub/deployments/263877258", id=263877258)',
        )
|
PullRequestFive/CmdrData | cmdrdata/cmdrdata.py | Python | mit | 1,690 | 0.012426 | import logging
import message
import os
import random
import datetime
import time
import collections
from google.appengine.ext.webapp import template
try: import simplejson as json
except ImportError: import json
from abstract_app import AbstractApp
class CmdrData(AbstractApp):
    # A user who has authorized your app has checked in.  This runs inside
    # AppEngine's task queue, and contains the check-in payload for you to
    # process.
    def checkinTaskQueue(self, client, checkin_json):
        logging.debug('Current checkin: %s', checkin_json)
        user_id = checkin_json['user']['id']
        category_name = find_primary_category(checkin_json['venue']['categories'])
        # Look back one week from now, expressed as epoch seconds for the API.
        window_start = datetime.datetime.now() - datetime.timedelta(days=7)
        epoch_seconds = int(time.mktime(window_start.timetuple()))
        parameters = {'limit': 100,
                      'afterTimestamp': epoch_seconds}
        week_checkins = client.users.checkins(user_id, parameters)
        logging.debug('Received the following JSON response from 4sq: %s', week_checkins)
        # Count how often each primary category was visited this week.
        frequency = collections.defaultdict(int)
        for past_checkin in week_checkins['checkins']['items']:
            past_category = find_primary_category(past_checkin['venue']['categories'])
            frequency[past_category] += 1
        message_text = message.GetText(category_name, frequency[category_name])
        if message_text:
            self.makeContentInfo(
                checkin_json=checkin_json,
                content=json.dumps({}),
                text=message_text,
                reply=True)
def find_primary_category(categories):
    """Return the name of the first category flagged primary, or None.

    Uses .get() so a category dict without a 'primary' key is simply
    skipped instead of raising KeyError (the previous behavior).
    """
    for category in categories:
        if category.get('primary'):
            return category['name']
    return None
|
OCA/bank-payment | account_payment_order_vendor_email/__manifest__.py | Python | agpl-3.0 | 634 | 0 | # Copyright (C) 2020 Open Source Integrators
# License AGPL-3.0 or later (http: | //www.gnu.org/licenses/agpl).
{
    "name": "Account Payment Order Email",
    "version": "14.0.1.0.0",
    "license": "AGPL-3",
    "author": "Open Source Integrators, Odoo Community Association (OCA)",
    "maintainers": [],
    "website": "https://github.com/OCA/bank-payment",
    "category": "Accounting",
    # Addon dependencies installed before this module.
    "depends": ["account_payment_order", "account_payment_mode"],
    # XML data files loaded at install/upgrade time, in order.
    "data": [
        "data/mail_template.xml",
        "views/account_payment_mode_view.xml",
        "views/account_payment_order_view.xml",
    ],
    "installable": True,
}
|
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.6/Lib/lib2to3/fixes/fix_import.py | Python | mit | 1,817 | 0.001101 | """Fixer for import statements.
If spam is being imported from the local directory, this import:
from spam import eggs
Becomes:
from .spam import eggs
And this import:
import spam
Becomes:
from . import spam
"""
# Local imports
from .. import fixer_base
from os.path import dirname, join, exists, pathsep
from ..fixer_util import FromImport
class FixImport(fixer_base.BaseFix):
    """Rewrite implicit relative imports as explicit relative imports."""

    PATTERN = """
    import_from< type='from' imp=any 'import' any >
    |
    import_name< type='import' imp=any >
    """

    def transform(self, node, results):
        imp = results['imp']
        # Already a new-style (explicit relative) import: nothing to do.
        if unicode(imp).startswith('.'):
            return
        # A global import -- skip it!
        if not probably_a_local_import(unicode(imp), self.filename):
            return
        if results['type'].value != 'from':
            # 'import spam' becomes 'from . import spam'.
            new = FromImport('.', getattr(imp, 'content', None) or [imp])
            new.set_prefix(node.get_prefix())
            return new
        # Some imps are top-level (eg: 'import ham'), some are first level
        # (eg: 'import ham.eggs'), some are third level (eg: 'import
        # ham.eggs as spam').  Hence, the loop to find the leaf node.
        while not hasattr(imp, 'value'):
            imp = imp.children[0]
        imp.value = "." + imp.value
        node.changed()
        return node
def probably_a_local_import(imp_name, file_path):
    """Return True if imp_name looks importable from file_path's directory."""
    from os.path import sep
    # Must be stripped because the right space is included by the parser
    imp_name = imp_name.split('.', 1)[0].strip()
    base_path = dirname(file_path)
    base_path = join(base_path, imp_name)
    # A module file (any importable extension) or a package directory
    # (trailing separator) counts as local.
    # NOTE: the original tested os.path.pathsep (':'/';') here, which can
    # never form a package-directory path; sep ('/'/'\\') is the intended
    # path separator (fixed upstream in later lib2to3 versions).
    for ext in ['.py', sep, '.pyc', '.so', '.sl', '.pyd']:
        if exists(base_path + ext):
            return True
    return False
|
termie/nova-migration-demo | nova/api/openstack/extensions.py | Python | apache-2.0 | 15,632 | 0.001215 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import sys
import routes
import webob.dec
import webob.exc
from nova import exception
from nova import flags
from nova import log as logging
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
LOG = logging.getLogger('extensions')
FLAGS = flags.FLAGS
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.
    """

    def get_name(self):
        """The name of the extension, e.g. 'Fox In Socks'."""
        raise NotImplementedError()

    def get_alias(self):
        """The alias for the extension, e.g. 'FOXNSOX'."""
        raise NotImplementedError()

    def get_description(self):
        """Friendly description, e.g. 'The Fox In Socks Extension'."""
        raise NotImplementedError()

    def get_namespace(self):
        """The XML namespace, e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'."""
        raise NotImplementedError()

    def get_updated(self):
        """Timestamp of the last update, e.g. '2011-01-22T13:25:27-06:00'."""
        # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS
        raise NotImplementedError()

    def get_resources(self):
        """List of extensions.ResourceExtension objects (new nouns/URLs)."""
        return []

    def get_actions(self):
        """List of extensions.ActionExtension objects (verbs)."""
        return []

    def get_response_extensions(self):
        """List of extensions.ResponseExtension objects (response mutators)."""
        return []
class ActionExtensionController(common.OpenstackController):
    """Dispatches POSTed action bodies to registered per-action handlers."""

    def __init__(self, application):
        self.application = application
        self.action_handlers = {}

    def add_action(self, action_name, handler):
        self.action_handlers[action_name] = handler

    def action(self, req, id):
        """Invoke the first handler whose action name keys the request body."""
        input_dict = self._deserialize(req.body, req.get_content_type())
        for action_name in self.action_handlers:
            if action_name in input_dict:
                return self.action_handlers[action_name](input_dict, req, id)
        # no action handler found (bump to downstream application)
        return self.application
class ResponseExtensionController(common.OpenstackController):
    """Runs registered handlers over the downstream response, then re-wraps it."""

    def __init__(self, application):
        self.application = application
        self.handlers = []

    def add_handler(self, handler):
        self.handlers.append(handler)

    def process(self, req, *args, **kwargs):
        res = req.get_response(self.application)
        content_type = req.best_match_content_type()
        # currently response handlers are un-ordered
        for handler in self.handlers:
            res = handler(res)
        try:
            body, headers = res.body, res.headers
        except AttributeError:
            # A handler returned a plain object instead of a Response:
            # serialize it ourselves (no default XML namespace).
            body = self._serialize(res, content_type, None)
            headers = {"Content-Type": content_type}
        response = webob.Response()
        response.body = body
        response.headers = headers
        return response
class ExtensionController(common.OpenstackController):
    """Read-only REST controller exposing the loaded extensions."""

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager

    def _translate(self, ext):
        return {
            'name': ext.get_name(),
            'alias': ext.get_alias(),
            'description': ext.get_description(),
            'namespace': ext.get_namespace(),
            'updated': ext.get_updated(),
            'links': [],  # TODO(dprince): implement extension links
        }

    def index(self, req):
        return dict(extensions=[self._translate(ext)
                                for _alias, ext
                                in self.extension_manager.extensions.iteritems()])

    def show(self, req, id):
        # NOTE(dprince): the extensions alias is used as the 'id' for show
        return self._translate(self.extension_manager.extensions[id])

    def delete(self, req, id):
        raise faults.Fault(webob.exc.HTTPNotFound())

    def create(self, req):
        raise faults.Fault(webob.exc.HTTPNotFound())
class ExtensionMiddleware(wsgi.Middleware):
"""Extensions middleware for WSGI."""
@classmethod
def factory(cls, global_config, **local_config):
"""Paste factory."""
def _factory(app):
return cls(app, **local_config)
return _factory
def _action_ext_controllers(self, application, ext_mgr, mapper):
"""Return a dict of ActionExtensionController-s by collection."""
action_controllers = {}
for action in ext_mgr.get_actions():
if not action.collection in action_controllers.keys():
controller = ActionExtensionController(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
controller=controller,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" % action.collection,
action='action',
controller=controller,
conditions=dict(method=['POST']))
action_controllers[action.collection] = controller
return action_controllers
def _response_ext_controllers(self, application, ext_mgr, mapper):
"""Returns a dict of ResponseExtensionController-s by collection."""
response_ext_controllers = {}
for resp_ext in ext_mgr.get_response_extensions():
if not resp_ext.key in response_ext_controllers.keys():
controller = ResponseExtensionController(application)
mapper.connect(resp_ext.url_route + '.:(format)',
action='process',
controller=controller,
conditions=resp_ext.conditions)
mapper.connect(resp_ext.url_route,
action='process',
controller=controller,
conditions=resp_ext.conditions)
response_ext_controllers[resp_ext.key] = controller
return response_ext_controllers
def __init__(self, application, ext_mgr=None):
if ext_mgr is None:
ext_mgr = ExtensionManager(FLAGS.osapi_extensions_path)
self.ext_mgr = ext_mgr
mapper = routes.Mapper()
# extended resources
for resource in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'),
resource.collection)
|
alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/_xmlplus/dom/html/HTMLDListElement.py | Python | agpl-3.0 | 1,396 | 0.007163 | ########################################################################
#
# File Name: HTMLDListElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.do | m import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLDListElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="DL"):
HTMLElement.__init__(self, ownerDocument, nodeN | ame)
### Attribute Methods ###
def _get_compact(self):
return self.hasAttribute("COMPACT")
def _set_compact(self, value):
if value:
self.setAttribute("COMPACT", "COMPACT")
else:
self.removeAttribute("COMPACT")
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"compact" : _get_compact
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"compact" : _set_compact
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
artfwo/aiosc | aiosc.py | Python | mit | 6,724 | 0.00238 | # aiosc - a minimalistic OSC communication module using asyncio
#
# Copyright (c) 2014 Artem Popov <artfwo@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import asyncio
import re
import struct
def singleton(cls):
    """Class decorator replacing the class with its sole instance.

    The instance is made callable (returning itself), so code that
    "instantiates" the decorated name keeps working.
    """
    # Special methods are looked up on the *type*, not the instance, so the
    # previous `instance.__call__ = lambda: instance` never made the object
    # callable via `obj()`; defining __call__ on the class does.
    cls.__call__ = lambda self: self
    return cls()
@singleton
class Impulse:
    # Sentinel for the OSC 'I' type tag: parse_message yields it and
    # pack_message accepts it, and it carries no payload bytes.
    pass
# Character class for one OSC address-component character: anything except
# the OSC special/pattern characters.  The SLASH variant omits '/' from the
# excluded set, so it additionally matches '/' (used for '//' patterns).
OSC_ADDR_REGEXP = '[^ #*,/?[\]{}]'
OSC_ADDR_SLASH_REGEXP = '[^ #*,?[\]{}]'
# translate osc address pattern to regexp for use in message handlers
def translate_pattern(pattern):
result = ''
i = 0
while i < len(pattern):
c = pattern[i]
if c == '/':
j = i + 1
if j < len(pattern) and pattern[j] == '/':
result += OSC_ADDR_SLASH_REGEXP + '*\/'
i = j
else:
result += re.escape(c)
elif c == '?':
result += OSC_ADDR_REGEXP
elif c == '*':
result += OSC_ADDR_REGEXP + '*'
elif c == '[':
j = pattern.index(']', i)
sub = pattern[i+1:j]
result += '['
if sub.startswith('!'):
sub = sub[1:]
result += '^'
result += '-'.join([re.escape(s) for s in sub.split('-')])
result += ']'
i = j
elif c == '{':
j = pattern.index('}', i)
sub = pattern[i+1:j]
result += '('
result += '|'.join([re.escape(s) for s in sub.split(',')])
result += ')'
i = j
else:
result += re.escape(c)
i += 1
return '^' + result + '$'
# read padded string from the beginning of a packet and return (value, tail)
def read_string(packet):
    """Split a NUL-terminated, 4-byte-padded OSC string off the front of packet."""
    terminator = packet.index(b'\x00')
    # Strings always carry at least one NUL, so padding rounds past it.
    boundary = (terminator // 4 + 1) * 4
    return str(packet[:terminator], 'ascii'), packet[boundary:]
# read padded blob from the beginning of a packet and return (value, tail)
def read_blob(packet):
    """Split a size-prefixed, 4-byte-padded OSC blob off the front of packet."""
    size, tail = struct.unpack('>I', packet[:4])[0], packet[4:]
    # Blobs pad to a multiple of 4 with *no* minimum padding (unlike
    # strings): a size already divisible by 4 has no pad bytes.  The old
    # formula ((size // 4 + 1) * 4) over-consumed 4 bytes in that case,
    # corrupting any following arguments (and disagreeing with pack_blob).
    padded_size = (size + 3) // 4 * 4
    return tail[:size], tail[padded_size:]
def parse_message(packet):
    """Parse a binary OSC message into a (path, args) pair.

    Raises NotImplementedError for bundles and RuntimeError for unknown
    type tags.
    """
    if packet.startswith(b'#bundle'):
        raise NotImplementedError('OSC bundles are not yet supported')
    tail = packet
    path, tail = read_string(tail)
    type_tag, tail = read_string(tail)
    args = []
    # The leading ',' of the type tag is skipped; each remaining character
    # describes one argument.  Fixed sizes are inlined (the old code bound
    # them to a local named `len`, shadowing the builtin).
    for t in type_tag[1:]:
        if t == 'i':
            value, tail = struct.unpack('>i', tail[:4])[0], tail[4:]
        elif t == 'f':
            value, tail = struct.unpack('>f', tail[:4])[0], tail[4:]
        elif t == 'h':
            value, tail = struct.unpack('>q', tail[:8])[0], tail[8:]
        elif t == 's':
            value, tail = read_string(tail)
        elif t == 'b':
            value, tail = read_blob(tail)
        elif t == 'T':
            value = True
        elif t == 'F':
            value = False
        elif t == 'N':
            value = None
        elif t == 'I':
            value = Impulse
        else:
            raise RuntimeError('Unable to parse type "{}"'.format(t))
        args.append(value)
    return (path, args)
# convert string to padded osc string
def pack_string(s):
    """Encode s as an ASCII, NUL-terminated string padded to a multiple of 4."""
    encoded = bytes(s + '\x00', 'ascii')
    remainder = len(encoded) % 4
    if remainder:
        encoded += b'\x00' * (4 - remainder)
    return encoded
# convert bytes to padded osc blob
def pack_blob(b):
    """Encode b as a big-endian size prefix plus data, padded to a multiple of 4."""
    blob = struct.pack('>I', len(b)) + bytes(b)
    remainder = len(blob) % 4
    if remainder:
        blob += b'\x00' * (4 - remainder)
    return blob
def pack_message(path, *args):
    """Serialize an OSC message addressed to *path* with *args* into bytes.

    Supported argument types: int ('i'), float ('f'), str ('s'),
    bytes ('b'), bool ('T'/'F' in the tag only), None ('N') and the
    module-level Impulse sentinel ('I').
    """
    result = b''
    typetag = ','
    for arg in args:
        # NOTE: exact type() checks (not isinstance) are deliberate --
        # bool is a subclass of int and must reach its own branch below.
        if type(arg) == int:
            result += struct.pack('>i', arg)
            typetag += 'i'
        elif type(arg) == float:
            result += struct.pack('>f', arg)
            typetag += 'f'
        elif type(arg) == str:
            result += pack_string(arg)
            typetag += 's'
        elif type(arg) == bytes:
            result += pack_blob(arg)
            typetag += 'b'
        elif type(arg) == bool:
            # booleans are encoded in the type tag only: no payload bytes
            typetag += 'T' if arg else 'F'
        elif arg is Impulse:
            typetag += 'I'
        elif arg is None:
            typetag += 'N'
        else:
            raise NotImplementedError('Unable to pack {}'.format(type(arg)))
    # address pattern and type tag precede the packed argument payload
    result = pack_string(path) + pack_string(typetag) + result
    # pack_string/pack_blob/struct outputs are already 4-byte aligned, so
    # this final padding is a defensive no-op in practice
    if len(result) % 4 != 0:
        width = (len(result) // 4 + 1) * 4
        result = result.ljust(width, b'\x00')
    return result
class OSCProtocol(asyncio.DatagramProtocol):
    """Datagram protocol that dispatches incoming OSC messages to handlers.

    Handlers are registered per OSC address pattern; each pattern is
    compiled to a regex via translate_pattern() and matched against the
    path of every received message.
    """
    def __init__(self, handlers=None):
        # handlers: optional mapping of {pattern: callable(addr, path, *args)}
        super().__init__()
        # list of (compiled_pattern, handler) pairs in registration order
        self._handlers = []
        if handlers:
            for pattern, handler in handlers.items():
                self.add_handler(pattern, handler)
    def add_handler(self, pattern, handler):
        # Register *handler* for OSC paths matching *pattern*.
        pattern_re = re.compile(translate_pattern(pattern))
        self._handlers.append((pattern_re, handler))
    def connection_made(self, transport):
        # Keep the transport so send() can write outgoing datagrams.
        self.transport = transport
    def datagram_received(self, data, addr):
        path, args = parse_message(data)
        # dispatch the message to every handler whose pattern matches
        for pattern_re, handler in self._handlers:
            if pattern_re.match(path):
                handler(addr, path, *args)
    def send(self, path, *args, addr=None):
        # Pack and send one OSC message; addr targets unconnected sockets.
        return self.transport.sendto(pack_message(path, *args), addr=addr)
async def send(target, path, *args, loop=None):
    """One-shot OSC send: open a datagram endpoint to *target*, send, close.

    :param target: (host, port) of the remote OSC server.
    :param path: OSC address pattern of the message.
    :param args: message arguments (see pack_message for supported types).
    :param loop: event loop to use; defaults to the running loop.
    """
    # Bug fix: the explicit `loop` argument used to be overwritten
    # unconditionally, so callers could never supply their own loop.
    if loop is None:
        loop = asyncio.get_running_loop()
    transport, protocol = await loop.create_datagram_endpoint(
        OSCProtocol, remote_addr=target)
    protocol.send(path, *args)
    transport.close()
|
import sys
sys.path.append('../../../')

from cx_Freeze import setup, Executable

# Freeze options: bundle PIL and mmfparser; drop Tk to keep the build small.
build_options = {'packages': ['PIL', 'mmfparser'],
                 'excludes': ['tcl', 'tk', 'Tkinter']}

executables = [
    Executable('hfatiler.py', base='Console', targetName='hfatiler.exe')
]

setup(options={'build_exe': build_options}, executables=executables)
|
pythonanywhere/helper_scripts | tests/test_api_base.py | Python | mit | 1,981 | 0.003534 | from unittest.mock import patch
import pytest
import responses
from pythonanywhere.api.base import AuthenticationError, call_api, get_api_endpoint
class TestGetAPIEndpoint:
    """Tests for get_api_endpoint(): env vars select the API domain."""
    def test_defaults_to_pythonanywhere_dot_com_if_no_environment_variables(self):
        assert get_api_endpoint() == "https://www.pythonanywhere.com/api/v0/user/{username}/{flavor}/"
    def test_gets_domain_from_pythonanywhere_site_and_ignores_pythonanywhere_domain_if_both_set(self, monkeypatch):
        # PYTHONANYWHERE_SITE wins when both variables are present
        monkeypatch.setenv("PYTHONANYWHERE_SITE", "www.foo.com")
        monkeypatch.setenv("PYTHONANYWHERE_DOMAIN", "wibble.com")
        assert get_api_endpoint() == "https://www.foo.com/api/v0/user/{username}/{flavor}/"
    def test_gets_domain_from_pythonanywhere_domain_and_adds_on_www_if_set_but_no_pythonanywhere_site(
        self, monkeypatch
    ):
        # bare domain gets a "www." prefix
        monkeypatch.setenv("PYTHONANYWHERE_DOMAIN", "foo.com")
        assert get_api_endpoint() == "https://www.foo.com/api/v0/user/{username}/{flavor}/"
class TestCallAPI:
    """Tests for call_api(): auth failures and TLS-verification toggling."""

    def test_raises_on_401(self, api_token, api_responses):
        url = "https://foo.com/"
        api_responses.add(responses.POST, url, status=401, body="nope")
        with pytest.raises(AuthenticationError) as e:
            call_api(url, "post")
        assert str(e.value) == "Authentication error 401 calling API: nope"

    def test_passes_verify_from_environment(self, api_token, monkeypatch):
        # PYTHONANYWHERE_INSECURE_API=true must disable TLS verification
        monkeypatch.setenv("PYTHONANYWHERE_INSECURE_API", "true")
        with patch("pythonanywhere.api.base.requests") as mock_requests:
            call_api("url", "post", foo="bar")
        args, kwargs = mock_requests.request.call_args
        assert kwargs["verify"] is False

    def test_verify_is_true_if_env_not_set(self, api_token):
        with patch("pythonanywhere.api.base.requests") as mock_requests:
            call_api("url", "post", foo="bar")
        args, kwargs = mock_requests.request.call_args
        assert kwargs["verify"] is True
|
mehta-raghav/fred-pygtav | screengrab.py | Python | gpl-3.0 | 1,245 | 0.009639 | import cv2
import time
import numpy as np
import win32gui, win32ui, win32con, win32api
hwin = win32gui.FindWindow(None,'Grand Theft Auto V')#it gets the process ID or as microsoft calls it 'window handle'
rect = win32gui.GetWindowRect(hwin)
def grab_screen():
    """Capture the GTA V window client area and return it as a grayscale ndarray.

    Uses the module-level `hwin`/`rect` captured at import time; crops
    40 px from the top of the window rect (title bar).
    """
    x = rect[0]
    y = rect[1]
    left = 0
    top = 40  # skip the window title bar
    height = rect[3] - y - top
    width = rect[2] - x
    # Copy the window contents into an in-memory bitmap via GDI.
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
    signed_ints_array = bmp.GetBitmapBits(True)
    # np.fromstring is deprecated for binary data; frombuffer is the modern
    # equivalent (copy taken so the shape/pixels stay valid after cleanup).
    img = np.frombuffer(signed_ints_array, dtype='uint8').copy()
    img.shape = (height, width, 4)
    # Release GDI resources to avoid handle leaks.
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())
    # The old `while True:` wrapper returned on the first pass anyway.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
|
techtonik/pip | tests/lib/test_lib.py | Python | mit | 1,893 | 0 | """Test the test support."""
from __future__ import absolute_import
import filecmp
import re
from os.path import isdir, join
from tests.lib import SRC_DIR
def test_tmp_dir_exists_in_env(script):
    """
    Test that $TMPDIR == env.temp_path and path exists and env.assert_no_temp()
    passes (in fast env)
    """
    # need these tests to ensure the assert_no_temp feature of scripttest is
    # working
    script.assert_no_temp()  # this fails if env.tmp_path doesn't exist
    assert script.environ['TMPDIR'] == script.temp_path
    assert isdir(script.temp_path)
def test_correct_pip_version(script):
    """
    Check we are running proper version of pip in run_pip.
    """
    # output is like:
    # pip PIPVERSION from PIPDIRECTORY (python PYVERSION)
    result = script.pip('--version')
    # compare the directory tree of the invoked pip with that of this source
    # distribution
    pip_folder_outputed = re.match(
        r'pip \d+(\.[\d]+)+(\.?(b|rc|dev|pre|post)\d+)? from (.*) '
        r'\(python \d(.[\d])+\)$',
        result.stdout
    ).group(4)
    pip_folder = join(SRC_DIR, 'src', 'pip')
    diffs = filecmp.dircmp(pip_folder, pip_folder_outputed)
    # If any non-matching .py files exist, we have a problem: run_pip
    # is picking up some other version! N.B. if this project acquires
    # primary resources other than .py files, this code will need
    # maintenance
    mismatch_py = [
        x for x in diffs.left_only + diffs.right_only + diffs.diff_files
        if x.endswith('.py')
    ]
    assert not mismatch_py, (
        'mismatched source files in %r and %r: %r' %
        (pip_folder, pip_folder_outputed, mismatch_py)
    )
def test_as_import(script):
    """Test that pip/__init__.py does not shadow the ``commands.install``
    submodule with a dictionary (i.e. it stays importable as a module).
    """
    # NOTE(review): the `script` fixture is unused here -- confirm whether it
    # is still required for environment setup.
    import pip._internal.commands.install as inst
    assert inst is not None
|
sqlalchemy/alembic | alembic/ddl/sqlite.py | Python | mit | 6,734 | 0 | import re
from typing import Any
from typing import Dict
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import cast
from sqlalchemy import JSON
from sqlalchemy import schema
from sqlalchemy import sql
from .impl import DefaultImpl
from .. import util
if TYPE_CHECKING:
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.elements import Cast
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.type_api import TypeEngine
from ..operations.batch import BatchOperationsImpl
class SQLiteImpl(DefaultImpl):
__dialect__ = "sqlite"
transactional_ddl = False
"""SQLite supports transactional DDL, but pysqlite does not:
see: http://bugs.python.org/issue10740
"""
def requires_recreate_in_batch(
self, batch_op: "BatchOperationsImpl"
) -> bool:
"""Return True if the given :class:`.BatchOperationsImpl`
would need the table to be recreated and copied in order to
proceed.
Normally, only returns True on SQLite when operations other
than add_column are present.
"""
for op in batch_op.batch:
if op[0] == "add_column":
col = op[1][1]
if isinstance(
col.server_default, schema.DefaultClause
) and isinstance(col.server_default.arg, sql.ClauseElement):
return True
elif (
isinstance(col.server_default, util.sqla_compat.Computed)
and col.server_default.persisted
):
return True
elif op[0] not in ("create_index", "drop_index"):
return True
else:
return False
def add_constraint(self, const: "Constraint"):
# attempt to distinguish between an
# auto-gen constraint and an explicit one
if const._create_rule is None: # type:ignore[attr-defined]
raise NotImplementedError(
"No support for ALTER of constraints in SQLite dialect. "
"Please refer to the batch mode feature which allows for "
"SQLite migrations using a copy-and-move strategy."
)
elif const._create_rule(self): # type:ignore[attr-defined]
util.warn(
"Skipping unsupported ALTER for "
"creation of implicit constraint. "
"Please refer to the batch mode feature which allows for "
"SQLite migrations using a copy-and-move strategy."
)
def drop_constrai | nt(self, const: "Constraint"):
if const._create_rule is None: # type:ignore[attr-defined]
raise NotImplementedError(
"No support for ALTER of constraints in SQLite dialect. "
"Please refer to the batch mode feature which allows for "
"SQLite migrations using a copy-and-move strategy."
)
def compare_server_default(
self,
inspector_column: "Column",
metadata_column: "Column",
| rendered_metadata_default: Optional[str],
rendered_inspector_default: Optional[str],
) -> bool:
if rendered_metadata_default is not None:
rendered_metadata_default = re.sub(
r"^\((.+)\)$", r"\1", rendered_metadata_default
)
rendered_metadata_default = re.sub(
r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
)
if rendered_inspector_default is not None:
rendered_inspector_default = re.sub(
r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
)
return rendered_inspector_default != rendered_metadata_default
    def _guess_if_default_is_unparenthesized_sql_expr(
        self, expr: Optional[str]
    ) -> bool:
        """Determine if a server default is a SQL expression or a constant.
        There are too many assertions that expect server defaults to round-trip
        identically without parenthesis added so we will add parens only in
        very specific cases.
        """
        if not expr:
            # empty / None: treat as a constant, never parenthesize
            return False
        elif re.match(r"^[0-9\.]$", expr):
            # single digit or dot literal.
            # NOTE(review): this matches exactly ONE character, so "10" or
            # "1.5" falls through and gets parenthesized -- confirm whether
            # r"^[0-9\.]+$" was intended (harmless either way for SQLite DDL).
            return False
        elif re.match(r"^'.+'$", expr):
            # single-quoted string literal
            return False
        elif re.match(r"^\(.+\)$", expr):
            # already parenthesized
            return False
        else:
            return True
def autogen_column_reflect(
self,
inspector: "Inspector",
table: "Table",
column_info: Dict[str, Any],
) -> None:
# SQLite expression defaults require parenthesis when sent
# as DDL
if self._guess_if_default_is_unparenthesized_sql_expr(
column_info.get("default", None)
):
column_info["default"] = "(%s)" % (column_info["default"],)
def render_ddl_sql_expr(
self, expr: "ClauseElement", is_server_default: bool = False, **kw
) -> str:
# SQLite expression defaults require parenthesis when sent
# as DDL
str_expr = super(SQLiteImpl, self).render_ddl_sql_expr(
expr, is_server_default=is_server_default, **kw
)
if (
is_server_default
and self._guess_if_default_is_unparenthesized_sql_expr(str_expr)
):
str_expr = "(%s)" % (str_expr,)
return str_expr
def cast_for_batch_migrate(
self,
existing: "Column",
existing_transfer: Dict[str, Union["TypeEngine", "Cast"]],
new_type: "TypeEngine",
) -> None:
if (
existing.type._type_affinity # type:ignore[attr-defined]
is not new_type._type_affinity # type:ignore[attr-defined]
and not isinstance(new_type, JSON)
):
existing_transfer["expr"] = cast(
existing_transfer["expr"], new_type
)
# @compiles(AddColumn, 'sqlite')
# def visit_add_column(element, compiler, **kw):
# return "%s %s" % (
# alter_table(compiler, element.table_name, element.schema),
# add_column(compiler, element.column, **kw)
# )
# def add_column(compiler, column, **kw):
# text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
# need to modify SQLAlchemy so that the CHECK associated with a Boolean
# or Enum gets placed as part of the column constraints, not the Table
# see ticket 98
# for const in column.constraints:
# text += compiler.process(AddConstraint(const))
# return text
|
vileopratama/vitech | src/openerp/addons/base/tests/test_view_validation.py | Python | mit | 3,424 | 0.000876 | # This test can be run stand-alone with something like:
# > PYTHONPATH=. python2 openerp/tests/test_view_validation.py
from lxml import etree
from StringIO import StringIO
import unittest
from openerp.tools.view_validation import (valid_page_in_book, valid_att_in_form, valid_type_in_colspan,
valid_type_in_col, valid_att_in_field, valid_att_in_label,
valid_field_in_graph, valid_field_in_tree
)
invalid_form = etree.parse(StringIO('''\
<form>
<label></label>
<group>
<div>
<page></page>
<label colspan="True"></label>
<field></field>
</div>
</group>
<notebook>
<page>
<group col="Two">
<div>
<label></label>
<field colspan="Five"> </field>
</div>
</group>
</page>
</notebook>
</form>
''')).getroot()
valid_form = etree.parse(StringIO('''\
<form string="">
<field name=""></field>
<field name=""></field>
<notebook>
<page>
<field name=""></field>
<label string=""></label>
<field name=""></field>
</page>
<page>
<group colspan="5" col="2">
<label for=""></label>
<label string="" colspan="5"></label>
</group>
</page>
</notebook>
</form>
''')).getroot()
invalid_graph = etree.parse(StringIO('''\
<graph>
<label/>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</graph>
''')).getroot()
valid_graph = etree.parse(StringIO('''\
<graph string="">
<field name=""></field>
<field name=""></field>
</graph>
''')).getroot()
invalid_tree = etree.parse(StringIO('''\
<tree>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</tree>
''')).getroot()
valid_tree = etree.parse(StringIO('''\
<tree string="">
<field name=""></field>
<field name=""></field>
<button/>
<field name=""></field>
</tree>
''')).getroot()
class test_view_validation(unittest.TestCase):
    """ Test the view validation code (but not the views themselves). """
    def test_page_validation(self):
        assert not valid_page_in_book(invalid_form)
        assert valid_page_in_book(valid_form)
    def test_all_field_validation(self):
        assert not valid_att_in_field(invalid_form)
        assert valid_att_in_field(valid_form)
    def test_all_label_validation(self):
        assert not valid_att_in_label(invalid_form)
        assert valid_att_in_label(valid_form)
    def test_form_string_validation(self):
        assert valid_att_in_form(valid_form)
    def test_graph_validation(self):
        assert not valid_field_in_graph(invalid_graph)
        assert valid_field_in_graph(valid_graph)
    def test_tree_validation(self):
        assert not valid_field_in_tree(invalid_tree)
        assert valid_field_in_tree(valid_tree)
    def test_colspan_datatype_validation(self):
        assert not valid_type_in_colspan(invalid_form)
        assert valid_type_in_colspan(valid_form)
    def test_col_datatype_validation(self):
        assert not valid_type_in_col(invalid_form)
        assert valid_type_in_col(valid_form)
if __name__ == '__main__':
unittest.main()
|
import numpy as np

from bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid
from bokeh.models.markers import Cross
from bokeh.io import curdoc, show

# Sample data: a parabola with marker sizes growing left to right.
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)

source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))

plot = Plot(
    title=None, plot_width=300, plot_height=300,
    min_border=0, toolbar_location=None)

glyph = Cross(x="x", y="y", size="sizes", line_color="#e6550d", fill_color=None, line_width=2)
plot.add_glyph(source, glyph)

xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')

yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')

plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

curdoc().add_root(plot)

show(plot)
|
nickmckay/LiPD-utilities | Python/lipd/bag.py | Python | gpl-2.0 | 2,447 | 0.001635 | import bagit
from .loggers import create_logger
logger_bagit = create_logger('bag')
def create_bag(dir_bag):
    """
    Create a Bag out of given files.
    :param str dir_bag: Directory that contains csv, jsonld, and changelog files.
    :return obj: Bag, or None if creation failed (errors are logged/printed).
    """
    logger_bagit.info("enter create_bag")
    try:
        # bag-info metadata; 'DOI-Resolved' marks the doi_resolver as run
        bag = bagit.make_bag(dir_bag, {'Name': 'LiPD Project', 'Reference': 'www.lipds.net', 'DOI-Resolved': 'True'})
        logger_bagit.info("created bag")
        return bag
    except FileNotFoundError as e:
        print("Error: directory not found to create bagit")
        logger_bagit.debug("create_bag: FileNotFoundError: failed to create bagit, {}".format(e))
    except Exception as e:
        print("Error: failed to create bagit bag")
        logger_bagit.debug("create_bag: Exception: failed to create bag, {}".format(e))
    return None
def open_bag(dir_bag):
    """
    Open Bag at the given path
    :param str dir_bag: Path to Bag
    :return obj: Bag, or None if it could not be opened
    """
    logger_bagit.info("enter open_bag")
    try:
        bag = bagit.Bag(dir_bag)
        logger_bagit.info("opened bag")
        return bag
    except Exception as e:
        print("Error: failed to open bagit bag")
        logger_bagit.debug("failed to open bag, {}".format(e))
    return None
def validate_md5(bag):
    """
    Check if Bag is valid and report the result on stdout.
    :param obj bag: Bag
    :return None:
    """
    logger_bagit.info("validate_md5")
    if bag.is_valid():
        print("Valid md5")
    else:
        print("Invalid md5")
        logger_bagit.debug("invalid bag")
    return
def resolved_flag(bag):
    """
    Check DOI flag in bag.info to see if doi_resolver has been previously run
    :param obj bag: Bag
    :return bool: Flag
    """
    found = 'DOI-Resolved' in bag.info
    if found:
        logger_bagit.info("bagit resolved_flag: true")
    else:
        logger_bagit.info("bagit resolved_flag: false")
    return found
def finish_bag(dir_bag):
    """
    Closing steps for creating a bag: create it, reopen it, save manifests.
    :param str dir_bag: Directory being bagged
    :return None:
    """
    logger_bagit.info("enter finish_bag")
    # Create a bag for the 3 files
    new_bag = create_bag(dir_bag)
    if new_bag is None:
        # create_bag already logged/printed the failure; without this guard
        # the save() call below raised AttributeError on None.
        logger_bagit.debug("finish_bag: bag creation failed, nothing to save")
        return
    open_bag(dir_bag)
    new_bag.save(manifests=True)
    logger_bagit.info("exit finish_bag")
    return
|
alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/sbml/TestSpecies.py | Python | gpl-3.0 | 8,165 | 0.030496 | #
# @file TestSpecies.py
# @brief Species unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSpecies.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSpecies(unittest.TestCase):
global S
S = None
def setUp(self):
self.S = libsbml.Species(2,4)
if (self.S == None):
pass
pass
def tearDown(self):
_dummyList = [ self.S ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_create(self):
self.assert_( self.S.getTypeCode() == libsbml.SBML_SPECIES )
self.assert_( self.S.getMetaId() == "" )
self.assert_( self.S.getNotes() == None )
self.assert_( self.S.getAnnotation() == None )
self.assert_( self.S.getId() == "" )
self.assert_( self.S.getName() == "" )
self.assert_( self.S.getCompartment() == "" )
self.assert_( self.S.getInitialAmount() == 0.0 )
self.assert_( self.S.getInitialConcentration() == 0.0 )
self.assert_( self.S.getSubstanceUnits() == "" )
self.assert_( self.S.getSpatialSizeUnits() == "" )
self.assert_( self.S.getHasOnlySubstanceUnits() == False )
self.assert_( self.S.getBoundaryCondition() == False )
self.assert_( self.S.getCharge() == 0 )
self.assert_( self.S.getConstant() == False )
self.assertEqual( False, self.S.isSetId() )
self.assertEqual( False, self.S.isSetName() )
self.assertEqual( False, self.S.isSetCompartment() )
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.assertEqual( False, self.S.isSetSubstanceUnits() )
self.assertEqual( False, self.S.isSetSpatialSizeUnits() )
self.assertEqual( False, self.S.isSetUnits() )
self.assertEqual( False, self.S.isSetCharge() )
self.assertEqual( True, self.S.isSetBoundaryCondition() )
self.assertEqual( True, self.S.isSetHasOnlySubstanceUnits() )
self.assertEqual( True, self.S.isSetConstant() )
pass
def test_Species_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(2,1)
sbmlns.addNamespaces(xmlns)
object = libsbml.Species(sbmlns)
self.assert_( object.getTypeCode() == libsbml.SBML_SPECIES )
self.assert_( object.getMetaId() == "" )
self.assert_( object.getNotes() == None )
self.assert_( object.getAnnotation() == None )
self.assert_( object.getLevel() == 2 )
self.assert_( object.getVersion() == 1 )
self.assert_( object.getNamespaces() != None )
self.assert_( object.getNamespaces().getLength() == 2 )
_dummyList = [ object ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_Species_setCompartment(self):
compartment = "cell";
self.S.setCompartment(compartment)
self.assert_(( compartment == self.S.getCompartment() ))
self.assertEqual( True, self.S.isSetCompartment() )
if (self.S.getCompartment() == compartment):
pass
self.S.setCompartment(self.S.getCompartment())
self.assert_(( compartment == self.S.getCompartment() ))
self.S.setCompartment("")
self.assertEqual( False, self.S.isSetCompartment() )
if (self.S.getCompartment() != None):
pass
pass
def test_Species_setId(self):
id = "Glucose";
self.S.setId(id)
self.assert_(( id == self.S.getId() ))
self.assertEqual( True, self.S.isSetId() )
if (self.S.getId() == id):
pass
self.S.setId(self.S.getId())
self.assert_(( id == self.S.getId() ))
self.S.setId("")
self.assertEqual( False, self.S.isSetId() )
if (self.S.getId() != None):
pass
pass
def test_Species_setInitialAmount(self):
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.S.setInitialAmount(1.2)
self.assertEqual( True, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.assert_( self.S.getInitialAmount() == 1.2 )
pass
def test_Species_setInitialConcentration(self):
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( False, self.S.isSetInitialConcentration() )
self.S.setInitialConcentration(3.4)
self.assertEqual( False, self.S.isSetInitialAmount() )
self.assertEqual( True, self.S.isSetInitialConcentration() )
self.assert_( self.S.getInitialConcentration() == 3.4 )
pass
def test_Species_setName(self):
name = "So_Sweet";
self.S.setName(name)
self.assert_(( name == self.S.getName() ))
self.assertEqual( True, self.S.isSetName() )
if (self.S.getName() == name):
pass
self.S.setName(self.S.getName())
self.assert_(( name == self.S.getName() ))
self.S.setName("")
self.assertEqual( False, self.S.isSetName() )
if (self.S.getName() != None):
pass
pass
def test_Species_setSpatialSizeUnits(self):
s = libsbml.Species(2,1)
units = "volume";
s.setSpatialSizeUnits(units)
self.assert_(( units == s.getSpatialSizeUnits() ))
self.assertEqual( True, s.isSetSpatialSizeUnits() )
if (s.getSpatialSizeUnits() == units):
pass
s.setSpatialSizeUnits(s.getSpatialSizeUnits())
self.assert_(( units == s.getSpatialSizeUnits() ))
s.setSpatialSizeUnits("")
self.assertEqual( False, s.isSetSpatialSizeUnits() )
if (s.getSpatialSizeUnits() != None):
pass
_dummyList = [ s ]; _dummyList[:] = []; d | el _dummyList
pass
def test_Species_setSubstanceUnits(self):
units = "item";
self | .S.setSubstanceUnits(units)
self.assert_(( units == self.S.getSubstanceUnits() ))
self.assertEqual( True, self.S.isSetSubstanceUnits() )
if (self.S.getSubstanceUnits() == units):
pass
self.S.setSubstanceUnits(self.S.getSubstanceUnits())
self.assert_(( units == self.S.getSubstanceUnits() ))
self.S.setSubstanceUnits("")
self.assertEqual( False, self.S.isSetSubstanceUnits() )
if (self.S.getSubstanceUnits() != None):
pass
pass
def test_Species_setUnits(self):
units = "mole";
self.S.setUnits(units)
self.assert_(( units == self.S.getUnits() ))
self.assertEqual( True, self.S.isSetUnits() )
if (self.S.getSubstanceUnits() == units):
pass
self.S.setUnits(self.S.getSubstanceUnits())
self.assert_(( units == self.S.getUnits() ))
self.S.setUnits("")
self.assertEqual( False, self.S.isSetUnits() )
if (self.S.getSubstanceUnits() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSpecies))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
andrei4ka/fuel-web-redhat | fuel_agent_ci/fuel_agent_ci/tests/test_configdrive.py | Python | apache-2.0 | 6,888 | 0.000726 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import time
from fuel_agent_ci.tests import base
from fuel_agent_ci import utils
class TestConfigDrive(base.BaseFuelAgentCITest):
def _build_configdrive(self, profile):
data = json.loads(self.render_template(
template_name='provision.json',
template_data={
'IP': self.dhcp_hosts[0]['ip'],
'MAC': self.dhcp_hosts[0]['mac'],
'MASTER_IP': self.net.ip,
'MASTER_HTTP_PORT': self.http.port,
'PROFILE': profile,
}
| ))
self.ssh.put_content(json.dumps(data), '/tmp/provision.json')
admin_interface = filter(
lambda x: (x['mac_address'] ==
data['kernel_options']['netcfg/choose_interface']),
[dict(name=name, **spec) for name, spec |
in data['interfaces'].iteritems()])[0]
with open('/tmp/boothook.txt', 'wb') as f:
f.write(self.render_template(
template_name='boothook_%s.jinja2' % profile.split('_')[0],
template_data={
'MASTER_IP': data['ks_meta']['master_ip'],
'ADMIN_MAC': \
data['kernel_options']['netcfg/choose_interface'],
'UDEVRULES': data['kernel_options']['udevrules']
}
))
with open('/tmp/cloud_config.txt', 'wb') as f:
f.write(self.render_template(
template_name='cloud_config_%s.jinja2' % profile.split('_')[0],
template_data={
'SSH_AUTH_KEY': data['ks_meta']['auth_key'],
'TIMEZONE': data['ks_meta']['timezone'],
'HOSTNAME': data['hostname'],
'FQDN': data['hostname'],
'NAME_SERVERS': data['name_servers'],
'SEARCH_DOMAIN': data['name_servers_search'],
'MASTER_IP': data['ks_meta']['master_ip'],
'MASTER_URL': \
'http://%s:8000/api' % data['ks_meta']['master_ip'],
# FIXME(kozhukalov):
# 'KS_REPOS': IS NOT SET YET,
'MCO_PSKEY': data['ks_meta']['mco_pskey'],
'MCO_CONNECTOR': data['ks_meta']['mco_connector'],
'MCO_VHOST': data['ks_meta']['mco_vhost'],
'MCO_HOST': data['ks_meta']['mco_host'],
# 'MCO_PORT': IS NOT SET, DEFAULT IS USED
'MCO_USER': data['ks_meta']['mco_user'],
'MCO_PASSWORD': data['ks_meta']['mco_password'],
'PUPPET_MASTER': data['ks_meta']['puppet_master']
}
))
with open('/tmp/meta-data', 'wb') as f:
f.write(self.render_template(
template_name='meta-data_%s.jinja2' % profile.split('_')[0],
template_data={
'ADMIN_IFACE_NAME': admin_interface['name'],
'ADMIN_IP': admin_interface['ip_address'],
'ADMIN_MASK': admin_interface['netmask'],
'HOSTNAME': data['hostname']
}
))
# write-mime-multipart is provided by cloud-utils package
utils.execute('write-mime-multipart --output=/tmp/user-data '
'/tmp/boothook.txt:text/cloud-boothook '
'/tmp/cloud_config.txt:text/cloud-config')
# That does not make sense to build config-drive.img as we can not
# use it as a reference for comparing md5 sum.
# The reason for that is that write-mime-multipart generates
# random boundary identifier in the beginning of user-data.
def _test_configdrive(self, profile):
def _get_md5sum(file_path, size=-1):
md5 = None
with open(file_path) as f:
md5 = hashlib.md5(f.read(size)).hexdigest()
return md5
self._build_configdrive(profile)
self.ssh.run('configdrive')
self.ssh.get_file('/tmp/config-drive.img',
'/tmp/actual-config-drive.img')
# checking configdrive file system type
fs_type = utils.execute(
'blkid -o value -s TYPE /tmp/actual-config-drive.img')
self.assertEqual('iso9660', str(fs_type).strip())
# checking configdrive label
label_output = utils.execute(
'blkid -o value -s LABEL /tmp/actual-config-drive.img')
self.assertEqual('cidata', str(label_output).strip())
# mounting configdrive to check its content
utils.execute('mkdir -p /tmp/cfgdrv')
utils.execute('sudo mount -o loop '
'/tmp/actual-config-drive.img /tmp/cfgdrv')
#NOTE(agordeev): mime boundary should be the same in both files,
# since boundary is always randomly generated,
# thus magic prevents from checksum differencies
with open('/tmp/user-data') as f:
expected_boundary = f.read().split('\n')[0].split('"')[1]
actual_boundary = str(utils.execute(
'head -n1 /tmp/cfgdrv/user-data')).split('"')[1]
actual_md5_userdata = str(utils.execute(
'sed -e s/%s/%s/ %s | md5sum' %
(actual_boundary, expected_boundary,
'/tmp/cfgdrv/user-data'))).split()[0]
actual_md5_metadata = str(utils.execute(
'md5sum /tmp/cfgdrv/meta-data')).split()[0]
# getting reference md5 for user-data and meta-data
md5_userdata = _get_md5sum('/tmp/user-data')
md5_metadata = _get_md5sum('/tmp/meta-data')
self.assertEqual(md5_userdata, actual_md5_userdata)
self.assertEqual(md5_metadata, actual_md5_metadata)
def test_configdrive_centos(self):
self._test_configdrive('centos_65_x86_64')
def test_configdrive_ubuntu(self):
self._test_configdrive('ubuntu_1204_x86_64')
def tearDown(self):
utils.execute('sudo umount -f /tmp/cfgdrv')
utils.execute('rm /tmp/actual-config-drive.img '
'/tmp/user-data /tmp/meta-data '
'/tmp/cloud_config.txt /tmp/boothook.txt')
super(TestConfigDrive, self).tearDown()
|
# (SNMP operation, OID) pairs exercised by the query tool.
commands = (
    ('walk', '1.3.6.1.2.1.2.2.1.2'),
    ('walk', '1.3.6.1.2.1.2.2.1.10'),
    ('get', '1.3.6.1.2.1.2.2.1.2.1'),
)

# Hostnames of the SNMP agents to query.
destinations = (
    'ada-xem1', 'ona-xem1', 'alo-xem1', 'otm-xem1',
    'c2k-xem1', 'vtr-xem1', 'tom-xem1',
    'onr-xem1', 'vco-xem1', 'inm-xem1', 'gtr-xem1',
    'ram-xem1', 'vir-xem1', 'tge-xem1', 'ola-xem1',
    'pip-xem1', 'vmc-xem1', 'pra-xem1', 'arm-xem1',
)
|
vesellov/callfeed.net | mainapp/migrations/0034_auto_20150614_2007.py | Python | mit | 9,969 | 0.001906 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0033_auto_20150613_2110'),
]
operations = [
migrations.RemoveField(
model_name='deferredcallback',
name='widget',
),
migrations.DeleteModel(
name='DeferredCallback',
),
migrations.AddField(
model_name='callbackinfo',
name='planned_for_datetime',
field=models.DateTimeField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='widget',
name='is_operator_shown_in_widget',
field=models.BooleanField(default=True, verbose_name=b'\xd0\x9f\xd0\xbe\xd0\xba\xd0\xb0\xd0\xb7\xd1\x8b\xd0\xb2\xd0\xb0\xd1\x82\xd1\x8c \xd0\xb2 \xd0\xb2\xd0\xb8\xd0\xb4\xd0\xb6\xd0\xb5\xd1\x82\xd0\xb5 \xd0\xb8\xd0\xbd\xd1\x84\xd0\xbe\xd1\x80\xd0\xbc\xd0\xb0\xd1\x86\xd0\xb8\xd1\x8e \xd0\xbe\xd0\xb1 \xd0\xbe\xd0\xbf\xd0\xb5\xd1\x80\xd0\xb0\xd1\x82\xd0\xbe\xd1\x80\xd0\xb5'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='call_description',
field=models.CharField(default=b'', max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='callback_status',
field=models.CharField(default=b'planned', max_length=20, verbose_name=b'\xd0\xa1\xd1\x82\xd0\xb0\xd1\x82\xd1\x83\xd1\x81 \xd0\xb7\xd0\xb2\xd0\xbe\xd0\xbd\xd0\xba\xd0\xb0', choices=[(b'succeed', b'\xd0\x97\xd0\xb2\xd0\xbe\xd0\xbd\xd0\xbe\xd0\xba \xd0\xbf\xd1\x80\xd0\xbe\xd1\x88\xd1\x91\xd0\xbb \xd1\x83\xd1\x81\xd0\xbf\xd0\xb5\xd1\x88\xd0\xbd\xd0\xbe'), (b'planned', b'\xd0\x97\xd0\xb0\xd0\xbf\xd0\xbb\xd0\xb0\xd0\xbd\xd0\xb8\xd1\x80\xd0\xbe\xd0\xb2\xd0\xb0\xd0\xbd'), (b'fail_a', b'\xd0\x9e\xd0\xbf\xd0\xb5\xd1\x80\xd0\xb0\xd1\x82\xd0\xbe\xd1\x80 \xd0\xbd\xd0\xb5 \xd0\xb2\xd0\xb7\xd1\x8f\xd0\xbb \xd1\x82\xd1\x80\xd1\x83\xd0\xb1\xd0\xba\xd1\x83'), (b'fail_b', b'\xd0\x9a\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82 \xd0\xbd\xd0\xb5 \xd0\xb2\xd0\xb7\xd1\x8f\xd0\xbb \xd1\x82\xd1\x80\xd1\x83\xd0\xb1\xd0\xba\xd1\x83'), (b'out_of_balance', b'\xd0\x9d\xd0\xb5\xd0\xb4\xd0\xbe\xd1\x81\xd1\x82\xd0\xb0\xd1\x82\xd0\xbe\xd1\x87\xd0\xbd\xd0\xbe \xd0\xbc\xd0\xb8\xd0\xbd\xd1\x83\xd1\x82')]),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='charged_length_a_sec',
field=models.IntegerField(default=0, verbose_name=b'\xd0\xa1\xd0\xbf\xd0\xb8\xd1\x81\xd0\xb0\xd0\xbd\xd0\xbd\xd0\xb0\xd1\x8f \xd0\xb4\xd0\xbb\xd0\xb8\xd0\xbd\xd0\xb0 \xd1\x80\xd0\xb0\xd0\xb7\xd0\xb3\xd0\xbe\xd0\xb2\xd0\xbe\xd1\x80\xd0\xb0 \xd0\xbd\xd0\xb0 \xd1\x81\xd1\x82\xd0\xbe\xd1\x80\xd0\xbe\xd0\xbd\xd0\xb5 \xd0\xbe\xd0\xbf\xd0\xb5\xd1\x80\xd0\xb0\xd1\x82\xd0\xbe\xd1\x80\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='charged_length_b_sec',
f | ield=models.Integ | erField(default=0, verbose_name=b'\xd0\xa1\xd0\xbf\xd0\xb8\xd1\x81\xd0\xb0\xd0\xbd\xd0\xbd\xd0\xb0\xd1\x8f \xd0\xb4\xd0\xbb\xd0\xb8\xd0\xbd\xd0\xb0 \xd1\x80\xd0\xb0\xd0\xb7\xd0\xb3\xd0\xbe\xd0\xb2\xd0\xbe\xd1\x80\xd0\xb0 \xd0\xbd\xd0\xb0 \xd1\x81\xd1\x82\xd0\xbe\xd1\x80\xd0\xbe\xd0\xbd\xd0\xb5 \xd0\xba\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='cost',
field=models.DecimalField(default=Decimal('0'), verbose_name=b'\xd0\xa1\xd1\x82\xd0\xbe\xd0\xb8\xd0\xbc\xd0\xbe\xd1\x81\xd1\x82\xd1\x8c', max_digits=12, decimal_places=2),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='currency',
field=models.CharField(default=b'', max_length=3, verbose_name=b'\xd0\x92\xd0\xb0\xd0\xbb\xd1\x8e\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='geodata_side_b',
field=models.CharField(default=b'', max_length=100, verbose_name=b'\xd0\x93\xd0\xb5\xd0\xbe\xd0\xb4\xd0\xb0\xd1\x82\xd0\xb0 \xd0\xba\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='ip_side_b',
field=models.IPAddressField(default=b'', verbose_name=b'IP-\xd0\xb0\xd0\xb4\xd1\x80\xd0\xb5\xd1\x81 \xd0\xba\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='mtt_callback_call_id',
field=models.CharField(default=b'', max_length=50),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='phone_number_side_a',
field=models.CharField(default=b'', max_length=20, verbose_name=b'\xd0\x9d\xd0\xbe\xd0\xbc\xd0\xb5\xd1\x80 \xd1\x82\xd0\xb5\xd0\xbb\xd0\xb5\xd1\x84\xd0\xbe\xd0\xbd\xd0\xb0 \xd0\xbe\xd0\xbf\xd0\xb5\xd1\x80\xd0\xb0\xd1\x82\xd0\xbe\xd1\x80\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='phone_number_side_b',
field=models.CharField(default=b'', max_length=20, verbose_name=b'\xd0\x9d\xd0\xbe\xd0\xbc\xd0\xb5\xd1\x80 \xd1\x82\xd0\xb5\xd0\xbb\xd0\xb5\xd1\x84\xd0\xbe\xd0\xbd\xd0\xb0 \xd0\xba\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='real_length_a_sec',
field=models.IntegerField(default=0, verbose_name=b'\xd0\xa0\xd0\xb5\xd0\xb0\xd0\xbb\xd1\x8c\xd0\xbd\xd0\xb0\xd1\x8f \xd0\xb4\xd0\xbb\xd0\xb8\xd0\xbd\xd0\xb0 \xd1\x80\xd0\xb0\xd0\xb7\xd0\xb3\xd0\xbe\xd0\xb2\xd0\xbe\xd1\x80\xd0\xb0 \xd0\xbd\xd0\xb0 \xd1\x81\xd1\x82\xd0\xbe\xd1\x80\xd0\xbe\xd0\xbd\xd0\xb5 \xd0\xbe\xd0\xbf\xd0\xb5\xd1\x80\xd0\xb0\xd1\x82\xd0\xbe\xd1\x80\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='real_length_b_sec',
field=models.IntegerField(default=0, verbose_name=b'\xd0\xa0\xd0\xb5\xd0\xb0\xd0\xbb\xd1\x8c\xd0\xbd\xd0\xb0\xd1\x8f \xd0\xb4\xd0\xbb\xd0\xb8\xd0\xbd\xd0\xb0 \xd1\x80\xd0\xb0\xd0\xb7\xd0\xb3\xd0\xbe\xd0\xb2\xd0\xbe\xd1\x80\xd0\xb0 \xd0\xbd\xd0\xb0 \xd1\x81\xd1\x82\xd0\xbe\xd1\x80\xd0\xbe\xd0\xbd\xd0\xb5 \xd0\xba\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='record_url_a',
field=models.CharField(default=b'', max_length=255, verbose_name=b'\xd0\xa1\xd1\x81\xd1\x8b\xd0\xbb\xd0\xba\xd0\xb0 \xd0\xbd\xd0\xb0 \xd0\xb7\xd0\xb0\xd0\xbf\xd0\xb8\xd1\x81\xd1\x8c \xd1\x80\xd0\xb0\xd0\xb7\xd0\xb3\xd0\xbe\xd0\xb2\xd0\xbe\xd1\x80\xd0\xb0 \xd0\xbe\xd0\xbf\xd0\xb5\xd1\x80\xd0\xb0\xd1\x82\xd0\xbe\xd1\x80\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='record_url_b',
field=models.CharField(default=b'', max_length=255, verbose_name=b'\xd0\xa1\xd1\x81\xd1\x8b\xd0\xbb\xd0\xba\xd0\xb0 \xd0\xbd\xd0\xb0 \xd0\xb7\xd0\xb0\xd0\xbf\xd0\xb8\xd1\x81\xd1\x8c \xd1\x80\xd0\xb0\xd0\xb7\xd0\xb3\xd0\xbe\xd0\xb2\xd0\xbe\xd1\x80\xd0\xb0 \xd0\xba\xd0\xbb\xd0\xb8\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0'),
preserve_default=True,
),
migrations.AlterField(
model_name='callbackinfo',
name='referer',
field=models.CharField(default=b'' |
jakeret/abcpmc | abcpmc/sampler.py | Python | gpl-3.0 | 11,199 | 0.008661 | # abcpmc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# abcpmc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with abcpmc. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Oct 9, 2014
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
from multiprocessing.pool import Pool
from collections import namedtuple
import numpy as np
from scipy import stats
from scipy import spatial
__all__ = ["GaussianPrior",
"TophatPrior",
"ParticleProposal",
"KNNParticleProposal",
"OLCMParticleProposal",
"Sampler",
"PoolSpec",
"weighted_cov",
"weighted_avg_and_std"
]
class GaussianPrior(object):
"""
Normal gaussian prior
:param mu: scalar or vector of means
:param sigma: scalar variance or covariance matrix
"""
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
self._random = np.random.mtrand.RandomState()
def __call__(self, theta=None):
if theta is None:
return self._random.multivariate_normal(self.mu, self.sigma)
else:
return stats.multivariate_normal.pdf(theta, self.mu, self.sigma)
class TophatPrior(object):
"""
Tophat prior
:param min: scalar or array of min values
:param max: scalar or array of max values
"""
def __init__(self, min, max):
self.min = np.atleast_1d(min)
self.max = np.atleast_1d(max)
self._random = np.random.mtrand.RandomState()
assert self.min.shape == self.max.shape
assert np.all(self.min < self.max)
def __call__(self, theta=None):
if theta is None:
return np.array([self._random.uniform(mi, ma) for (mi, ma) in zip(self.min, self.max)])
else:
return 1 if np.all(theta < self.max) and np.all(theta >= self.min) else 0
class ParticleProposal(object):
"""
Creates new particles using twice the weighted covariance matrix (Beaumont et al. 2009)
"""
def __init__(self, sampler, eps, pool, kwargs):
self.postfn = sampler.postfn
self.distfn = sampler.dist
self._random = sampler._random
self.Y = sampler.Y
self.N = sampler.N
self.eps = np.asanyarray(eps)
self.pool = pool
self.kwargs = kwargs
self.sigma = 2 * weighted_cov(pool.thetas, pool.ws)
def __call__(self, i):
| # setting seed to prevent problem with multiprocessing
self._random.seed(i)
cnt = 1
while True:
idx = self._random.choice(r | ange(self.N), 1, p= self.pool.ws/np.sum(self.pool.ws))[0]
theta = self.pool.thetas[idx]
sigma = self._get_sigma(theta, **self.kwargs)
sigma = np.atleast_2d(sigma)
thetap = self._random.multivariate_normal(theta, sigma)
X = self.postfn(thetap)
p = np.asarray(self.distfn(X, self.Y))
if np.all(p <= self.eps):
break
cnt+=1
return thetap, p, cnt
def _get_sigma(self, theta):
return self.sigma
class KNNParticleProposal(ParticleProposal):
"""
Creates new particles using a covariance matrix from the K-nearest neighbours (Fillipi et al. 2012)
Set `k` as key-word arguement in `abcpmc.Sampler.particle_proposal_kwargs`
"""
def _get_sigma(self, theta, k):
tree = spatial.cKDTree(self.pool.thetas)
_, idxs = tree.query(theta, k, p=2)
sigma = np.cov(self.pool.thetas[idxs].T)
return sigma
class OLCMParticleProposal(ParticleProposal):
"""
Creates new particles using an optimal loacl covariance matrix (Fillipi et al. 2012)
"""
def _get_sigma(self, theta):
if len(self.eps.shape) == 0:
idx = self.pool.dists < self.eps
else:
idx = np.all(self.pool.dists < self.eps, axis=1)
thetas = self.pool.thetas[idx]
weights = self.pool.ws[idx]
weights = weights/np.sum(weights)
m = np.sum((weights * thetas.T).T, axis=0)
n = thetas.shape[1]
sigma = np.empty((n, n))
for i in range(n):
for j in range(n):
sigma[i, j] = np.sum(weights * (thetas[:, i] - m[i]) * (thetas[:, j] - m[j]).T) + (m[i] - theta[i]) * (m[j] - theta[j])
return sigma
"""Namedtuple representing a pool of one sampling iteration"""
PoolSpec = namedtuple("PoolSpec", ["t", "eps", "ratio", "thetas", "dists", "ws"])
class Sampler(object):
"""
ABC population monte carlo sampler
:param N: number of particles
:param Y: observed data set
:param postfn: model function (a callable), which creates a new dataset x for a given theta
:param dist: distance function rho(X, Y) (a callable)
:param threads: (optional) number of threads. If >1 and no pool is given <threads> multiprocesses will be started
:param pool: (optional) a pool instance which has a <map> function
"""
particle_proposal_cls = ParticleProposal
particle_proposal_kwargs = {}
def __init__(self, N, Y, postfn, dist, threads=1, pool=None):
self.N = N
self.Y = Y
self.postfn = postfn
self.dist = dist
self._random = np.random.mtrand.RandomState()
if pool is not None:
self.pool = pool
self.mapFunc = self.pool.map
elif threads == 1:
self.mapFunc = map
else:
self.pool = Pool(threads)
self.mapFunc = self.pool.map
def sample(self, prior, eps_proposal, pool=None):
"""
Launches the sampling process. Yields the intermediate results per iteration.
:param prior: instance of a prior definition (or an other callable) see :py:class:`sampler.GaussianPrior`
:param eps_proposal: an instance of a threshold proposal (or an other callable) see :py:class:`sampler.ConstEps`
:param pool: (optional) a PoolSpec instance,if not None the initial rejection sampling
will be skipped and the pool is used for the further sampling
:yields pool: yields a namedtuple representing the values of one iteration
"""
if pool is None:
eps = eps_proposal.next()
wrapper = _RejectionSamplingWrapper(self, eps, prior)
res = list(self.mapFunc(wrapper, self._random.randint(0, np.iinfo(np.uint32).max, self.N)))
thetas = np.array([theta for (theta, _, _) in res])
dists = np.array([dist for (_, dist, _) in res])
cnts = np.sum([cnt for (_, _, cnt) in res])
ws = np.ones(self.N) / self.N
pool = PoolSpec(0, eps, self.N/cnts, thetas, dists, ws)
yield pool
for t, eps in enumerate(eps_proposal, pool.t + 1):
particleProposal = self.particle_proposal_cls(self, eps, pool, self.particle_proposal_kwargs)
res = list(self.mapFunc(particleProposal, self._random.randint(0, np.iinfo(np.uint32).max, self.N)))
thetas = np.array([theta for (theta, _, _) in res])
dists = np.array([dist for (_, dist, _) in res])
cnts = np.sum([cnt for (_, _, cnt) in res])
sigma = 2 * weighted_cov(pool.thetas, pool.ws)
wrapper = _WeightWrapper(prior, sigma, pool.ws, pool.thetas)
wt = np.array(list(self.mapFunc(wrapper, thetas)))
ws = wt/np.sum(wt)
pool = PoolSpec(t, eps, |
dj80hd/certs | certs.py | Python | mit | 7,739 | 0.006719 | import sys
import os
import pexpect
def log(s):
print ">>>" + s
def error_exit(s):
print("ERROR: " + s)
sys.exit(99)
def check_file(fname):
if os.path.isfile(fname) and os.stat(fname).st_size > 0:
pass
else:
error_exit(fname + ' FAILED check.')
def remove_file_if_it_exists(f):
if os.path.isfile(f):
os.remove(f)
if (len(sys.argv)) < 2:
print "USAGE: python " + sys.argv[0] + " <domain>"
print "e.g. python " + sys.argv[0] + " foo.bar.com"
sys.exit(99)
domain = sys.argv[1]
passphrase = "0987654321"
DEBUG = True
ORG_NAME = "" #"Foo Bar"
ORG_UNIT_NAME = "" #"Foo Bar Unit"
CA_SRL_FILE = 'ca-cert.srl'
CA_KEY_FILE = 'ca-key.pem'
CA_CERT_FILE = 'ca-cert.pem'
SERVER_KEY_FILE = 'server-key.pem'
SERVER_CSR_FILE = 'server.csr'
SERVER_CERT_FILE = 'server-cert.pem'
CLIENT_KEY_FILE = 'client-key.pem'
CLIENT_CSR_FILE = 'client.csr'
CLIENT_CERT_FILE = 'client-cert.pem'
EXT_CONF_FILE = 'extfile.cnf'
#
# Create SRL FILE
#
remove_file_if_it_exists(CA_SRL_FILE)
os.system("echo 01 > " + CA_SRL_FILE)
check_file(CA_SRL_FILE)
#
# Generate CA key
#
remove_file_if_it_exists(CA_KEY_FILE)
cmd = 'openssl genrsa -des3 -out ' + CA_KEY_FILE + " 1024"
log("Creating a CA key: " + cmd)
p = pexpect.spawn(cmd)
p.expect('Enter pass phrase for ' + CA_KEY_FILE)
p.sendline(passphrase)
p.expect('Verifying - Enter pass phrase for ' + CA_KEY_FILE)
p.sendline(passphrase)
if not os.path.isfile(CA_KEY_FILE):
error_exit(CA_KEY_FILE + ' could not be created.')
p.terminate(force=True)
#
# Create a CA Certificate
#
remove_file_if_it_exists(CA_CERT_FILE)
cmd = 'openssl req -new -x509 -days 365 -key ' + CA_KEY_FILE + \
' -out ' + CA_CERT_FILE
log("Creating CA Certificate: " + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + CA_KEY_FILE)
p.sendline(passphrase)
p.expect_exact('Country Name (2 letter code) [AU]:',timeout=10)
p.sendline("")
p.expect_exact('State or Province Name (full name) [Some-State]:')
p.sendline("")
p.expect_exact('Locality Name (eg, city) []:')
p.sendline("")
# Organization Name (eg, company) [Internet Widgits Pty Ltd]:
p.expect_exact('Organization Name (eg, company) [Internet Widgits Pty Ltd]:')
p.sendline(ORG_NAME)
p.expect_exact('Organizational Unit Name (eg, section) []:')
p.sendline(ORG_UNIT_NAME)
p.expect_exact('Common Name (e.g. server FQDN or YOUR name) []:')
p.sendline(domain)
p.expect_exact('Email Address []:')
p.sendline("")
p.terminate(force=True)
check_file(CA_CERT_FILE)
#
# Create a server key
#
remove_file_if_it_exists(SERVER_KEY_FILE)
cmd = 'openssl genrsa -des3 -out ' + SERVER_KEY_FILE + " 1024"
log("Creating a Server Key: " + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + SERVER_KEY_FILE)
p.sendline(passphrase)
p.expect('Verifying - Enter pass phrase for ' + SERVER_KEY_FILE)
p.sendline(passphrase)
p.terminate(force=True)
check_file(SERVER_KEY_FILE)
#
# Create server CSR
#
remove_file_if_it_exists(SERVER_CSR_FILE)
cmd = 'openssl req -new -key ' + SERVER_KEY_FILE + ' -out ' + SERVER_CSR_FILE
log("Creating a Server CSR: " + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + SERVER_KEY_FILE)
p.sendline(passphrase)
p.expect_exact('Country Name (2 letter code) [AU]:',timeout=10)
p.sendline("")
p.expect_exact('State or Province Name (full name) [Some-State]:')
p.sendline("")
p.expect_exact('Locality Name (eg, city) []:')
p.s | endline("") |
p.expect_exact('Organization Name (eg, company) [Internet Widgits Pty Ltd]:')
p.sendline(ORG_NAME)
p.expect_exact('Organizational Unit Name (eg, section) []:')
p.sendline(ORG_UNIT_NAME)
p.expect_exact('Common Name (e.g. server FQDN or YOUR name) []:')
p.sendline(domain)
p.expect_exact('Email Address []:')
p.sendline("")
p.expect_exact('A challenge password []:')
p.sendline("")
p.expect_exact('An optional company name []:')
p.sendline("")
p.terminate(force=True)
check_file(SERVER_CSR_FILE)
#
# Create Signed Server Cert
#
remove_file_if_it_exists(SERVER_CERT_FILE)
log("Creating a Signed Server Certificate")
cmd = 'openssl x509 -req -days 365 -in ' + SERVER_CSR_FILE + ' -CA ' + CA_CERT_FILE + " -CAkey " + CA_KEY_FILE + ' -out ' + SERVER_CERT_FILE
log(cmd)
p = pexpect.spawn(cmd)
p.expect('Enter pass phrase for ' + CA_KEY_FILE)
p.sendline(passphrase)
p.sendline("")
p.terminate(force=True)
check_file(SERVER_CERT_FILE)
#
# Remove passphrase from server key
#
cmd = 'openssl rsa -in ' + SERVER_KEY_FILE + ' -out ' + SERVER_KEY_FILE
log('Remove passphrase from server key: ' + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + SERVER_KEY_FILE)
p.sendline(passphrase)
p.terminate(force=True)
check_file(SERVER_KEY_FILE)
#
# Generate client key
#
remove_file_if_it_exists(CLIENT_KEY_FILE)
cmd = 'openssl genrsa -des3 -out ' + CLIENT_KEY_FILE
log("Creating a Client Key: " + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + CLIENT_KEY_FILE)
p.sendline(passphrase)
p.expect('Verifying - Enter pass phrase for ' + CLIENT_KEY_FILE)
p.sendline(passphrase)
p.terminate(force=True)
check_file(CLIENT_KEY_FILE)
#
# Create client CSR
#
remove_file_if_it_exists(CLIENT_CSR_FILE)
cmd = 'openssl req -new -key ' + CLIENT_KEY_FILE + ' -out ' + CLIENT_CSR_FILE
log("Create Client CSR: " + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + CLIENT_KEY_FILE)
p.sendline(passphrase)
p.expect_exact('Country Name (2 letter code) [AU]:',timeout=10)
p.sendline("")
p.expect_exact('State or Province Name (full name) [Some-State]:')
p.sendline("")
p.expect_exact('Locality Name (eg, city) []:')
p.sendline("")
p.expect_exact('Organization Name (eg, company) [Internet Widgits Pty Ltd]:')
p.sendline(ORG_NAME)
p.expect_exact('Organizational Unit Name (eg, section) []:')
p.sendline(ORG_UNIT_NAME)
p.expect_exact('Common Name (e.g. server FQDN or YOUR name) []:')
p.sendline(domain)
p.expect_exact('Email Address []:')
p.sendline("")
p.expect_exact('A challenge password []:')
p.sendline("")
p.expect_exact('An optional company name []:')
p.sendline("")
p.terminate(force=True)
check_file(CLIENT_CSR_FILE)
#
# Add Client Authentication attributes
#
remove_file_if_it_exists(EXT_CONF_FILE)
os.system('echo extendedKeyUsage = clientAuth > ' + EXT_CONF_FILE )
check_file(EXT_CONF_FILE)
#
# Create Signed Client Cert
#
remove_file_if_it_exists(CLIENT_CERT_FILE)
cmd = 'openssl x509 -req -days 365 -in ' + CLIENT_CSR_FILE + ' -CA ' + CA_CERT_FILE + " -CAkey " + CA_KEY_FILE + ' -out ' + CLIENT_CERT_FILE + ' -extfile extfile.cnf'
log("Create Signed Client Cert: " + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + CA_KEY_FILE)
p.sendline(passphrase)
p.terminate(force=True)
check_file(CLIENT_CERT_FILE)
#
# Remove passphrase from Client key
#
cmd = 'openssl rsa -in ' + CLIENT_KEY_FILE + ' -out ' + CLIENT_KEY_FILE
log('Remove passphrase from client key: ' + cmd)
p = pexpect.spawn(cmd)
if DEBUG: p.logfile = sys.stdout
p.expect('Enter pass phrase for ' + CLIENT_KEY_FILE)
p.sendline(passphrase)
p.terminate(force=True)
check_file(CLIENT_KEY_FILE)
#
# No need to keep these
#
log('Deleting temp files')
remove_file_if_it_exists(EXT_CONF_FILE)
remove_file_if_it_exists(CLIENT_CSR_FILE)
remove_file_if_it_exists(SERVER_CSR_FILE)
remove_file_if_it_exists(CA_SRL_FILE)
print ("DONE. The following files were generated: \n%s\n%s\n%s\n%s\n%s\n%s\n" %
(CA_KEY_FILE, CA_CERT_FILE, SERVER_KEY_FILE, SERVER_CERT_FILE,
CLIENT_KEY_FILE, CLIENT_CERT_FILE))
|
Degreane/PsySys | Clients/dealersRouting.py | Python | gpl-3.0 | 9,487 | 0.046063 | from channels.auth import channel_session_user,channel_session_user_from_http,http_session
from channels.routing import route, route_class
from channels.sessions import channel_and_http_session,channel_session
import datetime
import json
import copy
import pprint as pp
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from Crypto.Hash import MD5
from hashlib import md5
from Crypto import Random
from Crypto.Cipher import AES
from random import choice
from base64 import b64encode,b64decode
from models import user
from models import plan
from mongoengine.queryset import Q
def derive_key_and_iv(password, salt, key_length, iv_length):
d = d_i = ''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length+iv_length]
def encrypt(data, password, key_length=32):
if len(data)%2 == 0:
data=data+" "
bs = AES.block_size
salt = Random.new().read(bs - len('Salted__'))
key, iv = derive_key_and_iv(password, salt, key_length, bs)
cipher = AES.new(key, AES.MODE_CBC, iv)
ch1='Salted__' + salt
#print ch1
if len(data) == 0 or len(data) % bs != 0:
padding_length = bs - (len(data) % bs)
data += padding_length * chr(padding_length)
return ch1+cipher.encrypt(data)
def decrypt(data, password, key_length=32):
bs = AES.block_size
salt = data[:bs][len('Salted__'):]
#print len(salt)
key, iv = derive_key_and_iv(password, salt, key_length, bs)
cipher = AES.new(key, AES.MODE_CBC, iv)
chunk=data[bs:]
unpadded_text=cipher.decrypt(chunk)
padding_length=ord(unpadded_text[-1])
#print ("padding Length {}".format(padding_length))
padded_text=unpadded_text[:-padding_length]
return padded_text
@channel_and_http_session
def connectedChannel(message):
encKey=MD5.new(str(message.reply_channel)).hexdigest()
decryptedJSON=decrypt(b64decode(message['text']),encKey)
messageJSON=json.loads(decryptedJSON)
pp.pprint(messageJSON)
pp.pprint(message)
pp.pprint("ConnectedChannel")
if message.http_session is None:
print("Session type None")
redirectPage="/"
redirectParam="InvalidSession=true"
encryptedRedirectParam=b64encode(encrypt(redirectParam,encKey))
message.reply_channel.send({
'text':json.dumps({'verdict':encryptedRedirectParam,'redirect':redirectPage})
})
if messageJSON['target'] == 'CU':
# need to get the CurrentUser Logged In.
CU=user.objects(pk=messageJSON['id'])
if CU.count() == 1:
encryptedCUJsonStr=b64encode(encrypt(CU[0].to_json(),encKey))
message.reply_channel.send({
'text':json.dumps({'CU':encryptedCUJsonStr})
})
else :
redirectPage="/LogOut"
message.reply_channel.send({
'text':json.dumps({'redirect':redirectPage})
})
if messageJSON['target'] == 'CHK' :
if message.http_session is None:
redirectPa | ge="/"
redirectParam="InvalidSession=true"
encryptedRedirectParam=b64encode(encrypt(redirectParam,encKey))
message.reply_channel.send({
'text':json.dumps({'verdict':encryptedRedirectParam,'redirect':redirectPage})
})
if messageJSON['target'] == 'CNTS':
QAll=Q(isDealer=True)
QEnabled=Q(isDealer=True) & Q(Enabled=True)
QDeleted=Q(isDealer=True) & Q(Deleted=True)
QDisabled=Q(isDealer=True) & Q(Enabled=False)
AllCount=user.objects(QAll).count()
EnabledCount=user.objects | (QEnabled).count()
DeletedCount=user.objects(QDeleted).count()
DisabledCount=user.objects(QDisabled).count()
CountsObj={
'All':AllCount,
'Ena':EnabledCount,
'Dis':DisabledCount,
'Del':DeletedCount
}
encryptedMSG=b64encode(encrypt(json.dumps(CountsObj),encKey))
message.reply_channel.send({
'text':json.dumps({'CNTS':encryptedMSG})
})
if messageJSON['target'] == 'DLRS':
QQuery=Q(isDealer=True)
if messageJSON['type']=='All':
QQuery=Q(isDealer=True)
elif messageJSON['type']=='Ena':
QQuery=Q(isDealer=True) & Q(Enabled=True)
elif messageJSON['type'] == 'Dis' :
QQuery=Q(isDealer=True) & Q(Enabled=False)
elif messageJSON['type']=='Del':
QQuery=Q(isDealer=True) & Q(Deleted=True)
theList=user.objects(QQuery)
encryptedMSG=b64encode(encrypt(theList.to_json(),encKey))
message.reply_channel.send({
'text':json.dumps({'DLRS':encryptedMSG})
})
if messageJSON['target'] == 'USR':
#pp.pprint(messageJSON['Who'])
QQuery=Q(isDealer=True) & Q(id=messageJSON['Who'])
theUser=user.objects(QQuery)
#pp.pprint(theUser[0].to_json())
encryptedMSG=b64encode(encrypt(theUser[0].to_json(),encKey))
message.reply_channel.send({
'text':json.dumps({'EUSR':encryptedMSG})
})
if messageJSON['target'] == 'AllPlans':
print("Getting All Plans Here As Should be returned ")
QQuery=Q(Enabled=True) & Q(Deleted=False)
thePlans=plan.objects(QQuery)
encryptedMSG=b64encode(encrypt(thePlans.to_json(),encKey))
message.reply_channel.send({
'text':json.dumps({'AllPlans':encryptedMSG})
})
if messageJSON['target'] == 'USRUPT' :
print(" Updating A User Of ID :({})".format(messageJSON['Who']))
currentDealer=copy.deepcopy(messageJSON['data'])
#get the currentDealerID to change
dealerID=messageJSON['Who']
#check for lgnName if exists.
#
lgnNameQuery=Q(lgnName=currentDealer['lgnName'])
idQuery=Q(_id=dealerID)
lgnNameFetch=user.objects(lgnNameQuery)
if lgnNameFetch.count() > 0 :
# if we have a lgnNameFetch Count >0 then we check for associated _id
theID=lgnNameFetch[0]['id']
if str(theID) == str(dealerID) :
# we proceed with Updates
#print("Matched ID continue")
if currentDealer.has_key('_id') :
del currentDealer['_id']
currentDealer['updatedAt']=datetime.datetime.now()
if currentDealer.has_key('InternalId') :
del currentDealer['InternalId']
if currentDealer.has_key('createdAt') :
del currentDealer['createdAt']
theDBDealer=user.objects(idQuery)
user.objects(id=dealerID).update(**currentDealer)
encryptedMSG=b64encode(encrypt(json.dumps({'Success':True}),encKey))
message.reply_channel.send({
'text':json.dumps({'dealerUPDT':encryptedMSG})
})
else:
# we should issue an error back
print("duplicate lgnName Error")
# ToDo
# Continue submitting Error from the server to the web browser.
if messageJSON['target'] == 'USRUPDTPLAN':
# ToDo Allow Updating Plans Here
#print("The Data {}".format(messageJSON['data']))
dealerID=messageJSON['Who']
#print("The ID To Be Fetched is {}".format(dealerID))
idQuery=Q(id=dealerID)
idFetch=user.objects(idQuery)
#print(" The ID Fetched is {} {}".format(idFetch[0],messageJSON['data']))
if idFetch.count() > 0:
currentUpdate=copy.deepcopy(messageJSON['data'])
user.objects(id=dealerID).update(**currentUpdate)
if messageJSON['target'] == 'USRUPDTCLIENT':
# ToDo
'''
Who = The Client ID To Update.
data = The data that needs to be updated.
We Have An Error in currentUpdate['Expires'] that should be fixed datetime.datetime.fromtimestamp(1493510400000/1000.0).strftime("%Y"-03-30T03:39:53.066Z) (REF: https://stackoverflow.com/questions/21787496/converting-epoch-time-with-milliseconds-to-datetime)
'''
clientID=messageJSON['Who']
idQuery=Q(id=clientID)
idFetch=user.objects(idQuery)
if idFetch.count() > 0:
currentUpdate=copy.deepcopy(messageJSON['data'])
del currentUpdate['_id']
currentUpdate['updatedAt']=datetime.datetime.now()
if currentUpdate.has_key('InternalId'):
del currentUpdate['InternalId']
if currentUpdate.has_key('createdAt'):
del currentUpdate['createdAt']
if currentUpdate.has_key('Expires'):
del currentUpdate['Expires']
user.objects(id=clientID).update(**currentUpdate)
if messageJSON['target'] == 'USRGETCLIENTS':
'''
Done Pick clients of Owner= 'Who'
'''
dealerID=messageJSON['Who']
clientsQuery=(Q(Owner=dealerID) &Q(isClient=True))
TheClients=user.objects(clientsQuery)
#print("No. Of Clients For the User is {}".format(TheClients.count()))
'''
Check if count is > 0
'''
if TheClients.count() > 0:
'''
if > 0 then we return the to_json back to the page.
'''
encryptedMSG=b64encode(encrypt(TheClients.to |
nathanlynch/atx-permit-scraper | src/session.py | Python | agpl-3.0 | 582 | 0.003436 | #!/usr/bin/python | 3
import requests
# url = "http://192.168.1.254"
landing_url = "https://www.austintexas.gov/devreview/a_queryfolder_permits.jsp?myWhere="
res_url = "https://www.austintexas.gov/devreview/d_showpropertyfolderlist.jsp?clicked=searchByOther"
parms = {'Lid': 'ReadOnlyaustin',
'zip': '78722',
'propertyStartDate': 'Apr 21, 2014',
'propertyEndDa | te': 'Apr 24, 2014'}
# print(parms)
s = requests.Session()
landing = s.get(landing_url)
results = s.post(res_url, data=parms)
# print(r.headers)
# print(r.request.headers)
print(results.text)
|
IfcOpenShell/IfcOpenShell | src/blenderbim/blenderbim/bim/module/drawing/gizmos.py | Python | lgpl-3.0 | 18,534 | 0.001133 | # BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Maxim Vasilyev <qwiglydee@gmail.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
import blf
import math
import gpu, bgl
from bpy import types
from mathutils import Vector, Matrix
from mathutils import geometry
from bpy_extras import view3d_utils
from blenderbim.bim.module.drawing.shaders import DotsGizmoShader, ExtrusionGuidesShader, BaseLinesShader
from ifcopenshell.util.unit import si_conversions
"""Gizmos under the hood
## Transforms:
source/blender/windowmanager/gizmo/WM_gizmo_types.h
matrix_basis -- "Transformation of this gizmo." = placement in scene
matrix_offset -- "Custom offset from origin." = local transforms according to state/value
matrix_space -- "The space this gizmo is being modified in." used by some gizmos for undefined purposes
matrix_world -- final matrix, scaled according to viewport zoom and custom scale_basis
source/blender/windowmanager/gizmo/intern/wm_gizmo.c:WM_gizmo_calc_matrix_final_params
final = space @ (autoscale * (basis @ offset))
final = space @ (basis @ offset) -- if gizmo.use_draw_scale == False
final = space @ ((autoscale * basis) @ offset) -- if gizmo.use_draw_offset_scale
source/blender/windowmanager/gizmo/intern/wm_gizmo.c:wm_gizmo_calculate_scale
autoscale = gizmo.scale_basis * magic(preferences, matrix_space, matrix_basis, context.region_data)
magic -- making 1.0 to match preferences.view.gizmo_size pixels (75 by default)
## Selection
select_id -- apparently, id of a selectable part
test_select -- expected to return id of selection, doesn't seem to work
draw_select -- fake-draw of selection geometry for gpu-side cursor tracking
"""
# some geometries for Gizmo.custom_shape shaders
CUBE = (
(+1, +1, +1),
(-1, +1, +1),
(+1, -1, +1), # top
(+1, -1, +1),
(-1, +1, +1),
(-1, -1, +1),
(+1, +1, +1),
(+1, -1, +1),
(+1, +1, -1), # right
(+1, +1, -1),
(+1, -1, +1),
(+1, -1, -1),
(+1, +1, +1),
(+1, +1, -1),
(-1, +1, +1), # back
(-1, +1, +1),
(+1, +1, -1),
(-1, +1, -1),
(-1, -1, -1),
(-1, +1, -1),
(+1, -1, -1), # bot
(+1, -1, -1),
(-1, +1, -1),
(+1, +1, -1),
(-1, -1, -1),
(-1, -1, +1),
(-1, +1, -1), # left
(-1, +1, -1),
(-1, -1, +1),
(-1, +1, +1),
(-1, -1, -1),
(+1, -1, -1),
(-1, -1, +1), # front
(-1, -1, +1),
(+1, -1, -1),
(+1, -1, +1),
)
DISC = (
(0.0, 0.0, 0.0),
(1.0, 0.0, 0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.0, 0.0, 0.0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.5000000000000001, 0.8660254037844386, 0),
(0.0, 0.0, 0.0),
(0.5000000000000001, 0.8660254037844386, 0),
(6.123233995736766e-17, 1.0, 0),
(0.0, 0.0, 0.0),
(6.123233995736766e-17, 1.0, 0),
(-0.4999999999999998, 0.8660254037844387, 0),
(0.0, 0.0, 0.0),
(-0.4999999999999998, 0.8660254037844387, 0),
(-0.8660254037844385, 0.5000000000000003, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844385, 0.5000000000000003, 0),
(-1.0, 1.2246467991473532e-16, 0),
(0.0, 0.0, 0.0),
(-1.0, 1.2246467991473532e-16, 0),
(-0.8660254037844388, -0.4999999999999997, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844388, -0.4999999999999997, 0),
(-0.5000000000000004, -0.8660 | 254037844384, 0),
(0.0, 0.0, 0.0),
(-0.5000000000000004, -0.8660254037844384, 0),
(-1.8369701987210297e-16, -1.0, 0),
(0.0, 0.0, 0.0),
(-1.8369701987210297e-16, -1.0, 0),
(0.49999999999999933, -0.866025403784439, 0),
(0.0, 0.0, 0.0),
(0.49999999999999933, -0.866025403784439, 0),
(0.8660254037844384, -0.5000000000000004, 0),
(0.0, 0.0, 0.0),
(0.8660254037844384, -0.5000000000000004, 0),
(1.0, 0.0, 0),
)
X3DISC = (
( | 0.0, 0.0, 0.0),
(1.0, 0.0, 0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.0, 0.0, 0.0),
(0.8660254037844387, 0.49999999999999994, 0),
(0.5000000000000001, 0.8660254037844386, 0),
(0.0, 0.0, 0.0),
(0.5000000000000001, 0.8660254037844386, 0),
(6.123233995736766e-17, 1.0, 0),
(0.0, 0.0, 0.0),
(6.123233995736766e-17, 1.0, 0),
(-0.4999999999999998, 0.8660254037844387, 0),
(0.0, 0.0, 0.0),
(-0.4999999999999998, 0.8660254037844387, 0),
(-0.8660254037844385, 0.5000000000000003, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844385, 0.5000000000000003, 0),
(-1.0, 1.2246467991473532e-16, 0),
(0.0, 0.0, 0.0),
(-1.0, 1.2246467991473532e-16, 0),
(-0.8660254037844388, -0.4999999999999997, 0),
(0.0, 0.0, 0.0),
(-0.8660254037844388, -0.4999999999999997, 0),
(-0.5000000000000004, -0.8660254037844384, 0),
(0.0, 0.0, 0.0),
(-0.5000000000000004, -0.8660254037844384, 0),
(-1.8369701987210297e-16, -1.0, 0),
(0.0, 0.0, 0.0),
(-1.8369701987210297e-16, -1.0, 0),
(0.49999999999999933, -0.866025403784439, 0),
(0.0, 0.0, 0.0),
(0.49999999999999933, -0.866025403784439, 0),
(0.8660254037844384, -0.5000000000000004, 0),
(0.0, 0.0, 0.0),
(0.8660254037844384, -0.5000000000000004, 0),
(1.0, 0.0, 0),
(0.0, 0.0, 0.0),
(0, 1.0, 0.0),
(0, 0.8660254037844387, 0.49999999999999994),
(0.0, 0.0, 0.0),
(0, 0.8660254037844387, 0.49999999999999994),
(0, 0.5000000000000001, 0.8660254037844386),
(0.0, 0.0, 0.0),
(0, 0.5000000000000001, 0.8660254037844386),
(0, 6.123233995736766e-17, 1.0),
(0.0, 0.0, 0.0),
(0, 6.123233995736766e-17, 1.0),
(0, -0.4999999999999998, 0.8660254037844387),
(0.0, 0.0, 0.0),
(0, -0.4999999999999998, 0.8660254037844387),
(0, -0.8660254037844385, 0.5000000000000003),
(0.0, 0.0, 0.0),
(0, -0.8660254037844385, 0.5000000000000003),
(0, -1.0, 1.2246467991473532e-16),
(0.0, 0.0, 0.0),
(0, -1.0, 1.2246467991473532e-16),
(0, -0.8660254037844388, -0.4999999999999997),
(0.0, 0.0, 0.0),
(0, -0.8660254037844388, -0.4999999999999997),
(0, -0.5000000000000004, -0.8660254037844384),
(0.0, 0.0, 0.0),
(0, -0.5000000000000004, -0.8660254037844384),
(0, -1.8369701987210297e-16, -1.0),
(0.0, 0.0, 0.0),
(0, -1.8369701987210297e-16, -1.0),
(0, 0.49999999999999933, -0.866025403784439),
(0.0, 0.0, 0.0),
(0, 0.49999999999999933, -0.866025403784439),
(0, 0.8660254037844384, -0.5000000000000004),
(0.0, 0.0, 0.0),
(0, 0.8660254037844384, -0.5000000000000004),
(0, 1.0, 0.0),
(0.0, 0.0, 0.0),
(0.0, 0, 1.0),
(0.49999999999999994, 0, 0.8660254037844387),
(0.0, 0.0, 0.0),
(0.49999999999999994, 0, 0.8660254037844387),
(0.8660254037844386, 0, 0.5000000000000001),
(0.0, 0.0, 0.0),
(0.8660254037844386, 0, 0.5000000000000001),
(1.0, 0, 6.123233995736766e-17),
(0.0, 0.0, 0.0),
(1.0, 0, 6.123233995736766e-17),
(0.8660254037844387, 0, -0.4999999999999998),
(0.0, 0.0, 0.0),
(0.8660254037844387, 0, -0.4999999999999998),
(0.5000000000000003, 0, -0.8660254037844385),
(0.0, 0.0, 0.0),
(0.5000000000000003, 0, -0.8660254037844385),
(1.2246467991473532e-16, 0, -1.0),
(0.0, 0.0, 0.0),
(1.2246467991473532e-16, 0, -1.0),
(-0.4999999999999997, 0, -0.8660254037844388),
(0.0, 0.0, 0.0),
(-0.4999999999999997, 0, -0.8660254037844388),
(-0.8660254037844384, 0, -0.5000000000000004),
(0.0, 0.0, 0.0),
(-0.8660254037844384, 0, -0.5000000000000004),
(-1.0, 0, -1.8369701987210297e-16),
(0.0, 0.0, 0.0),
(-1.0, 0, -1.8369701987210297e-16),
(-0.866025403 |
alfasin/st2 | st2client/st2client/client.py | Python | apache-2.0 | 6,290 | 0.003816 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from st2client import models
from st2client.models.core import ResourceManager
from st2client.models.core import ActionAliasResourceManager
from st2client.models.core import LiveActionResourceManager
from st2client.models.core import TriggerInstanceResourceManager
LOG = logging.getLogger(__name__)
# Default values for the options not explicitly specified by the user
DEFAULT_API_PORT = 9101
DEFAULT_AUTH_PORT = 9100
DEFAULT_BASE_URL = 'http://127.0.0.1'
DEFAULT_API_VERSION = 'v1'
class Client(object):
    """Client for the StackStorm (st2) auth and API services.

    Endpoint URLs are resolved from the constructor arguments first, then
    from the corresponding ``ST2_*`` environment variables, and finally
    from the local-host defaults defined at module level.  One resource
    manager is instantiated per exposed API resource and made available
    through the read-only properties below.

    :raises ValueError: if a CA cert path is given but the file is missing.
    """

    def __init__(self, base_url=None, auth_url=None, api_url=None, api_version=None, cacert=None,
                 debug=False, token=None):
        # Get CLI options. If not given, then try to get it from the environment.
        self.endpoints = dict()

        # Populate the endpoints: explicit argument wins, then the
        # environment variable, then the module-level default.
        if base_url:
            self.endpoints['base'] = base_url
        else:
            self.endpoints['base'] = os.environ.get('ST2_BASE_URL', DEFAULT_BASE_URL)

        api_version = api_version or os.environ.get('ST2_API_VERSION', DEFAULT_API_VERSION)

        if api_url:
            self.endpoints['api'] = api_url
        else:
            self.endpoints['api'] = os.environ.get(
                'ST2_API_URL', '%s:%s/%s' % (self.endpoints['base'], DEFAULT_API_PORT, api_version))

        if auth_url:
            self.endpoints['auth'] = auth_url
        else:
            self.endpoints['auth'] = os.environ.get(
                'ST2_AUTH_URL', '%s:%s' % (self.endpoints['base'], DEFAULT_AUTH_PORT))

        if cacert:
            self.cacert = cacert
        else:
            self.cacert = os.environ.get('ST2_CACERT', None)

        if self.cacert and not os.path.isfile(self.cacert):
            raise ValueError('CA cert file "%s" does not exist.' % (self.cacert))

        self.debug = debug

        # Note: This is a nasty hack for now, but we need to get rid of the decrator abuse
        if token:
            os.environ['ST2_AUTH_TOKEN'] = token

        self.token = token

        # Instantiate resource managers and assign appropriate API endpoint.
        # Table of (manager key, model class, manager class, endpoint key);
        # only Token talks to the auth service, everything else uses the API.
        resources = [
            ('Token', models.Token, ResourceManager, 'auth'),
            ('RunnerType', models.RunnerType, ResourceManager, 'api'),
            ('Action', models.Action, ResourceManager, 'api'),
            ('ActionAlias', models.ActionAlias, ActionAliasResourceManager, 'api'),
            ('ApiKey', models.ApiKey, ResourceManager, 'api'),
            ('LiveAction', models.LiveAction, LiveActionResourceManager, 'api'),
            ('Policy', models.Policy, ResourceManager, 'api'),
            ('PolicyType', models.PolicyType, ResourceManager, 'api'),
            ('Rule', models.Rule, ResourceManager, 'api'),
            ('Sensor', models.Sensor, ResourceManager, 'api'),
            ('TriggerType', models.TriggerType, ResourceManager, 'api'),
            ('Trigger', models.Trigger, ResourceManager, 'api'),
            ('TriggerInstance', models.TriggerInstance, TriggerInstanceResourceManager, 'api'),
            ('KeyValuePair', models.KeyValuePair, ResourceManager, 'api'),
            ('Webhook', models.Webhook, ResourceManager, 'api'),
            ('Trace', models.Trace, ResourceManager, 'api'),
        ]
        self.managers = dict()
        for name, model, manager_cls, endpoint in resources:
            self.managers[name] = manager_cls(
                model, self.endpoints[endpoint], cacert=self.cacert, debug=self.debug)

    @property
    def actions(self):
        return self.managers['Action']

    @property
    def apikeys(self):
        return self.managers['ApiKey']

    @property
    def keys(self):
        return self.managers['KeyValuePair']

    @property
    def liveactions(self):
        return self.managers['LiveAction']

    @property
    def policies(self):
        return self.managers['Policy']

    @property
    def policytypes(self):
        return self.managers['PolicyType']

    @property
    def rules(self):
        return self.managers['Rule']

    @property
    def runners(self):
        return self.managers['RunnerType']

    @property
    def sensors(self):
        return self.managers['Sensor']

    @property
    def tokens(self):
        return self.managers['Token']

    @property
    def triggertypes(self):
        return self.managers['TriggerType']

    @property
    def triggerinstances(self):
        return self.managers['TriggerInstance']

    @property
    def trace(self):
        return self.managers['Trace']
|
freerangerouting/frr | tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py | Python | gpl-2.0 | 28,602 | 0.001923 | #!/usr/bin/python
#
# Copyright (c) 2020 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation,
# Inc. ("NetDEF") in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
-Verify static route ECMP functionality with 8 next hop
-Verify static route functionality with 8 next hop different AD value
-Verify static route with tag option
-Verify BGP did not install the static route when it receive route
with local next hop
"""
import sys
import time
import os
import pytest
import platform
import random
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
reset_config_on_routers,
verify_rib,
create_static_routes,
check_address_types,
step,
shutdown_bringup_interface | ,
stop_router,
start_router,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, | verify_bgp_rib
from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
NETWORK = {
"ipv4": [
"11.0.20.1/32",
"11.0.20.2/32",
"11.0.20.3/32",
"11.0.20.4/32",
"11.0.20.5/32",
"11.0.20.6/32",
"11.0.20.7/32",
"11.0.20.8/32",
],
"ipv6": [
"2::1/128",
"2::2/128",
"2::3/128",
"2::4/128",
"2::5/128",
"2::6/128",
"2::7/128",
"2::8/128",
],
}
PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"}
NETWORK2 = {"ipv4": ["11.0.20.1/32"], "ipv6": ["2::1/128"]}
NEXT_HOP_IP = []
def setup_module(mod):
    """
    Set up the pytest environment.
    * `mod`: module name

    Builds the topology described in static_routes_topo3_ibgp.json, starts
    the routers, applies the JSON-derived configuration and waits for BGP
    convergence.  Skips the whole suite on kernels older than 4.19 or if
    any router failed to come up.
    """
    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: {}".format(testsuite_run_time))
    logger.info("=" * 40)
    logger.info("Running setup_module to create topology")
    # This function initiates the topology build with Topogen...
    json_file = "{}/static_routes_topo3_ibgp.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    # Expose the parsed topology to the test cases and populate_nh().
    global topo
    topo = tgen.json_topo
    # ... and here it calls Mininet initialization functions.
    # Starting topology, create tmp files which are loaded to routers
    # to start deamons and then start routers
    start_topology(tgen)
    # Creating configuration from JSON
    build_config_from_json(tgen, topo)
    # The scenario needs kernel features only available from 4.19 onwards.
    if version_cmp(platform.release(), "4.19") < 0:
        error_msg = (
            'These tests will not run. (have kernel "{}", '
            "requires kernel >= 4.19)".format(platform.release())
        )
        pytest.skip(error_msg)
    # Checking BGP convergence
    global BGP_CONVERGENCE
    global ADDR_TYPES
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    # Api call verify whether BGP is converged
    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
        BGP_CONVERGENCE
    )
    logger.info("Running setup_module() done")
def teardown_module(mod):
    """
    Teardown the pytest environment
    * `mod`: module name
    """
    logger.info("Running teardown_module to delete topology")
    # Stop the topology and remove the temporary files created for the run.
    get_topogen().stop_topology()
    end_time = time.asctime(time.localtime(time.time()))
    logger.info(
        "Testsuite end time: {}".format(end_time)
    )
    logger.info("=" * 40)
def populate_nh():
    """Build the nh1..nh8 next-hop address map from the r1<->r2 links.

    Reads the module-global ``topo``: next hop ``nh<i>`` is taken from
    link ``r2-link<i-1>`` on r1, with the prefix length stripped from
    each IPv4/IPv6 address.
    """
    nh_map = {}
    for idx in range(8):
        link = topo["routers"]["r1"]["links"]["r2-link{}".format(idx)]
        nh_map["nh{}".format(idx + 1)] = {
            "ipv4": link["ipv4"].split("/")[0],
            "ipv6": link["ipv6"].split("/")[0],
        }
    return nh_map
#####################################################
#
# Tests starting
#
#####################################################
def test_staticroute_with_ecmp_p0_tc3_ibgp(request):
"""
Verify static route ECMP functionality with 8 next hop'
"""
tc_name = request.node.name
write_test_header(tc_name)
tgen = get_topogen()
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
reset_config_on_routers(tgen)
NEXT_HOP_IP = populate_nh()
step("Configure 8 interfaces / links between R1 and R2,")
step(
"Configure IPv4 static route in R2 with 8 next hop"
"N1(21.1.1.2), N2(22.1.1.2), N3(23.1.1.2), N4(24.1.1.2),"
"N5(25.1.1.2), N6(26.1.1.2), N7(27.1.1.2),N8(28.1.1.2), Static"
"route next-hop present on R1"
)
step("Configure IBGP IPv4 peering between R2 and R3 router.")
for addr_type in ADDR_TYPES:
# Enable static routes
for nhp in range(1, 9):
input_dict_4 = {
"r2": {
"static_routes": [
{
"network": PREFIX1[addr_type],
"next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type],
}
]
}
}
logger.info("Configure static routes")
result = create_static_routes(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
logger.info("Verifying %s routes on r2", addr_type)
nh = [
NEXT_HOP_IP["nh1"][addr_type],
NEXT_HOP_IP["nh2"][addr_type],
NEXT_HOP_IP["nh3"][addr_type],
NEXT_HOP_IP["nh4"][addr_type],
NEXT_HOP_IP["nh5"][addr_type], |
CiscoSystems/avos | openstack_dashboard/dashboards/project/stacks/forms.py | Python | apache-2.0 | 15,580 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo.utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
class TemplateForm(forms.SelfHandlingForm):
class Meta:
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
' | file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File') | ,
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment',
_('environment'),
cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['template_data']:
kwargs['template'] = cleaned['template_data']
else:
kwargs['template_url'] = cleaned['template_url']
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
except Exception as e:
raise forms.ValidationError(unicode(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type input_type: dict
:param prefix: existing cleaned fields from form
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data'],
'template_data': data['template_data'],
'template_url': data['template_url']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta:
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField |
nirs/vdsm | lib/vdsm/storage/operation.py | Python | gpl-2.0 | 6,412 | 0 | #
# Copyright 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fift | h Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import errno
import logging
import threading
from vdsm import utils
from vdsm.common import cmdutils
from vdsm.common import commands
from vdsm.common.compat import subprocess
from vdsm.common import exception
# Operation states
# Operation was created but not started yet
CREATED = "created"
# The operation was started
RUNNING = "running"
# Th | e operation has terminated
TERMINATED = "terminated"
# Abort was called when the operation was running.
ABORTING = "aborting"
# The operation was aborted and is not running.
ABORTED = "aborted"
log = logging.getLogger("storage.operation")
class Command(object):
    """
    Simple storage command that does not support progress.

    Wraps a single child process and tracks it through a small state
    machine: CREATED -> RUNNING -> TERMINATED on the happy path, with
    ABORTING/ABORTED entered when abort() is called.  All state
    transitions are serialized by self._lock so abort() may safely be
    invoked from another thread while run()/watch() is in progress.
    """

    def __init__(self, cmd, cwd=None, nice=utils.NICENESS.HIGH,
                 ioclass=utils.IOCLASS.IDLE):
        # cmd: argument list for the child process.
        # cwd: working directory for the child (None = inherit ours).
        # nice/ioclass: default to the lowest CPU and I/O priority so
        # storage commands do not starve the rest of the system.
        self._cmd = cmd
        self._cwd = cwd
        self._nice = nice
        self._ioclass = ioclass
        self._lock = threading.Lock()
        self._state = CREATED
        self._proc = None

    def run(self):
        """
        Run a command, collecting data from the underlying process stdout and
        stderr, and returning the collected output.
        Data read from stderr is collected and will be included in the
        cmdutils.Error raised if the underlying command failed.
        Raises:
            `RuntimeError` if invoked more than once
            `exception.ActionStopped` if the command was aborted
            `cmdutils.Error` if the command failed
        """
        self._start_process()
        # communicate() blocks until the child exits and reaps it.
        out, err = self._proc.communicate()
        self._finalize(out, err)
        return out

    def watch(self):
        """
        Run a command, iterating on data received from underlying command
        stdout.
        Data read from stderr is collected and will be included in the
        cmdutils.Error raised if the underlying command failed.
        Raises:
            `RuntimeError` if invoked more than once
            `exception.ActionStopped` if the command was aborted
            `cmdutils.Error` if the command failed
        """
        self._start_process()
        err = bytearray()
        for src, data in cmdutils.receive(self._proc):
            if src == cmdutils.OUT:
                # Stream stdout chunks to the caller as they arrive.
                yield data
            else:
                err += data
        self._finalize(b"", err)

    def abort(self):
        """
        Attempt to terminate the child process from another thread.
        Does not wait for the child process; the thread running this process
        will wait for the process. The caller must not assume that the
        operation was aborted when this returns.
        May be invoked multiple times.
        Raises:
            OSError if killing the underlying process failed.
        """
        with self._lock:
            if self._state == CREATED:
                # Not started yet: mark aborted so _start_process() bails out.
                log.debug("%s not started yet", self)
                self._state = ABORTED
            elif self._state == RUNNING:
                self._state = ABORTING
                log.info("Aborting %s", self)
                self._kill_process()
            elif self._state == ABORTING:
                # A previous kill may have raced with process exit; retry.
                log.info("Retrying abort %s", self)
                self._kill_process()
            elif self._state == TERMINATED:
                log.debug("%s has terminated", self)
            elif self._state == ABORTED:
                log.debug("%s was aborted", self)
            else:
                raise RuntimeError("Invalid state: %s" % self)

    def _start_process(self):
        """
        Start the underlying process.
        Raises:
            `RuntimeError` if invoked more than once
            `exception.ActionStopped` if abort() was called before start
        """
        with self._lock:
            if self._state == ABORTED:
                raise exception.ActionStopped
            if self._state != CREATED:
                raise RuntimeError("Attempt to run an operation twice")
            self._proc = commands.start(
                self._cmd,
                cwd=self._cwd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                nice=self._nice,
                ioclass=self._ioclass)
            self._state = RUNNING

    def _finalize(self, out, err):
        """
        Update operation state after underlying process has terminated.
        Raises:
            `exception.ActionStopped` if the command was aborted
            `cmdutils.Error` if the command failed
            `RuntimeError` if operation state is invalid
        """
        # The caller (run/watch) has already waited for the child, so
        # returncode is valid at this point.
        rc = self._proc.returncode
        log.debug(cmdutils.retcode_log_line(rc, err))
        with self._lock:
            self._proc = None
            if self._state == ABORTING:
                # abort() won the race: report the stop, not the exit code.
                self._state = ABORTED
                raise exception.ActionStopped
            elif self._state == RUNNING:
                self._state = TERMINATED
                if rc != 0:
                    raise cmdutils.Error(self._cmd, rc, out, err)
            else:
                raise RuntimeError("Invalid state: %s" % self)

    def _kill_process(self):
        """
        Must be called when holding the command lock.
        """
        if self._proc.poll() is not None:
            # Child already exited; nothing to kill.
            log.debug("%s has terminated", self)
            return
        try:
            self._proc.kill()
        except OSError as e:
            # ESRCH means the process died between poll() and kill();
            # treat that as a successful abort rather than an error.
            if e.errno != errno.ESRCH:
                raise
            log.debug("%s has terminated", self)

    def __repr__(self):
        s = "<Command {self._cmd} {self._state}, cwd={self._cwd} at {addr:#x}>"
        return s.format(self=self, addr=id(self))
|
jeanparpaillon/kerrighed-tools | libs/kerrighed.py | Python | gpl-2.0 | 9,679 | 0.005992 | #
# kerrighed.py - A Python interface to libkerrighed
#
# Copyright (c) 2009 Kerlabs
# Author: Jean Parpaillon <jean.parpaillon@kerlabs.com>
#
LIBKERRIGHED_VERSION = 2
import os
import ctypes
from ctypes import *
libkerrighed_soname = "libkerrighed.so.%i" % LIBKERRIGHED_VERSION
try:
if 'get_errno' in dir(ctypes):
get_errno = ctypes.get_errno
libkerrighed = CDLL(libkerrighed_soname, use_errno=True)
else:
get_errno = None
libkerrighed = CDLL(libkerrighed_soname)
except OSError, ose:
print "Can not import " + libkerrighed_soname
raise SystemExit(1)
class kerrighed_error(Exception):
    """Raised when a libkerrighed call reports failure (returns -1)."""
    pass
class krg_error_handler(object):
    """Error-checking wrapper installed as a ctypes ``restype`` callable.

    ctypes invokes the instance with the raw integer result of the
    foreign function.  A result of -1 signals failure and is converted
    into a ``kerrighed_error`` (including the errno message when the
    ctypes build provides ``get_errno``); any other value passes through
    unchanged.
    """

    def __init__(self, func):
        super(krg_error_handler, self).__init__()
        # The wrapped foreign function; kept for the error message.
        self.func = func

    def __call__(self, value):
        if value == -1:
            # Bug fixes vs. the original: the branches were inverted
            # (strerror was only used when get_errno was *unavailable*),
            # the attribute was misspelled `__name___`, and the two-slot
            # format string received a single argument.
            if get_errno is not None:
                msg = "error in %s: %s" % (self.func.__name__,
                                           os.strerror(get_errno()))
            else:
                msg = "error in %s" % self.func.__name__
            raise kerrighed_error(msg)
        return value
#
# From hotplug.h
#
# Module-level initialisation: bring up the hotplug subsystem and read the
# cluster size limits exported as C globals by the shared library.
libkerrighed.krg_hotplug_init()
kerrighed_max_nodes = c_int.in_dll(libkerrighed, "kerrighed_max_nodes")
kerrighed_max_clusters = c_int.in_dll(libkerrighed, "kerrighed_max_clusters")

# ctypes mirrors of the structures handed out by libkerrighed. Instances
# are only ever used through the POINTER types returned by the library.
# NOTE(review): only the leading fields are declared here; confirm the full
# struct layouts against hotplug.h before dereferencing anything else.
class krg_nodes_t(Structure):
    _fields_ = [("nodes", c_char_p)]
krg_nodes_ptr_t = POINTER(krg_nodes_t)

class krg_clusters_t(Structure):
    _fields_ = [("clusters", c_char_p)]
krg_clusters_ptr_t = POINTER(krg_clusters_t)

class krg_node_set_t(Structure):
    _fields_ = [("subclusterid", c_int),
                ("v", c_char_p)]
krg_node_set_ptr_t = POINTER(krg_node_set_t)

# Declare the return types of the C entry points. Integer-returning calls
# are wrapped in krg_error_handler so that a -1 return raises
# kerrighed_error; pointer-returning calls get the matching ctypes type.
libkerrighed.krg_nodes_create.restype = krg_nodes_ptr_t
libkerrighed.krg_nodes_num.restype = krg_error_handler(libkerrighed.krg_nodes_num)
libkerrighed.krg_nodes_num_online.restype = krg_error_handler(libkerrighed.krg_nodes_num_online)
libkerrighed.krg_nodes_num_possible.restype = krg_error_handler(libkerrighed.krg_nodes_num_possible)
libkerrighed.krg_nodes_num_present.restype = krg_error_handler(libkerrighed.krg_nodes_num_present)
libkerrighed.krg_nodes_is.restype = krg_error_handler(libkerrighed.krg_nodes_is)
libkerrighed.krg_nodes_is_online.restype = krg_error_handler(libkerrighed.krg_nodes_is_online)
libkerrighed.krg_nodes_is_possible.restype = krg_error_handler(libkerrighed.krg_nodes_is_possible)
libkerrighed.krg_nodes_is_present.restype = krg_error_handler(libkerrighed.krg_nodes_is_present)
libkerrighed.krg_nodes_get.restype = krg_node_set_ptr_t
libkerrighed.krg_nodes_get_online.restype = krg_node_set_ptr_t
libkerrighed.krg_nodes_get_possible.restype = krg_node_set_ptr_t
libkerrighed.krg_nodes_get_present.restype = krg_node_set_ptr_t
libkerrighed.krg_nodes_getnode.restype = c_int
libkerrighed.krg_nodes_nextnode.restype = c_int
libkerrighed.krg_clusters_create.restype = krg_clusters_ptr_t
libkerrighed.krg_clusters_is_up.restype = krg_error_handler(libkerrighed.krg_clusters_is_up)
libkerrighed.krg_node_set_create.restype = krg_node_set_ptr_t
libkerrighed.krg_node_set_add.restype = krg_error_handler(libkerrighed.krg_node_set_add)
libkerrighed.krg_node_set_remove.restype = krg_error_handler(libkerrighed.krg_node_set_remove)
libkerrighed.krg_node_set_contains.restype = krg_error_handler(libkerrighed.krg_node_set_contains)
libkerrighed.krg_node_set_weight.restype = krg_error_handler(libkerrighed.krg_node_set_weight)
libkerrighed.krg_node_set_next.restype = c_int
libkerrighed.krg_status_str.restype = c_char_p
libkerrighed.krg_nodes_status.restype = krg_nodes_ptr_t
libkerrighed.krg_cluster_status.restype = krg_clusters_ptr_t
libkerrighed.krg_cluster_shutdown.restype = krg_error_handler(libkerrighed.krg_cluster_shutdown)
libkerrighed.krg_cluster_reboot.restype = krg_error_handler(libkerrighed.krg_cluster_reboot)
libkerrighed.krg_nodes_add.restype = krg_error_handler(libkerrighed.krg_nodes_add)
libkerrighed.krg_nodes_remove.restype = krg_error_handler(libkerrighed.krg_nodes_remove)
class krg_nodes(object):
    """Python wrapper around a libkerrighed krg_nodes_t structure.

    Exposes per-node status queries (possible / present / online) and
    iterates over node ids. The underlying C object is owned by this
    wrapper and freed when it is garbage collected.
    """
    def __init__(self, _c=None):
        # _c: optional pre-existing krg_nodes_ptr_t; allocate one if absent.
        self.cur = -1  # iteration cursor used by next()
        if not _c:
            _c = libkerrighed.krg_nodes_create()
            if not _c:
                # Bug fix: the original raised krg_error_handler, which is
                # a wrapper class, not an exception type (TypeError).
                raise kerrighed_error("krg_nodes_create returned NULL")
        self.c = _c
    def __del__(self):
        libkerrighed.krg_nodes_destroy(self.c)
    def getnode(self, node):
        """Return the status code of *node*, or raise IndexError."""
        ret = libkerrighed.krg_nodes_getnode(self.c, node)
        if ret >= 0:
            return ret
        else:
            raise IndexError
    def __getitem__(self, node):
        return self.getnode(node)
    def __setitem__(self, node, status):
        # Bug fix: "raise NotImplemented" raised a TypeError because
        # NotImplemented is a constant, not an exception. Node statuses
        # are read-only through this wrapper.
        raise NotImplementedError
    def num_possible(self):
        return libkerrighed.krg_nodes_num_possible(self.c)
    def num_present(self):
        return libkerrighed.krg_nodes_num_present(self.c)
    def num_online(self):
        return libkerrighed.krg_nodes_num_online(self.c)
    def is_possible(self, node):
        return libkerrighed.krg_nodes_is_possible(self.c, node) == 1
    def is_present(self, node):
        return libkerrighed.krg_nodes_is_present(self.c, node) == 1
    def is_online(self, node):
        return libkerrighed.krg_nodes_is_online(self.c, node) == 1
    def get_possible(self):
        """Return the set of possible nodes as a krg_node_set."""
        ret = libkerrighed.krg_nodes_get_possible(self.c)
        # Bug fix: a NULL ctypes pointer is falsy but never None, so the
        # original "is None" check could not detect failure (and its
        # message passed a function object to os.strerror).
        if not ret:
            raise kerrighed_error("krg_nodes_get_possible returned NULL")
        return krg_node_set(ret)
    def get_present(self):
        """Return the set of present nodes as a krg_node_set."""
        ret = libkerrighed.krg_nodes_get_present(self.c)
        if not ret:
            raise kerrighed_error("krg_nodes_get_present returned NULL")
        return krg_node_set(ret)
    def get_online(self):
        """Return the set of online nodes as a krg_node_set."""
        ret = libkerrighed.krg_nodes_get_online(self.c)
        if not ret:
            raise kerrighed_error("krg_nodes_get_online returned NULL")
        return krg_node_set(ret)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: the C library advances the cursor
        # and returns a negative id once all nodes have been visited.
        self.cur = libkerrighed.krg_nodes_nextnode(self.c, self.cur)
        if self.cur >= 0:
            return self.cur
        else:
            raise StopIteration
    def __str__(self):
        return "\n".join(map(lambda n: "%d:%s" % (n, krg_status_str(self[n])),
                             self))
    def __repr__(self):
        return "[" + ",".join(map(lambda n: "%d:%d" % (n, self[n]),
                                  self)) + "]"
class krg_clusters(object):
    """Python wrapper around a libkerrighed krg_clusters_t structure."""
    def __init__(self, _c=None):
        # _c: optional pre-existing krg_clusters_ptr_t; allocate if absent.
        if not _c:
            _c = libkerrighed.krg_clusters_create()
            if not _c:
                # Bug fix: the original raised krg_error_handler, which is
                # a wrapper class, not an exception type (TypeError).
                raise kerrighed_error("krg_clusters_create returned NULL")
        self.c = _c
    def __del__(self):
        libkerrighed.krg_clusters_destroy(self.c)
    def is_up(self, n=0):
        """Return True if cluster *n* (default: first cluster) is up."""
        return libkerrighed.krg_clusters_is_up(self.c, n) == 1
class krg_node_set(object):
"""
Implements Python set
"""
def __init__(self, _c=None):
self.cur = -1
self.pop_cur = -1
if not _c:
_c = libkerrighed.krg_node_set_create()
if not _c:
raise krg_error_handler("krg_node_set_create returned NULL")
self.c = _c
def __del__(self):
libkerrighed.krg_node_set_destroy(self.c)
def add(self, n):
return libkerrighed.krg_node_set_add(self.c, n)==1
def discard(self, n):
return libkerrighed.krg_node_set_remove(self.c, n)==1
def contains(self, n):
return libkerrighed.krg_node_set_contains(self.c, n)==1
def __contains__(self, n):
return self.contains(n)
def weight(self):
return libkerrighed.krg_node_set_weight(self.c)
def __len__(self):
return self.weight()
def next(self):
self.cur = libkerrighed.krg_node_set_next(self.c, self.cur)
if self.cur>=0:
return self.cur
else:
rais |
Turgon37/SMSShell | tests/test_cmdline_parser.py | Python | gpl-3.0 | 2,168 | 0.004613 | # -*- coding: utf8 -*-
import json
import os
import shlex
import subprocess
# command line test
# command line test
def test_cmdline_help():
    """--help must print the argparse usage text on stdout."""
    result = subprocess.Popen(shlex.split('./bin/sms-shell-parser --help'), stdout=subprocess.PIPE)
    stdout, stderr = result.communicate()
    assert 'usage:' in stdout.decode()
def test_cmdline_version():
    """--version must report the parser version string."""
    proc = subprocess.Popen(shlex.split('./bin/sms-shell-parser --version'),
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    assert 'SMSShell parser version' in out.decode()
def test_cmdline_with_env_input():
    """Parse a message passed through gammu-style SMS_* environment vars."""
    sms_env = {
        'SMS_MESSAGES': '1',
        'DECODED_PARTS': '0',
        'SMS_1_NUMBER': '0124',
        'SMS_1_CLASS': '-1',
        'SMS_1_TEXT': 'ghgg',
    }
    # Export the sample message to the parser subprocess's environment.
    os.environ.update(sms_env)

    proc = subprocess.Popen(shlex.split('./bin/sms-shell-parser --input env'),
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if isinstance(out, bytes):
        out = out.decode()
    parsed = json.loads(out)
    assert proc.returncode == 0
    assert 'sms_number' in parsed and parsed['sms_number'] == '0124'

    # Same input, but written to an output file instead of stdout.
    proc = subprocess.Popen(
        shlex.split('./bin/sms-shell-parser --input env --output tests/test_out'),
        stdout=subprocess.PIPE)
    proc.communicate()
    assert os.path.exists('tests/test_out')
    os.unlink('tests/test_out')
def test_cmdline_with_bad_file_input():
    """--input file without a file argument must exit non-zero."""
    proc = subprocess.Popen(shlex.split('./bin/sms-shell-parser --input file'),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    assert proc.returncode != 0
def test_cmdline_with_file_input():
    """Parse a stored SMS sample file and check the extracted number."""
    command = ('./bin/sms-shell-parser --input file '
               '-ia tests/samples/IN20190512_222525_00.txt')
    proc = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if isinstance(out, bytes):
        out = out.decode()
    parsed = json.loads(out)
    assert proc.returncode == 0
    assert 'sms_number' in parsed and parsed['sms_number'] == '+3365'
|
gangadhar-kadam/mtn-erpnext | accounts/report/item_wise_sales_register/item_wise_sales_register.py | Python | agpl-3.0 | 2,744 | 0.030977 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
def execute(filters=None):
    """Build the report: return (columns, rows) for the given filters."""
    filters = filters or {}
    columns = get_columns()
    # One row per invoice item, in the same order as the column headers.
    data = [
        [d.item_code, d.item_name, d.item_group, d.name, d.posting_date, d.customer,
         d.debit_to, d.territory, d.project_name, d.company, d.sales_order, d.delivery_note,
         d.income_account, d.qty, d.basic_rate, d.amount]
        for d in get_items(filters)
    ]
    return columns, data
def get_columns():
return [
"Item Code:Link/Item:120", "Item Name::120", "Item Group:Link/Item Group:100",
"Invoice:Lin | k/Sales Invoice:120", "Postin | g Date:Date:80", "Customer:Link/Customer:120",
"Customer Account:Link/Account:120", "Territory:Link/Territory:80",
"Project:Link/Project:80", "Company:Link/Company:100", "Sales Order:Link/Sales Order:100",
"Delivery Note:Link/Delivery Note:100", "Income Account:Link/Account:140",
"Qty:Float:120", "Rate:Currency:120", "Amount:Currency:120"
]
def get_conditions(filters):
    """Build the SQL WHERE fragment for whichever filters are set.

    Values are never interpolated here: each fragment uses a named
    placeholder that is bound later with the `filters` dict.
    """
    clause_map = (
        ("account", " and si.debit_to = %(account)s"),
        ("item_code", " and si_item.item_code = %(item_code)s"),
        ("from_date", " and si.posting_date>=%(from_date)s"),
        ("to_date", " and si.posting_date<=%(to_date)s"),
    )
    return "".join(fragment for key, fragment in clause_map if filters.get(key))
def get_items(filters):
    """Fetch submitted (docstatus=1) Sales Invoice item rows for the filters."""
    conditions = get_conditions(filters)
    # The %-interpolated `conditions` string contains only fixed fragments
    # built in get_conditions; user-supplied values are bound through the
    # `filters` dict placeholders, so this is not an injection vector.
    return webnotes.conn.sql("""select si.name, si.posting_date, si.debit_to, si.project_name,
        si.customer, si.remarks, si.territory, si.company, si_item.item_code, si_item.item_name,
        si_item.item_group, si_item.sales_order, si_item.delivery_note, si_item.income_account,
        si_item.qty, si_item.basic_rate, si_item.amount
        from `tabSales Invoice` si, `tabSales Invoice Item` si_item
        where si.name = si_item.parent and si.docstatus = 1 %s
        order by si.posting_date desc, si_item.item_code desc""" % conditions, filters, as_dict=1)
planlos/pl-mediaservice | mediaservice/api/media.py | Python | gpl-3.0 | 1,051 | 0.015224 | from flask.ext.restful import Resource
from flask import make_response, jsonify
class Media_Api( | Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self, cmd = None):
if cmd == "_s | tatus":
response = dict(
msg = "mediaservice status",
documents = 1
)
else:
response = dict(
msg = "Hello, this is mediaservice"
)
return make_response(jsonify(response))
def post(self):
pass
class Media_Object_Api(Resource):
    # NOTE(review): this class is an exact duplicate of Media_Api above;
    # consider sharing the implementation or specialising it for objects.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get(self, cmd = None):
        """Return a status payload for '_status', otherwise a greeting."""
        if cmd == "_status":
            response = dict(
                msg = "mediaservice status",
                documents = 1
            )
        else:
            response = dict(
                msg = "Hello, this is mediaservice"
            )
        return make_response(jsonify(response))

    def post(self):
        # Not implemented yet.
        pass
avikdatta/python_scripts | lib/eHive_files/Runnable/Ftp_bed_file_factory.py | Python | apache-2.0 | 1,990 | 0.025126 | import eHive, os
import pandas as pd
from collections import defaultdict
from urllib.parse import urlsplit, urlunparse
def read_index_data(index, ftp_url, dir_prefix):
    '''
    Build dataflow seeds from a tab-separated index file.

    :param index: path of an index file with at least the EXPERIMENT_ID,
        LIBRARY_STRATEGY and FILE columns
    :param ftp_url: host name used to build each http download url
    :param dir_prefix: path prefix prepended to each FILE entry
    :returns: list of dicts with 'experiment_id' and 'file_uri' keys, one
        per ChIP-Seq experiment that has a bed.gz file
    '''
    # Bug fix: sys.exit was called in the except clause below, but the
    # module never imported sys (it imports only eHive, os and pandas).
    import sys

    seed_list = []
    try:
        # read index file in chunks of 4000 lines to bound memory use
        data = pd.read_table(index, chunksize=4000)
        for data_chunk in data:
            chip_exps = data_chunk.groupby('LIBRARY_STRATEGY').get_group('ChIP-Seq').groupby('EXPERIMENT_ID').groups.keys()
            data_chunk = data_chunk.set_index('EXPERIMENT_ID')
            for exp_id in chip_exps:
                if type(data_chunk.loc[exp_id]['FILE']) is str:
                    # a single row for this experiment: FILE is a plain str
                    file_uri = data_chunk.loc[exp_id]['FILE']
                else:
                    # several rows: pick the one whose FILE is a bed.gz
                    file_uri = data_chunk.loc[exp_id][data_chunk.loc[exp_id]['FILE'].str.contains('bed.gz')]['FILE'][exp_id]
                if not file_uri:
                    raise Exception('No file uri found for exp id: {0}'.format(exp_id))
                if file_uri.endswith('bed.gz'):
                    # Process only bed files
                    file_uri = urlunparse(('http', ftp_url, dir_prefix + file_uri, '', '', ''))
                    seed_list.append({'experiment_id': exp_id, 'file_uri': file_uri})
    except Exception as e:
        sys.exit('Got error: {0}'.format(e))
    return seed_list
class Ftp_bed_file_factory(eHive.BaseRunnable):
    """eHive runnable that seeds one dataflow job per ChIP-Seq bed file."""

    def param_defaults(self):
        """Default pipeline parameters (FTP host and directory prefix)."""
        return {
            'ftp_url': 'ftp.ebi.ac.uk',
            'dir_prefix': '/pub/databases/',
        }

    def run(self):
        """Read the index file and stash the seed list for write_output."""
        ftp_host = self.param('ftp_url')
        prefix = self.param('dir_prefix')
        index_path = self.param_required('index_file')
        seeds = read_index_data(index=index_path,
                                ftp_url=ftp_host,
                                dir_prefix=prefix)
        self.param('seed_list', seeds)

    def write_output(self):
        """Flow each seed dict out on dataflow branch 2."""
        self.dataflow(self.param('seed_list'), 2)
|
kaarolch/ansible | lib/ansible/modules/cloud/amazon/rds_subnet_group.py | Python | gpl-3.0 | 5,396 | 0.007969 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rds_subnet_group
version_added: "1.5"
short_description: manage RDS database subnet groups
description:
- Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
default: null
aliases: []
description:
description:
- Database subnet group description. Only set when a new group is added.
required: false
default: null
aliases: []
subnets:
description:
- List of subnet IDs that make up the database subnet group.
required: false
default: null
aliases: []
author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Add or change a subnet group
- rds_subnet_group
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- rds_subnet_group:
state: absent
name: norwegian-blue
'''
try:
import boto.rds
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
    """Ansible entry point: create, update or delete an RDS subnet group."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            state = dict(required=True, choices=['present', 'absent']),
            name = dict(required=True),
            description = dict(required=False),
            subnets = dict(required=False, type='list'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')
    # Subnet group identifiers are case-insensitive; normalise to lower.
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    group_subnets = module.params.get('subnets') or {}

    # Cross-parameter validation: description/subnets are mandatory when
    # creating and forbidden when deleting.
    if state == 'present':
        for required in ['name', 'description', 'subnets']:
            if not module.params.get(required):
                module.fail_json(msg = str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'subnets']:
            if module.params.get(not_allowed):
                module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))

    try:
        conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = e.error_message)

    try:
        changed = False
        exists = False
        try:
            matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except BotoServerError as e:
            # "Not found" is an expected outcome here, not a failure.
            if e.error_code != 'DBSubnetGroupNotFoundFault':
                module.fail_json(msg = e.error_message)

        if state == 'absent':
            if exists:
                conn.delete_db_subnet_group(group_name)
                changed = True
        else:
            if not exists:
                new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
                changed = True
            else:
                # Sort the subnet groups before we compare them
                matching_groups[0].subnet_ids.sort()
                group_subnets.sort()
                # Only call modify when something actually differs, so the
                # reported `changed` flag stays accurate.
                if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ):
                    changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
                    changed = True
    except BotoServerError as e:
        module.fail_json(msg = e.error_message)

    module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
vgrem/Office365-REST-Python-Client | tests/graph_case.py | Python | mit | 1,515 | 0.00396 | from unittest import TestCase
import msal
from office365.graph_client import GraphClient
from tests import load_settings
def acquire_token_by_username_password():
    """Acquire a Graph token via the resource-owner password flow."""
    settings = load_settings()
    tenant = settings.get('default', 'tenant')
    authority_url = 'https://login.microsoftonline.com/{0}'.format(tenant)
    app = msal.PublicClientApplication(
        authority=authority_url,
        client_id=settings.get('client_credentials', 'client_id')
    )
    username = settings.get('user_credentials', "username")
    password = settings.get('user_credentials', "password")
    return app.acquire_token_by_username_password(
        username=username,
        password=password,
        scopes=["https://graph.microsoft.com/.default"])
def acquire_token_by_client_credentials():
    """Acquire a Graph token via the app-only client-credentials flow."""
    settings = load_settings()
    tenant = settings.get('default', 'tenant')
    authority_url = 'https://login.microsoftonline.com/{0}'.format(tenant)
    app = msal.ConfidentialClientApplication(
        authority=authority_url,
        client_id=settings.get('client_credentials', 'client_id'),
        client_credential=settings.get('client_credentials', 'client_secret')
    )
    return app.acquire_token_for_client(
        scopes=["https://graph.microsoft.com/.default"])
class GraphTestCase(TestCase):
    """Microsoft Graph specific test case base class"""
    # Shared client, created once per test class in setUpClass.
    client = None # type: GraphClient

    @classmethod
    def setUpClass(cls):
        # Authenticate with delegated user credentials for all tests.
        cls.client = GraphClient(acquire_token_by_username_password)
|
whinedo/sdnlb | sdnlb/heartbeat/heartbeat.py | Python | gpl-3.0 | 4,347 | 0.047389 | from multiprocessing import Process
import time
import commands
from socketconnection import SocketConnection
from json_message import *
from subprocess import Popen, PIPE, STDOUT
import sdnlb_conf
class HeartBeat (object):
    """Periodic health prober for the load-balancer's backend servers.

    heartBeat() marks each server up/down by attempting a TCP connect;
    eventBeat() optionally collects a load metric (CPU via iperf3, or
    connection count via netstat) from each server's event port.
    """
    def __init__(self,ip,services,sendEvent=False):
        # services: shared registry of services/servers (read and updated)
        # sendEvent: when True, also run the metric collection pass
        self.services = services
        self.sendEvent = sendEvent
        self.timeout = sdnlb_conf.hb_timeout  # seconds between passes
        self.ip = ip
    def getIp(self):
        return self.ip
    def start(self):
        """Run the probe loop in a separate process."""
        # start event listener
        p1 = Process(target=self.main)
        p1.start()
    def main(self):
        """Probe loop: heartbeat, optional metric pass, then sleep."""
        while True:
            self.heartBeat()
            if (self.sendEvent):
                self.eventBeat()
            time.sleep(self.timeout)
    def heartBeat(self):
        """Mark every server up/down based on a plain TCP connect."""
        for service in self.services.getServices():
            index = 0
            lbPort = service.getLbPort()
            for server in service.getServers():
                socket = SocketConnection()
                try:
                    socket.connect(server.getIp(),server.getPort())
                    print "STATUS OK"
                    server.setStatus(True)
                except Exception,e:
                    # cannot connect with server
                    #print e
                    #print "STATUS DOWN"
                    server.setStatus(False)
                finally:
                    socket.close()
                # publish the updated status back into the shared registry
                self.services.setServer(lbPort,server,index=index)
                index += 1
        #DEBUG
        for service in self.services.getServices():
            for server in service.getServers():
                print "STATUS:",server.getStatus()
        #FINDEBUG
    def eventBeat(self):
        """Collect a load metric from each server, one worker per server."""
        #DEBUG
        print "eventBeat"
        #FINDEBUG
        for service in self.services.getServices():
            index = 0
            processes = []
            for server in service.getServers():
                eventPort = server.getEventPort()
                p = Process(target=self.eventBeatWorker, args=(service.getLbPort(),eventPort,self.services,index))
                p.start()
                processes.append(p)
                index += 1
            # wait for all workers of this service before moving on
            for p in processes:
                p.join()
        #DEBUG
        for service in self.services.getServices():
            for server in service.getServers():
                print "STATUS:",server.getStatus()
                print "CPU:",server.getCpu()
                print "CONNS:",server.getConnections()
        #FINDEBUG
    def eventBeatWorker(self,lbPort,eventPort,services,index):
        """Query one server's agent and record its CPU or connection load.

        Runs in a child process; failures mark the server down.
        """
        server = services.getServer(lbPort,index)
        socket = SocketConnection()
        try:
            #socket.connect(server.getIp(),int(eventPort),30)
            socket.connect(server.getIp(),int(eventPort),int(sdnlb_conf.iperf_tout)*3)
            # choose the remote command according to the balancing algorithm
            if (sdnlb_conf.algo =="cpu"):
                cmd = "iperf"
                args = []
            elif (sdnlb_conf.algo == "connections"):
                cmd = "netstat"
                args = [server.getPort()]
            msg = JsonMessage.genCmdReqMessage(cmd,args)
            socket.send(msg)
            msg = socket.receive()
            #DEBUG
            print "HB msg recv:",msg
            #FINDEBUG
            if msg != '':
                (msgtype, data) = JsonMessage.parse_json(msg)
                if (msgtype == msgTypes['cmd_ans']):
                    if (data['cmd'] == "iperf"):
                        # the agent answers with the port its iperf3 server
                        # listens on; run a client probe against it
                        port = int(data['args'])
                        time.sleep(4) # wait for iperf to start running
                        cmd = 'iperf3 -c %s -t %d -p %d -J'%(server.getIp(),int(sdnlb_conf.iperf_tout),int(port))
                        p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
                        output = p.stdout.read()
                        json_msg = JsonMessage.parse_iperf_json(output)
                        if (json_msg['end']['cpu_utilization_percent']['remote_system'] != None):
                            #cpu_load = json_msg['end']['cpu_utilization_percent']['remote_system']
                            cpu_load = json_msg['end']['cpu_utilization_percent']['remote_total']
                            #DEBUG
                            print "CPU_LOAD:",cpu_load
                            #FINDEBUG
                            server.setCpu(float(cpu_load))
                    elif (data['cmd'] == "netstat"):
                        connections = int(data['args'])
                        server.setConnections(int(connections))
                        #DEBUG
                        print "EB CONNS:",connections
                        #FINDEBUG
        except Exception,e:
            # cannot connect with server
            #print "Exception"
            #print e
            #print "STATUS DOWN"
            server.setStatus(False)
        finally:
            socket.close()
        print "%d,%d"%(server.getCpu(),index)
        # publish the updated metrics back into the shared registry
        services.setServer(lbPort,server,index=index)
|
alexander-yu/nycodex | scripts/socrata_raw.py | Python | apache-2.0 | 1,584 | 0 | from nycodex import db
from nycodex.logging import get_logger
from nycodex.scrape import scrape_dataset, scrape_geojson
from nycodex.scrape.exceptions import SocrataError
BASE = "https://data.cityofnewyork.us/api"
logger = get_logger(__name__)
def main():
    """Drain the scrape queue: fetch and scrape each queued dataset."""
    session = db.Session()
    while True:
        # Bug fix: bind a fallback logger before the try block. The
        # original only assigned `log` inside the `with`, so an exception
        # raised before that point made the handlers themselves fail with
        # an unbound `log`.
        log = logger
        try:
            with db.queue.next_row_to_scrape() as (trans, dataset_id):
                if dataset_id is None:
                    break
                # TODO(alan): Use same transaction connection for this query
                dataset_type, names, fields, types = session.query(
                    db.Dataset.asset_type, db.Dataset.column_names,
                    db.Dataset.column_sql_names, db.Dataset.column_types
                ).filter(db.Dataset.id == dataset_id).first()  # yapf: disable
                log = logger.bind(
                    dataset_id=dataset_id, dataset_type=dataset_type)
                log.info(f"Scraping dataset {dataset_id}")
                if dataset_type == db.AssetType.DATASET or names:
                    scrape_dataset(trans, dataset_id, names, fields, types)
                elif dataset_type == db.AssetType.MAP:
                    scrape_geojson(trans, dataset_id)
                else:
                    log.warning("Illegal dataset_type")
        except SocrataError as e:
            log.error("Failed to import dataset", exc_info=e)
        except Exception as e:
            # Typo fix: "datset" -> "dataset".
            log.critical(
                "Failed to import dataset with unknown exception", exc_info=e)
if __name__ == "__main__":
main()
|
patricklaw/pants | src/python/pants/backend/project_info/dependees.py | Python | apache-2.0 | 6,668 | 0.00225 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from typing import Iterable, Set, cast
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.engine.addresses import Address, Addresses
from pants.engine.collection import DeduplicatedCollection
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.target import Dependencies, DependenciesRequest, Targets, UnexpandedTargets
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import FrozenOrderedSet
@dataclass(frozen=True)
class AddressToDependees:
    # Reverse dependency map: target address -> addresses that depend on it.
    mapping: FrozenDict[Address, FrozenOrderedSet[Address]]
@rule(desc="Map all targets to their dependees", level=LogLevel.DEBUG)
async def map_addresses_to_dependees() -> AddressToDependees:
    """Invert the project's dependency graph into a dependee lookup table."""
    # Get every target in the project so that we can iterate over them to find their dependencies.
    # Both the expanded and unexpanded views are needed so that generated
    # targets and their generators are each covered.
    with_generated_targets, without_generated_targets = await MultiGet(
        Get(Targets, AddressSpecs([DescendantAddresses("")])),
        Get(UnexpandedTargets, AddressSpecs([DescendantAddresses("")])),
    )
    all_targets = {*with_generated_targets, *without_generated_targets}
    dependencies_per_target = await MultiGet(
        Get(Addresses, DependenciesRequest(tgt.get(Dependencies), include_special_cased_deps=True))
        for tgt in all_targets
    )
    # Invert: every dependency edge A -> B becomes a dependee entry B -> A.
    address_to_dependees = defaultdict(set)
    for tgt, dependencies in zip(all_targets, dependencies_per_target):
        for dependency in dependencies:
            address_to_dependees[dependency].add(tgt.address)
    return AddressToDependees(
        FrozenDict(
            {addr: FrozenOrderedSet(dependees) for addr, dependees in address_to_dependees.items()}
        )
    )
@frozen_after_init
@dataclass(unsafe_hash=True)
class DependeesRequest:
    """Parameters for a dependee lookup (roots, transitivity, inclusion)."""
    addresses: FrozenOrderedSet[Address]
    transitive: bool
    include_roots: bool

    def __init__(
        self, addresses: Iterable[Address], *, transitive: bool, include_roots: bool
    ) -> None:
        # Freeze the addresses so the request is hashable for the engine.
        self.addresses = FrozenOrderedSet(addresses)
        self.transitive = transitive
        self.include_roots = include_roots
class Dependees(DeduplicatedCollection[Address]):
    # De-duplicated, sorted collection of dependee addresses.
    sort_input = True
@rule(level=LogLevel.DEBUG)
def find_dependees(
    request: DependeesRequest, address_to_dependees: AddressToDependees
) -> Dependees:
    """Compute (optionally transitive) dependees of the requested addresses."""
    check = set(request.addresses)
    known_dependents: Set[Address] = set()
    # Fixed-point iteration: each round expands the frontier (`check`) by
    # one level of direct dependees until nothing new is discovered.
    while True:
        dependents = set(known_dependents)
        for target in check:
            target_dependees = address_to_dependees.mapping.get(target, FrozenOrderedSet())
            dependents.update(target_dependees)
        check = dependents - known_dependents
        # Stop after one level for direct-only requests, or when the
        # transitive closure has converged.
        if not check or not request.transitive:
            result = (
                dependents | set(request.addresses)
                if request.include_roots
                else dependents - set(request.addresses)
            )
            return Dependees(result)
        known_dependents = dependents
class DependeesOutputFormat(Enum):
    # Output style for the goal: flat text list or per-root JSON mapping.
    text = "text"
    json = "json"
class DependeesSubsystem(LineOriented, GoalSubsystem):
    """Options for the `dependees` goal."""
    name = "dependees"
    help = "List all targets that depend on any of the input files/targets."

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register(
            "--transitive",
            default=False,
            type=bool,
            help="List all transitive dependees. If unspecified, list direct dependees only.",
        )
        register(
            "--closed",
            type=bool,
            default=False,
            help="Include the input targets in the output, along with the dependees.",
        )
        # Deprecated: structured output is moving to the `peek` goal.
        register(
            "--output-format",
            type=DependeesOutputFormat,
            default=DependeesOutputFormat.text,
            removal_version="2.9.0.dev0",
            removal_hint="Use the `peek` goal for structured output, including dependencies.",
            help=(
                "Use `text` for a flattened list of target addresses; use `json` for each key to be "
                "the address of one of the specified targets, with its value being "
                "a list of that target's dependees, e.g. `{':example': [':dep1', ':dep2']}`."
            ),
        )

    # Typed accessors over the raw option values.
    @property
    def transitive(self) -> bool:
        return cast(bool, self.options.transitive)

    @property
    def closed(self) -> bool:
        return cast(bool, self.options.closed)

    @property
    def output_format(self) -> DependeesOutputFormat:
        return cast(DependeesOutputFormat, self.options.output_format)
class DependeesGoal(Goal):
    # Marker type tying the `dependees` goal to its options subsystem.
    subsystem_cls = DependeesSubsystem
@goal_rule
async def dependees_goal(
    specified_addresses: Addresses, dependees_subsystem: DependeesSubsystem, console: Console
) -> DependeesGoal:
    """Print the dependees of the specified targets as text or JSON."""
    # TODO: Delete this entire conditional in 2.9.0.dev0.
    if dependees_subsystem.output_format == DependeesOutputFormat.json:
        # JSON mode: one lookup per root so each root maps to its own list.
        dependees_per_target = await MultiGet(
            Get(
                Dependees,
                DependeesRequest(
                    [specified_address],
                    transitive=dependees_subsystem.transitive,
                    include_roots=dependees_subsystem.closed,
                ),
            )
            for specified_address in specified_addresses
        )
        json_result = {
            specified_address.spec: [dependee.spec for dependee in dependees]
            for specified_address, dependees in zip(specified_addresses, dependees_per_target)
        }
        with dependees_subsystem.line_oriented(console) as print_stdout:
            print_stdout(json.dumps(json_result, indent=4, separators=(",", ": "), sort_keys=True))
        return DependeesGoal(exit_code=0)
    # Text mode: a single flattened lookup over all roots.
    dependees = await Get(
        Dependees,
        DependeesRequest(
            specified_addresses,
            transitive=dependees_subsystem.transitive,
            include_roots=dependees_subsystem.closed,
        ),
    )
    with dependees_subsystem.line_oriented(console) as print_stdout:
        for address in dependees:
            print_stdout(address.spec)
    return DependeesGoal(exit_code=0)
def rules():
    """Expose this module's rules to the Pants engine."""
    return collect_rules()
|
abhishekshanbhag/emotion_based_spotify | spotifyAPITest/python/test.py | Python | gpl-3.0 | 75 | 0.04 | import sys
def main():
print | (1)
if __name__ == '__main__':
ma | in() |
nycholas/ask-undrgz | src/ask-undrgz/django/contrib/localflavor/ro/forms.py | Python | bsd-3-clause | 6,464 | 0.003868 | # -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
    """
    A Romanian fiscal identity code (CIF) field

    For CIF validation algorithm see http://www.validari.ro/cui.html
    """
    default_error_messages = {
        'invalid': _("Enter a valid CIF."),
    }

    def __init__(self, *args, **kwargs):
        super(ROCIFField, self).__init__(r'^[0-9]{2,10}', max_length=10,
            min_length=2, *args, **kwargs)

    def clean(self, value):
        """
        CIF validation: strips an optional 'RO' prefix and verifies the
        mod-11 control digit (the first digit of the code).
        """
        value = super(ROCIFField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # strip RO part
        # NOTE(review): the regex above only admits digits, so a value
        # carrying the 'RO' prefix has already failed before this branch
        # runs -- confirm whether the prefix should be accepted upstream.
        if value[0:2] == 'RO':
            value = value[2:]
        # weights applied to the reversed digits for the mod-11 checksum
        key = '753217532'[::-1]
        value = value[::-1]
        key_iter = iter(key)
        checksum = 0
        for digit in value[1:]:
            checksum += int(digit) * int(key_iter.next())
        checksum = checksum * 10 % 11
        if checksum == 10:
            checksum = 0
        if checksum != int(value[0]):
            raise ValidationError(self.error_messages['invalid'])
        # un-reverse before returning the cleaned value
        return value[::-1]
class ROCNPField(RegexField):
    """
    A Romanian personal identity code (CNP) field

    For CNP validation algorithm see http://www.validari.ro/cnp.html
    """
    default_error_messages = {
        'invalid': _("Enter a valid CNP."),
    }

    def __init__(self, *args, **kwargs):
        super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length=13,
            min_length=13, *args, **kwargs)

    def clean(self, value):
        """
        CNP validations: the embedded birthdate (digits 1-6, YYMMDD) must
        be a real calendar date and the control digit must match.
        """
        value = super(ROCNPField, self).clean(value)
        # check birthdate digits
        import datetime
        try:
            datetime.date(int(value[1:3]),int(value[3:5]),int(value[5:7]))
        except ValueError:
            # Bug fix: this was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. Only a bad date (or the empty
            # slices of an empty value) raises ValueError here.
            raise ValidationError(self.error_messages['invalid'])
        # checksum: weighted digit sum mod 11, with 10 mapping to 1
        key = '279146358279'
        checksum = 0
        value_iter = iter(value)
        for digit in key:
            checksum += int(digit) * int(value_iter.next())
        checksum %= 11
        if checksum == 10:
            checksum = 1
        if checksum != int(value[12]):
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROCountyField(Field):
    """
    Form field accepting a Romanian county (judet) name or its standard
    vehicle-registration abbreviation; a matching name is normalized to
    that abbreviation.

    Only names spelled with the proper diacritics are recognised
    (u'Argeş' validates, 'Arges' does not); use ROCountySelect if that
    restriction is unacceptable.
    """
    default_error_messages = {
        'invalid': u'Enter a Romanian county code or name.',
    }
    def clean(self, value):
        from ro_counties import COUNTIES_CHOICES
        super(ROCountyField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Upper-case and trim string input; anything without .strip()
        # (non-strings) passes through unchanged.
        try:
            value = value.strip().upper()
        except AttributeError:
            pass
        # First pass: exact match against either element of a
        # (code, name) pair returns the input as-is.
        for pair in COUNTIES_CHOICES:
            if value in pair:
                return value
        # Second pass: case-normalised name match, cleaned to the code.
        for code, name in COUNTIES_CHOICES:
            if name.upper() == value:
                return code
        raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
    """
    A Select widget that uses a list of Romanian counties (judete) as its
    choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choices module is only loaded when the
        # widget is actually instantiated.
        from ro_counties import COUNTIES_CHOICES
        super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
    """
    Field for a Romanian International Bank Account Number (IBAN).

    Cleans to the upper-cased IBAN with separators removed; the country
    code must be 'RO' and the standard MOD-97 check must hold
    (see http://validari.ro/iban.html).
    """
    default_error_messages = {
        'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
    }
    def __init__(self, *args, **kwargs):
        super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
            max_length=40, min_length=24, *args, **kwargs)
    def clean(self, value):
        """
        Strip dashes and spaces, then verify country code and checksum.
        """
        value = super(ROIBANField, self).clean(value)
        # Drop the two accepted separator characters before validating.
        for separator in ('-', ' '):
            value = value.replace(separator, '')
        value = value.upper()
        if not value.startswith('RO'):
            raise ValidationError(self.error_messages['invalid'])
        # Standard IBAN check: move the first four characters to the end,
        # map letters A-Z to 10-35, and require the number to be 1 mod 97.
        rearranged = value[4:] + value[:4]
        digits = ''.join(
            str(ord(char) - 55) if char.isalpha() else char
            for char in rearranged)
        if int(digits) % 97 != 1:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROPhoneNumberField(RegexField):
    """
    Romanian phone number field: exactly ten digits once the accepted
    separators ('-', '(', ')' and spaces) are removed.
    """
    default_error_messages = {
        'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
    }
    def __init__(self, *args, **kwargs):
        super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
            max_length=20, min_length=10, *args, **kwargs)
    def clean(self, value):
        """
        Remove separators, then require exactly ten remaining characters.
        """
        value = super(ROPhoneNumberField, self).clean(value)
        # Each character of this string is one separator to strip.
        for separator in '-() ':
            value = value.replace(separator, '')
        if len(value) != 10:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROPostalCodeField(RegexField):
    """Romanian postal code field."""
    default_error_messages = {
        'invalid': _('Enter a valid postal code in the format XXXXXX'),
    }
    def __init__(self, *args, **kwargs):
        # Six digits; note the regex restricts the second digit to 0-8.
        super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
            max_length=6, min_length=6, *args, **kwargs)
|
chrivers/pyjaco | tests/list/subclass.py | Python | mit | 147 | 0.006803 | class A(list):
# NOTE(review): the ' | ' fragments in this row look like dataset-extraction
# damage (e.g. 'my_append' and 'append' split in two); confirm against the
# original file. The code below uses Python 2 'print' statements.
    def m | y_append(self, a):
        self.append(a)
# Exercise the list subclass: print the list after each built-in and
# custom mutation.
a = A()
print a
a.appe | nd(5)
print a
a.my_append(6)
print a
a.remove(5)
print a
|
spikeekips/source-over-ssh | src/tests/__init__.py | Python | gpl-3.0 | 344 | 0.011628 | # -*- coding: utf-8 -*-
# Test module names aggregated for the doctest runner below.
# NOTE(review): the ' | ' fragments look like dataset-extraction damage
# ("test_shell" split in two, plus a stray '|' line); confirm against the
# original file.
__all__ = [
    "test_config_db",
    "test_grid",
    "test | _shell",
    "test_svn",
]
|
# Run each listed module through doctest and print an aligned
# "name: (failed, attempted)" line (Python 2 'print' statement).
if __name__ == "__main__" :
    import doctest
    for i in __all__ :
        print ("%%-%ds: %%s" % (max(map(len, __all__)) + 1)) % (
            i,
            doctest.testmod(__import__(i, None, None, [i, ], ), ),
        )
|
hbldh/flask-pybankid | tests/test_config.py | Python | mit | 1,775 | 0.001127 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`test_config`
==================
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-02-04
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import tempfile
import unittest
import flask
import bankid
from flask_pybankid import PyBankID
class FlaskPyMongoConfigTest(unittest.TestCase):
    # NOTE(review): the class name says "PyMongo" but the tests exercise
    # PyBankID configuration -- presumably copied from Flask-PyMongo's suite.
    # The ' | ' fragments below look like dataset-extraction damage.
    def setUp(self):
        """Create throw-away BankID test certs and push a request context."""
        self.certificate_file, self.key_file = bankid.create_bankid_test_server_cert_and_key(
            tempfile.gettempdir()
        )
        self.app = flask.Flask("test")
 | self.context = self.app.test_request_context("/")
        self.context.push()
    def tearDown(self):
        """Pop the request context and best-effort delete the temp certs."""
        self.context.pop()
        try:
            os.remove(self.certificate_file)
            os.remove(self.key_file)
        except:
            # Best-effort cleanup: a missing file is not a test failure.
            pass
    def test_default_config_prefix(self):
        # The default "PYBANKID_*" config keys must configure the client.
        self.app.config["PYBANKID_CERT_PATH"] = self.cert | ificate_file
        self.app.config["PYBANKID_KEY_PATH"] = self.key_file
        self.app.config["PYBANKID_TEST_SERVER"] = True
        fbid = PyBankID(self.app)
        assert fbid.client.certs == (self.certificate_file, self.key_file)
        assert fbid.client.api_url == "https://appapi2.test.bankid.com/rp/v4"
    def test_custom_config_prefix(self):
        # A user-supplied prefix ("CUSTOM") must be honoured the same way.
        self.app.config["CUSTOM_CERT_PATH"] = self.certificate_file
        self.app.config["CUSTOM_KEY_PATH"] = self.key_file
        self.app.config["CUSTOM_TEST_SERVER"] = True
        fbid = PyBankID(self.app, "CUSTOM")
        assert fbid.client.certs == (self.certificate_file, self.key_file)
        assert fbid.client.api_url == "https://appapi2.test.bankid.com/rp/v4"
|
CospanDesign/python | site/test_site_utils.py | Python | mit | 415 | 0.009639 | import os
import sys
import site
import site_utils
def test_site_dir_exists():
    """Smoke test: probing a not-yet-created site dir must not raise."""
    # The return value is intentionally unchecked; only the call matters here.
    site_utils.site_dir_exists("test")
# NOTE(review): the ' | ' fragments below look like dataset-extraction damage
# ('test_create_site_dir' and a commented-out print split in two); confirm
# against the original file.
def test_ | create_site_dir():
    # Create the dir, verify it exists on disk, then remove it and re-check.
    site_utils.create_site_dir("test")
    tdir = os.path.join(site.getuserbase(), "test")
    #print "tdir: %s" % | str(tdir)
    assert os.path.exists(tdir)
    site_utils._remove_site_dir("test")
    assert not site_utils.site_dir_exists("test")
|
wangheda/youtube-8m | youtube-8m-wangheda/all_frame_models/distillchain_lstm_cnn_deep_combine_chain_model.py | Python | apache-2.0 | 7,770 | 0.012098 | import math
import models
import tensorflow as tf
import numpy as np
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
class DistillchainLstmCnnDeepCombineChainModel(models.BaseModel):
  """Frame-level model: per-feature LSTMs feed multi-width temporal CNNs
  whose max-pooled outputs, together with distillation predictions and the
  mean-pooled input, drive a chain of mixture-of-experts classifiers.

  NOTE(review): the ' | ' fragments inside sub_model() below look like
  dataset-extraction damage ('sub_scope' split in two); confirm against the
  original file. Code is Python 2 / TF 1.x (xrange, map, tf.contrib).
  """
  def cnn(self,
          model_input,
          l2_penalty=1e-8,
          num_filters = [1024, 1024, 1024],
          filter_sizes = [1,2,3],
          sub_scope="",
          **unused_params):
    """Temporal 1-D convolutions implemented as shift-concat + matmul.

    For each filter size fs, the input is concatenated with fs-1
    time-shifted (zero front-padded) copies and multiplied by a learned
    [num_features*fs, nf] filter -- equivalent to a width-fs convolution.
    Outputs for all filter sizes are concatenated on the feature axis.
    """
    max_frames = model_input.get_shape().as_list()[1]
    num_features = model_input.get_shape().as_list()[2]
    # shift_inputs[i] is the input delayed by i frames, truncated back to
    # max_frames so all shifts align.
    shift_inputs = []
    for i in xrange(max(filter_sizes)):
      if i == 0:
        shift_inputs.append(model_input)
      else:
        shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])
    cnn_outputs = []
    for nf, fs in zip(num_filters, filter_sizes):
      sub_input = tf.concat(shift_inputs[:fs], axis=2)
      sub_filter = tf.get_variable(sub_scope+"cnn-filter-len%d"%fs,
          shape=[num_features*fs, nf], dtype=tf.float32,
          initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
          regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
      cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter))
    cnn_output = tf.concat(cnn_outputs, axis=2)
    return cnn_output
  def create_model(self, model_input, vocab_size, num_frames, num_mixtures=None,
                   l2_penalty=1e-8, sub_scope="", original_input=None,
                   distillation_predictions=None,
                   **unused_params):
    """Build the chained prediction graph.

    Returns a dict with "predictions" (final MoE output) and
    "support_predictions" (concatenated intermediate chain outputs, used
    by the chain loss). distillation_predictions is required.
    """
    assert distillation_predictions is not None, "distillation feature must be used"
    distillchain_relu_cells = FLAGS.distillchain_relu_cells
    num_supports = FLAGS.num_supports
    num_layers = FLAGS.deep_chain_layers
    relu_cells = FLAGS.deep_chain_relu_cells
    max_frames = model_input.get_shape().as_list()[1]
    relu_layers = []
    support_predictions = []
    # distill: project the teacher predictions into a normalized relu feature.
    distill_relu = slim.fully_connected(
          distillation_predictions,
          distillchain_relu_cells,
          activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope="distillrelu")
    distill_norm = tf.nn.l2_normalize(distill_relu, dim=1)
    relu_layers.append(distill_norm)
    # mean: average valid frames (masked by num_frames) into one vector.
    mask = tf.sequence_mask(num_frames, maxlen=max_frames, dtype=tf.float32)
    mean_input = tf.einsum("ijk,ij->ik", model_input, mask) \
        / tf.expand_dims(tf.cast(num_frames, dtype=tf.float32), dim=1)
    mean_relu = slim.fully_connected(
          mean_input,
          relu_cells,
          activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope=sub_scope+"mean-relu")
    mean_relu_norm = tf.nn.l2_normalize(mean_relu, dim=1)
    relu_layers.append(mean_relu_norm)
    # LSTM
    lstm_output = self.lstmoutput(model_input, vocab_size, num_frames)
    # CNN over the LSTM outputs, max-pooled over time.
    cnn_output = self.cnn(lstm_output, num_filters=[relu_cells,2*relu_cells,relu_cells], filter_sizes=[1,2,3], sub_scope=sub_scope+"cnn0")
    max_cnn_output = tf.reduce_max(cnn_output, axis=1)
    normalized_cnn_output = tf.nn.l2_normalize(max_cnn_output, dim=1)
    next_input = tf.concat([normalized_cnn_output] + relu_layers, axis=1)
    # Chain: each layer predicts, its prediction is re-embedded and appended
    # to the growing feature list that feeds the next layer.
    for layer in xrange(num_layers):
      sub_prediction = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"prediction-%d"%layer)
      support_predictions.append(sub_prediction)
      sub_relu = slim.fully_connected(
          sub_prediction,
          relu_cells,
          activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope=sub_scope+"relu-%d"%layer)
      relu_norm = tf.nn.l2_normalize(sub_relu, dim=1)
      relu_layers.append(relu_norm)
      cnn_output = self.cnn(lstm_output, num_filters=[relu_cells,relu_cells,relu_cells*2], filter_sizes=[1,2,3], sub_scope=sub_scope+"cnn%d"%(layer+1))
      max_cnn_output = tf.reduce_max(cnn_output, axis=1)
      normalized_cnn_output = tf.nn.l2_normalize(max_cnn_output, dim=1)
      next_input = tf.concat([normalized_cnn_output] + relu_layers, axis=1)
    main_predictions = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"-main")
    support_predictions = tf.concat(support_predictions, axis=1)
    return {"predictions": main_predictions, "support_predictions": support_predictions}
  def sub_model(self, model_input, vocab_size, num_mixtures=None,
                l2_penalty=1e-8, sub | _scope="", **unused_params):
    """Mixture-of-experts classifier: softmax gates over sigmoid experts,
    with one extra (dummy) gate acting as a no-prediction option."""
    num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates-"+sub | _scope)
    expert_activations = slim.fully_connected(
        model_input,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts-"+sub_scope)
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return final_probabilities
  def get_mask(self, max_frames, num_frames):
    """Frame-validity mask via lookup into a constant lower-triangular
    table: row n has n ones followed by zeros.

    NOTE(review): not referenced by the visible methods; possibly a helper
    kept for other model variants.
    """
    mask_array = []
    for i in xrange(max_frames + 1):
      tmp = [0.0] * max_frames
      for j in xrange(i):
        tmp[j] = 1.0
      mask_array.append(tmp)
    mask_array = np.array(mask_array)
    mask_init = tf.constant_initializer(mask_array)
    mask_emb = tf.get_variable("mask_emb", shape = [max_frames + 1, max_frames],
        dtype = tf.float32, trainable = False, initializer = mask_init)
    mask = tf.nn.embedding_lookup(mask_emb, num_frames)
    return mask
  def lstmoutput(self, model_input, vocab_size, num_frames):
    """Run one stacked LSTM per input feature group (split by
    FLAGS.feature_sizes) and concatenate the per-frame outputs."""
    number_of_layers = FLAGS.lstm_layers
    lstm_sizes = map(int, FLAGS.lstm_cells.split(","))
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)
    # L2-normalize each feature group independently before its LSTM.
    sub_inputs = [tf.nn.l2_normalize(x, dim=2) for x in tf.split(model_input, feature_sizes, axis = 2)]
    assert len(lstm_sizes) == len(feature_sizes), \
      "length of lstm_sizes (={}) != length of feature_sizes (={})".format( \
      len(lstm_sizes), len(feature_sizes))
    outputs = []
    for i in xrange(len(feature_sizes)):
      with tf.variable_scope("RNN%d" % i):
        sub_input = sub_inputs[i]
        lstm_size = lstm_sizes[i]
        ## Batch normalize the input
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0, state_is_tuple=True)
                for _ in range(number_of_layers)
            ],
            state_is_tuple=True)
        output, state = tf.nn.dynamic_rnn(stacked_lstm, sub_input,
                                          sequence_length=num_frames,
                                          swap_memory=FLAGS.rnn_swap_memory,
                                          dtype=tf.float32)
        outputs.append(output)
    # concat per-feature LSTM outputs on the feature axis.
    final_output = tf.concat(outputs, axis=2)
    return final_output
|
hjorturlarsen/Kapall | menu.py | Python | mit | 5,750 | 0.009913 | #!/usr/bin/env python
# encoding: utf-8
'''
Created on Apr 8, 2013
@author: redw0lf
'''
import pygame, sys, os, random
fro | m pygame.locals import *
class MenuItem (pygame.font.Font):
    '''
    One rendered menu entry. Derives from pygame.font.Font and pre-renders
    its text surface centered at the given position.

    NOTE(review): the ' | ' below looks like dataset-extraction damage
    ('text' split in two); confirm against the original file. The 'color'
    parameter is accepted but never used -- rendering is hard-coded white.
    '''
    def __init__(self, text, position, fontSize=55, antialias=1, color=(255, 255, 255), background=None):
        pygame.font.Font.__init__(self, 'data/menu_font.ttf', fontSize)
        self.text = tex | t
        # render() only accepts a background argument when one is given.
        if background == None:
            self.textSurface = self.render(self.text, antialias, (255, 255, 255))
        else:
            self.textSurface = self.render(self.text, antialias, (255, 255, 255), background)
        # Rect of the rendered text, centered on the requested position.
        self.position = self.textSurface.get_rect(centerx=position[0], centery=position[1])
    def get_pos(self):
        # Rect where this item is drawn (also used for hit-testing).
        return self.position
    def get_text(self):
        return self.text
    def get_surface(self):
        return self.textSurface
class Menu:
    '''
    Builds a vertical menu from a list of entry strings, pre-rendered onto
    a background surface, and posts a MENUCLICKEDEVENT when an entry is
    clicked while the menu is active.
    '''
    # Custom pygame event id fired when a menu entry is clicked;
    # carries 'item' (index) and 'text' attributes.
    MENUCLICKEDEVENT = USEREVENT + 1
    def __init__(self, menuEntries, menuCenter=None):
        '''
        menuEntries: list of entry strings to render.
        menuCenter: accepted but currently unused -- layout is always
        centered on the screen.
        '''
        screen = pygame.display.get_surface()
        self.area = screen.get_rect()
        self.background = pygame.Surface(screen.get_size())
        self.background = self.background.convert()
        self.background_image = pygame.image.load("data/start_background.png")
        self.backgroundRect = self.background_image.get_rect()
        self.background.blit(self.background_image, self.backgroundRect)
        self.active = False
        if pygame.font:
            # NOTE(review): spacing here assumes 36px entries, but MenuItem
            # defaults to fontSize=55, so rendered entries may overlap the
            # computed 40px rows -- TODO confirm intended layout.
            fontSize = 36
            fontSpace = 4
            # loads the standard font with a size of 36 pixels
            # font = pygame.font.Font(None, fontSize)
            # calculate the height and startpoint of the menu
            # leave a space between each menu entry
            menuHeight = (fontSize + fontSpace) * len(menuEntries)
            # Python 2 integer division centers the block vertically.
            startY = self.background.get_height() / 2 - menuHeight / 2
            # listOfTextPositions=list()
            self.menuEntries = list()
            for menuEntry in menuEntries:
                centerX = self.background.get_width() / 2
                centerY = startY + fontSize + fontSpace
                newEnty = MenuItem(menuEntry, (centerX, centerY))
                self.menuEntries.append(newEnty)
                # Entries are baked onto the background once, up front.
                self.background.blit(newEnty.get_surface(), newEnty.get_pos())
                startY = startY + fontSize + fontSpace
    def drawMenu(self):
        # Drawing also activates the menu so clicks are handled.
        self.active = True
        screen = pygame.display.get_surface()
        screen.blit(self.background, (0, 0))
    def isActive(self):
        return self.active
    def activate(self,):
        self.active = True
    def deactivate(self):
        self.active = False
    def handleEvent(self, event):
        # only send the event if menu is active
        if event.type == MOUSEBUTTONDOWN and self.isActive():
            # initiate with menu Item 0
            curItem = 0
            # get x and y of the current event
            eventX = event.pos[0]
            eventY = event.pos[1]
            # for each text position
            for menuItem in self.menuEntries:
                textPos = menuItem.get_pos()
                # check if current event is in the text area
                if eventX > textPos.left and eventX < textPos.right \
                and eventY > textPos.top and eventY < textPos.bottom:
                    # if so fire new event, which states which menu item was clicked
                    menuEvent = pygame.event.Event(self.MENUCLICKEDEVENT, item=curItem, text=menuItem.get_text())
                    pygame.event.post(menuEvent)
                curItem = curItem + 1
def main():
    """Demo driver: open an 800x600 window, show the menu, and run the
    event loop until Quit is clicked or the window is closed."""
    # pygame initialization
    width = 800
    height = 600
    pygame.init()
    screen = pygame.display.set_mode((width, height))
    pygame.display.set_caption('Menu Demo')
    pygame.mouse.set_visible(1)
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((0, 0, 0))
    clock = pygame.time.Clock()
    # draw background
    screen.blit(background, (0, 0))
    pygame.display.flip()
    # code for our menu
    ourMenu = ("Start Game",
               "Quit")
    myMenu = Menu(ourMenu)
    myMenu.drawMenu()
    # pygame.display.flip()
    # main loop for event handling and drawing
    while 1:
        # Cap the loop at 60 frames per second.
        clock.tick(60)
        # Handle Input Events
        for event in pygame.event.get():
            myMenu.handleEvent(event)
            # window close -> leave main()
            if event.type == QUIT:
                return
            # Escape re-opens the menu (it does NOT quit).
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                myMenu.activate()
            elif event.type == Menu.MENUCLICKEDEVENT:
                if event.text == "Quit":
                    return
                elif event.item == 0:
                    # NOTE(review): isGameActive is assigned but never read.
                    isGameActive = True
                    myMenu.deactivate()
                    screen.blit(background, (0, 0))
        if myMenu.isActive():
            myMenu.drawMenu()
        else:
            background.fill((0, 0, 0))
        pygame.display.flip()
if __name__ == '__main__': main() |
tanutarou/OptBoard | optboard/urls.py | Python | mit | 838 | 0 | """optboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='h | ome')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
| Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
# URL routing table: Django admin plus the dashboard app at the site root.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Empty pattern prefix: every non-admin URL is delegated to
    # dashboard.urls under the 'dashboard' namespace.
    url(r'', include('dashboard.urls', namespace='dashboard')),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.