| repo_name (string, len 5-92) | path (string, len 4-221) | copies (19 classes) | size (string, len 4-6) | content (string, len 766-896k) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ecell/libmoleculizer | python-src/language_parser/moleculizer/moleculizerrules.py | 1 | 21379 |
###############################################################################
# Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute
# Original Author:
# Nathan Addy, Scientific Programmer Email: addy@molsci.org
# The Molecular Sciences Institute
#
###############################################################################
import pdb
import re
import sys
import util
from xmlobject import XmlObject
import StringIO
from sectionparameter import SymbolicExpressionEvaluator
from sectionmodifications import ModificationsSection
from sectionmols import MolsSection
from sectionallostery import AllostericPlexesSection, AllostericOmnisSection
from sectionreactionrules import ReactionRulesSection
from sectionspeciesstreams import SpeciesStreamsSection
from sectionexplicitspeciesblock import ExplicitSpeciesSection
from moleculizer_xcpt import *
class MoleculizerRulesFile:
"""
This object parses a moleculizer rules file and outputs moleculizer XML,
suitable for processing internally by a mzr::moleculizer instance."""
def BlockPassesSanityCheck( linearray ):
linearray = [x for x in linearray if x.strip() != ""]
if len(linearray) == 0: return True
everyLineEndsWithSemiColon = [ x[-1] == ";" and x.count(";") == 1 for x in linearray]
noWhiteSpace = [ (x.count("\n") + x.count(" ") + x.count("\t") == 0) for x in linearray]
return reduce(util.And, everyLineEndsWithSemiColon) and reduce(util.And, noWhiteSpace)
BlockPassesSanityCheck = staticmethod( BlockPassesSanityCheck )
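# Illustrative sketch, not from the original source: after pre-processing,
# a block is a list of single-statement strings, each ending in exactly one
# semicolon and containing no whitespace, e.g.
#   BlockPassesSanityCheck(["k=1.0;", "j=2*k;"])  # -> True
#   BlockPassesSanityCheck(["k = 1.0;"])          # -> False (whitespace)
#   BlockPassesSanityCheck(["k=1.0"])             # -> False (missing ';')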
def addWholeRulesString( self, rulesString):
print "Reading file '%s' " % rulesString
lines = rulesString.split("\n")
parameterBlock, modificationsBlock, molsBlock, allostericPlexes, allostericOmnis,\
reactionRulesBlock, dimerizationGenBlock, omniGenBlock, \
explicitSpeciesBlock, speciesStreamBlock = parseBlockTypesFromRulesFile( lines )
self.addParameterBlock( parameterBlock )
self.addModificationsBlock( modificationsBlock )
self.addMolsBlock( molsBlock )
self.addAllostericPlexesBlock( allostericPlexes )
self.addAllostericOmnisBlock( allostericOmnis )
self.addReactionRulesBlock( reactionRulesBlock, dimerizationGenBlock, \
omniGenBlock, [] )
self.addExplicitSpeciesBlock( explicitSpeciesBlock )
self.addSpeciesStreamsBlock( speciesStreamBlock )
return
def addWholeRulesFile(self, rulesFile):
parameterBlock, modificationsBlock, molsBlock, allostericPlexes, allostericOmnis, \
reactionRulesBlock, dimerizationGenBlock, omniGenBlock, \
explicitSpeciesBlock, speciesStreamBlock = parseBlockTypesFromRulesFile( open(rulesFile).readlines() )
self.addParameterBlock( parameterBlock )
self.addModificationsBlock( modificationsBlock )
self.addMolsBlock( molsBlock )
self.addAllostericPlexesBlock( allostericPlexes )
self.addAllostericOmnisBlock( allostericOmnis )
self.addReactionRulesBlock( reactionRulesBlock, dimerizationGenBlock, \
omniGenBlock, [] )
self.addExplicitSpeciesBlock( explicitSpeciesBlock )
self.addSpeciesStreamsBlock( speciesStreamBlock )
return
def addParameterStatement(self, paramStatement):
paramStatement = self.PreProcessStatement( paramStatement )
print "Adding param line: '%s'" % paramStatement
self.parameterBlock.append( paramStatement)
self.parameterEE = SymbolicExpressionEvaluator( self.parameterBlock )
return
def addModificationStatement(self, modLine):
modLine = self.PreProcessStatement( modLine )
print "Adding mod line: '%s'" % modLine
self.modificationsBlock.append( modLine)
self.modificationsSection = ModificationsSection( self.modificationsBlock )
return
def addMolsStatement(self, molsLine):
molsLine = self.PreProcessStatement( molsLine )
self.molsBlock.append( molsLine )
self.molsSection = MolsSection( self.molsBlock )
return
def addAllostericPlexStatement(self, alloPlexLine):
alloPlexLine = self.PreProcessStatement( alloPlexLine )
self.allostericPlexes.append( alloPlexLine )
self.allostericPlexesSection = AllostericPlexesSection( self.allostericPlexes )
return
def addAllostericOmniStatement(self, alloOmniLine):
alloOmniLine = self.PreProcessStatement( alloOmniLine )
self.allostericOmnis.append( alloOmniLine )
self.allostericOmnisSection = AllostericOmnisSection( self.allostericOmnis )
return
def addDimerizationGenStatement(self, dimerGenLine):
dimerGenLine = self.PreProcessStatement( dimerGenLine )
self.dimerizationGenBlock.append(dimerGenLine)
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
return
def addOmniGenStatement(self, omniGenLine):
omniGenLine = self.PreProcessStatement( omniGenLine )
self.omniGenBlock.append( omniGenLine )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
return
def addUniMolGenStatement(self, uniMolGenLine):
uniMolGenLine = self.PreProcessStatement( uniMolGenLine )
self.uniMolGenBlock.append( uniMolGenLine )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
return
def addExplicitSpeciesStatement(self, explicitSpeciesStatement):
explicitSpeciesStatement = self.PreProcessStatement( explicitSpeciesStatement )
self.explicitSpeciesBlock.append( explicitSpeciesStatement )
self.explicitSpeciesSection = ExplicitSpeciesSection( self.explicitSpeciesBlock )
return
def addSpeciesStreamStatement(self, speciesStreamLine):
speciesStreamLine = self.PreProcessStatement( speciesStreamLine )
self.speciesStreamBlock.append( speciesStreamLine )
self.speciesStreamSection = SpeciesStreamsSection( self.speciesStreamBlock )
return
def __init__(self):
# These are the lines of input, in one statement per line form, with no whitespace
self.parameterBlock = []
self.modificationsBlock = []
self.molsBlock = []
self.allostericPlexes = []
self.allostericOmnis = []
self.reactionRulesBlock = []
self.dimerizationGenBlock = []
self.omniGenBlock = []
self.uniMolGenBlock = []
self.explicitSpeciesBlock = []
self.speciesStreamBlock = []
# These are the objects that will be used to process the parsed
# data.
# A section is an intermediate between a rules file (it has lines, for
# example, and can answer questions about what has been parsed) and an
# xml section (it can write itself out as xml).
# Parameters doesn't write anything out currently, but easily could
self.parameterSection = 0
self.modificationsSection = 0
self.molsSection = 0
self.allostericPlexesSection = 0
self.allostericOmnisSection = 0
self.reactionGensSection = 0
self.explicitSpeciesSection = 0
self.speciesStreamSection = 0
def getOutputFileName(self):
return self.outputFileName
def write(self):
self.openXmlFile = open(self.outputFileName, 'w')
self.__writeOutput(self.openXmlFile)
return
def writeToString(self):
myString = StringIO.StringIO()
self.__writeOutput( myString )
return myString.getvalue()
def close(self):
self.openXmlFile.close()
def addParameterBlock(self, parameterBlock, overwrite = False):
if self.parameterBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a parameter block twice.")
if not self.BlockPassesSanityCheck( parameterBlock ):
raise InsaneBlockOnTheLooseException(parameterBlock, "parameter block")
self.parameterBlock = parameterBlock[:]
self.parameterEE = SymbolicExpressionEvaluator( self.parameterBlock )
def addModificationsBlock(self, modificationsBlock, overwrite = False):
if self.modificationsBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a modifications block twice.")
if not self.BlockPassesSanityCheck( modificationsBlock ):
raise InsaneBlockOnTheLooseException(modificationsBlock, "modifications block")
self.modificationsBlock = modificationsBlock[:]
self.modificationsSection = ModificationsSection( self.modificationsBlock )
return
def addMolsBlock(self, molsBlock, overwrite = False):
if self.molsBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a mols block twice.")
if not self.BlockPassesSanityCheck( molsBlock ):
raise InsaneBlockOnTheLooseException(molsBlock, "mols block")
self.molsBlock = molsBlock[:]
self.molsSection = MolsSection( molsBlock )
def addAllostericPlexesBlock(self, apBlock, overwrite = False):
if self.allostericPlexes and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add an allosteric plexes block twice.")
if not self.BlockPassesSanityCheck( apBlock ):
raise InsaneBlockOnTheLooseException(apBlock, "allosteric plexes block")
self.allostericPlexes = apBlock[:]
self.allostericPlexesSection = AllostericPlexesSection( self.allostericPlexes )
def addAllostericOmnisBlock(self, aoBlock, overwrite = False):
if self.allostericOmnis and not overwrite: raise MzrExceptions.MoleculizerException("Error: Cannot add an allosteric omnis block twice.")
if not self.BlockPassesSanityCheck( aoBlock ):
raise InsaneBlockOnTheLooseException( aoBlock, "allosteric omnis block")
self.allostericOmnis = aoBlock[:]
self.allostericOmnisSection = AllostericOmnisSection( self.allostericOmnis )
def addReactionRulesBlock( self, rrBlock, dimerGenBlock, omniGenBlock, uniMolGenBlock, overwrite = False):
if self.reactionRulesBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a reaction rules block twice.")
if not self.BlockPassesSanityCheck( rrBlock ):
raise InsaneBlockOnTheLooseException(rrBlock, "reaction rules")
if not self.BlockPassesSanityCheck( dimerGenBlock ):
raise InsaneBlockOnTheLooseException(dimerGenBlock, "dimerization gen block")
if not self.BlockPassesSanityCheck( omniGenBlock ):
raise InsaneBlockOnTheLooseException(omniGenBlock, "omni-gen block")
if not self.BlockPassesSanityCheck( uniMolGenBlock ):
raise InsaneBlockOnTheLooseException(uniMolGenBlock, "uni-mol-gen block")
self.reactionRulesBlock.extend( rrBlock )
self.dimerizationGenBlock.extend( dimerGenBlock )
self.omniGenBlock.extend( omniGenBlock )
self.uniMolGenBlock.extend( uniMolGenBlock )
self.reactionGensSection = ReactionRulesSection( self.reactionRulesBlock,
self.dimerizationGenBlock,
self.omniGenBlock,
self.uniMolGenBlock)
def addExplicitSpeciesBlock( self, esBlock, overwrite = False):
if self.explicitSpeciesBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add an explicit species block twice.")
if not self.BlockPassesSanityCheck( esBlock ):
raise InsaneBlockOnTheLooseException(esBlock, "explicit-species")
self.explicitSpeciesBlock = esBlock[:]
self.explicitSpeciesSection = ExplicitSpeciesSection( esBlock )
def addSpeciesStreamsBlock(self, ssBlock, overwrite = False):
if self.speciesStreamBlock and not overwrite:
raise MzrExceptions.MoleculizerException("Error: Cannot add a species stream block twice.")
if not self.BlockPassesSanityCheck( ssBlock ):
raise InsaneBlockOnTheLooseException(ssBlock, "species streams block")
self.speciesStreamBlock = ssBlock[:]
self.speciesStreamSection = SpeciesStreamsSection( self.speciesStreamBlock )
def __processAllostericRulesBlocks( self, allostericPlexBlock, allostericOmniBlock):
return 0
def __processReactionRulesBlocks( self, rxnRulesBlock, dimerBlock, omniGenBlock, uniGenBlock):
return 0
def __processExplicitSpeciesBlock( self, explicitSpeciesBlock):
return 0
def __processSpeciesStreamBlock( self, ssBlock):
return 0
def __writeOutput(self, openXMLFile):
xmlobject = self.__constructXMLRepresentation()
xmlobject.writeall(openXMLFile)
def __constructXMLRepresentation(self):
rootNode = XmlObject("moleculizer-input")
modelElmt = XmlObject("model")
modelElmt.attachToParent(rootNode)
streamsElmt = XmlObject("streams", rootNode)
self.__addModifications( modelElmt )
self.__addMols( modelElmt )
self.__addAllostericPlexes( modelElmt )
self.__addAllostericOmnis( modelElmt )
self.__addReactionGens( modelElmt )
self.__addExplicitSpecies( modelElmt )
self.__addExplicitReactions( modelElmt )
self.__addSpeciesStreams( streamsElmt )
return rootNode
def __addModifications(self, parentObject):
# Write me!!!
modificationsSection = XmlObject("modifications", parentObject)
if self.modificationsSection:
self.modificationsSection.writeModificationsSections( modificationsSection )
return
def __addMols(self, parentObject):
molsSection = XmlObject("mols", parentObject)
if self.molsSection:
self.molsSection.writeMolsSection( molsSection)
return
def __addAllostericPlexes(self, parentObject):
allostericPlexes = XmlObject("allosteric-plexes", parentObject)
if self.allostericPlexesSection:
self.allostericPlexesSection.writeAllostericPlexesSection(allostericPlexes)
return
def __addAllostericOmnis(self, parentObject):
allostericOmnis = XmlObject("allosteric-omnis", parentObject)
if self.allostericOmnisSection:
self.allostericOmnisSection.writeAllostericOmnisSection( allostericOmnis )
return
def __addReactionGens(self, parentObject):
reactionGenElmt = XmlObject("reaction-gens", parentObject)
if self.reactionGensSection:
self.reactionGensSection.writeReactionGensSection( reactionGenElmt )
return
def __addSpeciesStreams( self, parentObject):
speciesStreamsElement = XmlObject("species-streams", parentObject)
if self.speciesStreamSection:
self.speciesStreamSection.writeSpeciesStreamSection( speciesStreamsElement )
def __addExplicitSpecies(self, parentObject):
explicitSpeciesElmt = XmlObject("explicit-species", parentObject)
if self.explicitSpeciesSection:
self.explicitSpeciesSection.writeExplicitSpeciesSection( explicitSpeciesElmt )
return
def __addExplicitReactions( self, modelElmt ):
explicitReactionsElmt = XmlObject("explicit-reactions", modelElmt)
return
def parseBlockTypesFromRulesFile(textRulesFile):
textRulesFile = [re.sub("#.*$", "", x) for x in textRulesFile] # Delete all comments
# textRulesFile = [re.sub("//.*$", "", x) for x in textRulesFile] # Delete all comments
textRulesFile = [re.sub(r"\s*", "", x) for x in textRulesFile] # Delete all whitespace
textRulesFile = [x.strip() for x in textRulesFile] # Strip it for good measure
textRulesFile = [x for x in textRulesFile if x != ""] # This must be last, because line.strip() results in some empty lines.
parameterBlock = []
modificationsBlock = []
molsBlock = []
allostericPlexes = []
allostericOmnis = []
reactionRulesBlock = []
dimerizationGenBlock = []
omniGenBlock = []
uniMolGenBlock = []
explicitSpeciesBlock = []
speciesStreamBlock = []
# textRulesFile = '\n'.join(textRulesFile)
# textRulesFile = re.sub(r"\\\s*\n\s*", " ", textRulesFile)
# textRulesFile = textRulesFile.split("\n")
blockCodes = ["Parameters", "Modifications", "Molecules", "Explicit-Allostery", "Allosteric-Classes",
"Reaction-Rules", "Association-Reactions", "Transformation-Reactions",
"Explicit-Species", "Species-Classes" ]
blockObjNdx = -1
blockDataObj = [ (blockCodes[0], parameterBlock), \
(blockCodes[1], modificationsBlock), \
(blockCodes[2], molsBlock), \
(blockCodes[3], allostericPlexes),
(blockCodes[4], allostericOmnis),
(blockCodes[5], reactionRulesBlock), \
(blockCodes[6], dimerizationGenBlock), \
(blockCodes[7], omniGenBlock), \
(blockCodes[8], explicitSpeciesBlock),\
(blockCodes[9], speciesStreamBlock) ]
currentDmp = []
if not textRulesFile[0].startswith("="):
raise Exception("Line '%s' should start with a '=', but does not." % textRulesFile[0])
blockObjNdx = -1
for line in textRulesFile:
if line.startswith("="):
blockObjNdx = returnNewIndex(line, blockDataObj)
currentDmp = blockDataObj[blockObjNdx][1]
else:
currentDmp.append(line)
return getFormattedArray(parameterBlock), getFormattedArray(modificationsBlock), getFormattedArray(molsBlock), getFormattedArray(allostericPlexes), getFormattedArray(allostericOmnis), \
getFormattedArray(reactionRulesBlock), getFormattedArray(dimerizationGenBlock), getFormattedArray(omniGenBlock), \
getFormattedArray(explicitSpeciesBlock), getFormattedArray(speciesStreamBlock)
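# Illustrative sketch, inferred from the parsing code above: a rules file is
# divided into sections by '=' header lines whose titles must match an entry
# of blockCodes, e.g.
#   = Parameters
#   k_on = 1.0;
#   = Modifications
#   ...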
def returnNewIndex(lineOfText, blockObjData):
key = lineOfText.strip().strip("=").strip()
for ndx in range(len(blockObjData)):
if key == blockObjData[ndx][0]:
return ndx
raise Exception("Section title '%s' cannot be found" % key)
return -1
def barf(msg):
sys.stderr.write(msg + '\n')
sys.stderr.write("Crashing....\n")
sys.exit(1)
def printerror(msg):
sys.stderr.write(msg + '\n')
return
def getFormattedArray( arrayToFormat ):
tmpArray = getBalancedArray( arrayToFormat )
tmpString = "".join( tmpArray )
if tmpString == "":
return []
if not tmpString.endswith(";"):
raise Exception("Error parsing block '%s'. Line does not end in ';'." % repr(arrayToFormat))
tmpArray = tmpString.split(";")
tmpArray.pop() # Last entry is blank
tmpArray = [tok + ";" for tok in tmpArray]
return tmpArray
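# Illustrative example, not from the original source: getFormattedArray
# re-splits balanced text into one statement per entry, e.g.
#   getFormattedArray(["a=(1,", "2);b=3;"])  ->  ["a=(1,2);", "b=3;"]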
def getBalancedArray( arrayToBalance ):
if not EachEntryIsParenBalanced( arrayToBalance ):
# Combine the ..., ndx_i, ndx_(i+1) where ndx_i is the smallest i not balanced
return getBalancedArray( GetIncrementallyBetterArray( arrayToBalance ) )
else:
return arrayToBalance
def GetIncrementallyBetterArray( anArray ):
values = [ StringIsParenBalanced(x) for x in anArray]
# This is correct: this function should only be used if the array does not pass
# EachEntryIsParenBalanced.
assert( False in values)
badNdx = values.index( False )
combinedTokens = anArray[badNdx] + anArray[badNdx + 1]
returnArray = anArray[ : badNdx]
returnArray.append( combinedTokens )
returnArray.extend( anArray[badNdx + 2 : ] )
return returnArray
def EachEntryIsParenBalanced( array ):
entries = [ StringIsParenBalanced(x) for x in array ]
returnVal = True
for val in entries:
returnVal = returnVal and val
return returnVal
def StringIsParenBalanced(line):
return ( line.count("(") == line.count(")") and
line.count("[") == line.count("]") and
line.count("{") == line.count("}") )
| gpl-2.0 | -8,921,513,987,681,306,000 | 36.245645 | 189 | 0.658263 | false | 4.037583 | false | false | false |
| nevermoreluo/privateoverseas | overseas/migrations/0001_initial.py | 1 | 3314 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-05 02:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('agid', models.PositiveIntegerField(unique=True)),
('name', models.CharField(max_length=100)),
('desc', models.CharField(max_length=200)),
('api_correlation_id', models.CharField(max_length=100)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Geo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metro', models.CharField(blank=True, max_length=100, null=True)),
('region', models.CharField(max_length=100)),
('requests', models.DecimalField(decimal_places=2, max_digits=20)),
('throughput', models.DecimalField(decimal_places=2, max_digits=20)),
('peak_throughput', models.DecimalField(decimal_places=2, max_digits=20)),
('bandwidth', models.DecimalField(decimal_places=2, max_digits=20)),
('peak_bandwidth', models.DecimalField(decimal_places=2, max_digits=20)),
('hit_rate', models.DecimalField(decimal_places=2, max_digits=20)),
('status_4XX', models.DecimalField(decimal_places=2, max_digits=20)),
('status_5XX', models.DecimalField(decimal_places=2, max_digits=20)),
('time', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='NetworkIdentifiers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ni', models.CharField(max_length=100, unique=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scid', models.CharField(max_length=50, unique=True)),
('active', models.BooleanField(default=True)),
('access_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.AccessGroup')),
],
),
migrations.AddField(
model_name='networkidentifiers',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.Service'),
),
migrations.AddField(
model_name='geo',
name='ni',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='overseas.NetworkIdentifiers'),
),
]
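# Hedged usage note, not part of the generated file: Django applies this
# initial migration with the standard management command, e.g.
#   python manage.py migrate overseas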
| gpl-3.0 | -9,114,533,386,148,599,000 | 44.39726 | 124 | 0.573929 | false | 4.287193 | false | false | false |
| zstackio/zstack-woodpecker | integrationtest/vm/mini/paths/path54.py | 1 | 2373 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", path_list=[
[TestAction.add_image, 'image1', 'root', os.environ.get('isoForVmUrl')],
[TestAction.create_vm_by_image, 'image1', 'iso', 'vm1'],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.detach_volume, 'volume1'],
[TestAction.create_mini_vm, 'vm2', 'memory=random'],
[TestAction.resize_data_volume, 'volume1', 5*1024*1024],
[TestAction.attach_volume, 'vm2', 'volume1'],
[TestAction.create_mini_vm, 'vm3', 'network=random'],
[TestAction.create_volume, 'volume2', 'flag=thin,scsi'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_volume_backup, 'volume1-backup1'],
[TestAction.start_vm, 'vm2'],
[TestAction.delete_image, 'image2'],
[TestAction.recover_image, 'image2'],
[TestAction.delete_image, 'image2'],
[TestAction.expunge_image, 'image2'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.create_volume, 'volume3', 'size=random', 'flag=scsi'],
[TestAction.delete_volume, 'volume3'],
[TestAction.stop_vm, 'vm3'],
[TestAction.add_image, 'image3', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.delete_volume, 'volume1'],
[TestAction.expunge_volume, 'volume1'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup3'],
[TestAction.resize_volume, 'vm3', 5*1024*1024],
[TestAction.delete_image, 'image1'],
[TestAction.delete_vm_backup, 'vm2-backup3'],
[TestAction.add_image, 'image4', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume4', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume2'],
])
'''
The final status:
Running:['vm1', 'vm2']
Stopped:['vm3']
Enabled:['volume1-backup1', 'vm1-backup2', 'image3', 'image4']
attached:['volume2']
Detached:['volume4']
Deleted:['volume3', 'vm2-backup3', 'image1']
Expunged:['volume1', 'image2']
Ha:[]
Group:
vm_backup1:['vm1-backup2']---vm1_
'''
| apache-2.0 | -4,760,517,556,792,538,000 | 39.220339 | 104 | 0.686473 | false | 2.616318 | true | false | false |
| v6ak/qubes-core-admin | core/storage/__init__.py | 2 | 15124 |
#!/usr/bin/python2
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013 Marek Marczykowski <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
from __future__ import absolute_import
import ConfigParser
import os
import os.path
import shutil
import subprocess
import sys
import qubes.qubesutils
from qubes.qubes import QubesException, defaults, system_path
CONFIG_FILE = '/etc/qubes/storage.conf'
class QubesVmStorage(object):
"""
Class for handling VM virtual disks. This is base class for all other
implementations, mostly with Xen on Linux in mind.
"""
def __init__(self, vm,
private_img_size = None,
root_img_size = None,
modules_img = None,
modules_img_rw = False):
self.vm = vm
self.vmdir = vm.dir_path
if private_img_size:
self.private_img_size = private_img_size
else:
self.private_img_size = defaults['private_img_size']
if root_img_size:
self.root_img_size = root_img_size
else:
self.root_img_size = defaults['root_img_size']
self.root_dev = "xvda"
self.private_dev = "xvdb"
self.volatile_dev = "xvdc"
self.modules_dev = "xvdd"
# For now compute this path still in QubesVm
self.modules_img = modules_img
self.modules_img_rw = modules_img_rw
# Additional drive (currently used only by HVM)
self.drive = None
def format_disk_dev(self, path, script, vdev, rw=True, type="disk",
domain=None):
if path is None:
return ''
template = " <disk type='block' device='{type}'>\n" \
" <driver name='phy'/>\n" \
" <source dev='{path}'/>\n" \
" <target dev='{vdev}' bus='xen'/>\n" \
"{params}" \
" </disk>\n"
params = ""
if not rw:
params += " <readonly/>\n"
if domain:
params += " <backenddomain name='%s'/>\n" % domain
if script:
params += " <script path='%s'/>\n" % script
return template.format(path=path, vdev=vdev, type=type, params=params)
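# Illustrative sketch, not from the original source; the device path is
# hypothetical:
#   self.format_disk_dev('/dev/vg/root', None, 'xvda', rw=False)
# renders roughly:
#   <disk type='block' device='disk'>
#     <driver name='phy'/>
#     <source dev='/dev/vg/root'/>
#     <target dev='xvda' bus='xen'/>
#     <readonly/>
#   </disk>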
def get_config_params(self):
args = {}
args['rootdev'] = self.root_dev_config()
args['privatedev'] = self.private_dev_config()
args['volatiledev'] = self.volatile_dev_config()
args['otherdevs'] = self.other_dev_config()
return args
def root_dev_config(self):
raise NotImplementedError
def private_dev_config(self):
raise NotImplementedError
def volatile_dev_config(self):
raise NotImplementedError
def other_dev_config(self):
if self.modules_img is not None:
return self.format_disk_dev(self.modules_img,
None,
self.modules_dev,
self.modules_img_rw)
elif self.drive is not None:
(drive_type, drive_domain, drive_path) = self.drive.split(":")
if drive_type == "hd":
drive_type = "disk"
writable = False
if drive_type == "disk":
writable = True
if drive_domain.lower() == "dom0":
drive_domain = None
return self.format_disk_dev(drive_path, None,
self.modules_dev,
rw=writable,
type=drive_type,
domain=drive_domain)
else:
return ''
def _copy_file(self, source, destination):
"""
Effective file copy, preserving sparse files etc.
"""
# TODO: Windows support
# We prefer to use Linux's cp, because it nicely handles sparse files
retcode = subprocess.call (["cp", "--reflink=auto", source, destination])
if retcode != 0:
raise IOError ("Error while copying {0} to {1}".\
format(source, destination))
def get_disk_utilization(self):
return qubes.qubesutils.get_disk_usage(self.vmdir)
def get_disk_utilization_private_img(self):
return qubes.qubesutils.get_disk_usage(self.private_img)
def get_private_img_sz(self):
if not os.path.exists(self.private_img):
return 0
return os.path.getsize(self.private_img)
def resize_private_img(self, size):
raise NotImplementedError
def create_on_disk_private_img(self, verbose, source_template = None):
raise NotImplementedError
def create_on_disk_root_img(self, verbose, source_template = None):
raise NotImplementedError
def create_on_disk(self, verbose, source_template = None):
if source_template is None:
source_template = self.vm.template
old_umask = os.umask(002)
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.vmdir)
os.mkdir (self.vmdir)
self.create_on_disk_private_img(verbose, source_template)
self.create_on_disk_root_img(verbose, source_template)
self.reset_volatile_storage(verbose, source_template)
os.umask(old_umask)
def clone_disk_files(self, src_vm, verbose):
if verbose:
print >> sys.stderr, "--> Creating directory: {0}".format(self.vmdir)
os.mkdir (self.vmdir)
if src_vm.private_img is not None and self.private_img is not None:
if verbose:
print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\
format(src_vm.private_img, self.private_img)
self._copy_file(src_vm.private_img, self.private_img)
if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None:
if verbose:
print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\
format(src_vm.root_img, self.root_img)
self._copy_file(src_vm.root_img, self.root_img)
# TODO: modules?
def rename(self, old_name, new_name):
old_vmdir = self.vmdir
new_vmdir = os.path.join(os.path.dirname(self.vmdir), new_name)
os.rename(self.vmdir, new_vmdir)
self.vmdir = new_vmdir
if self.private_img:
self.private_img = self.private_img.replace(old_vmdir, new_vmdir)
if self.root_img:
self.root_img = self.root_img.replace(old_vmdir, new_vmdir)
if self.volatile_img:
self.volatile_img = self.volatile_img.replace(old_vmdir, new_vmdir)
def verify_files(self):
if not os.path.exists (self.vmdir):
raise QubesException (
"VM directory doesn't exist: {0}".\
format(self.vmdir))
if self.root_img and not os.path.exists (self.root_img):
raise QubesException (
"VM root image file doesn't exist: {0}".\
format(self.root_img))
if self.private_img and not os.path.exists (self.private_img):
raise QubesException (
"VM private image file doesn't exist: {0}".\
format(self.private_img))
if self.modules_img is not None:
if not os.path.exists(self.modules_img):
raise QubesException (
"VM kernel modules image does not exists: {0}".\
format(self.modules_img))
def remove_from_disk(self):
shutil.rmtree (self.vmdir)
def reset_volatile_storage(self, verbose = False, source_template = None):
if source_template is None:
source_template = self.vm.template
# Re-create only for template based VMs
if source_template is not None and self.volatile_img:
if os.path.exists(self.volatile_img):
os.remove(self.volatile_img)
# For StandaloneVM create it only if not already exists (eg after backup-restore)
if self.volatile_img and not os.path.exists(self.volatile_img):
if verbose:
print >> sys.stderr, "--> Creating volatile image: {0}...".\
format(self.volatile_img)
subprocess.check_call([system_path["prepare_volatile_img_cmd"],
self.volatile_img, str(self.root_img_size / 1024 / 1024)])
def prepare_for_vm_startup(self, verbose):
self.reset_volatile_storage(verbose=verbose)
if self.private_img and not os.path.exists (self.private_img):
print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\
format(self.private_img)
self.create_on_disk_private_img(verbose=False)
def dump(o):
""" Returns a string represention of the given object
Args:
o (object): anything that response to `__module__` and `__class__`
Given the class :class:`qubes.storage.QubesVmStorage` it returns
'qubes.storage.QubesVmStorage' as string
"""
return o.__module__ + '.' + o.__class__.__name__
def load(string):
""" Given a dotted full module string representation of a class it loads it
Args:
string (str) i.e. 'qubes.storage.xen.QubesXenVmStorage'
Returns:
type
See also:
:func:`qubes.storage.dump`
"""
if not type(string) is str:
# This is a hack which allows giving a real class to a vm instead of a
# string as string_class parameter.
return string
components = string.split(".")
module_path = ".".join(components[:-1])
klass = components[-1:][0]
module = __import__(module_path, fromlist=[klass])
return getattr(module, klass)
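# Hedged usage sketch, not from the original source, reusing the class path
# from the docstring above:
#   klass = load('qubes.storage.xen.QubesXenVmStorage')   # -> class object
#   dump(klass(vm))  ->  'qubes.storage.xen.QubesXenVmStorage'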
def get_pool(name, vm):
""" Instantiates the storage for the specified vm """
config = _get_storage_config_parser()
klass = _get_pool_klass(name, config)
keys = [k for k in config.options(name) if k != 'driver' and k != 'class']
values = [config.get(name, o) for o in keys]
config_kwargs = dict(zip(keys, values))
if name == 'default':
kwargs = defaults['pool_config'].copy()
kwargs.update(config_kwargs)
else:
kwargs = config_kwargs
return klass(vm, **kwargs)
def pool_exists(name):
""" Check if the specified pool exists """
try:
_get_pool_klass(name)
return True
except StoragePoolException:
return False
def add_pool(name, **kwargs):
""" Add a storage pool to config."""
config = _get_storage_config_parser()
config.add_section(name)
for key, value in kwargs.iteritems():
config.set(name, key, value)
_write_config(config)
def remove_pool(name):
""" Remove a storage pool from config file. """
config = _get_storage_config_parser()
config.remove_section(name)
_write_config(config)
def _write_config(config):
with open(CONFIG_FILE, 'w') as configfile:
config.write(configfile)
def _get_storage_config_parser():
""" Instantiates a `ConfigParaser` for specified storage config file.
Returns:
RawConfigParser
"""
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
return config
def _get_pool_klass(name, config=None):
""" Returns the storage klass for the specified pool.
Args:
name: The pool name.
config: If ``config`` is not specified
`_get_storage_config_parser()` is called.
Returns:
type: A class inheriting from `QubesVmStorage`
"""
if config is None:
config = _get_storage_config_parser()
if not config.has_section(name):
raise StoragePoolException('Unknown storage pool ' + name)
if config.has_option(name, 'class'):
klass = load(config.get(name, 'class'))
elif config.has_option(name, 'driver'):
pool_driver = config.get(name, 'driver')
klass = defaults['pool_drivers'][pool_driver]
else:
raise StoragePoolException('Unknown storage pool driver ' + name)
return klass
class StoragePoolException(QubesException):
pass
class Pool(object):
def __init__(self, vm, dir_path):
assert vm is not None
assert dir_path is not None
self.vm = vm
self.dir_path = dir_path
self.create_dir_if_not_exists(self.dir_path)
self.vmdir = self.vmdir_path(vm, self.dir_path)
appvms_path = os.path.join(self.dir_path, 'appvms')
self.create_dir_if_not_exists(appvms_path)
servicevms_path = os.path.join(self.dir_path, 'servicevms')
self.create_dir_if_not_exists(servicevms_path)
vm_templates_path = os.path.join(self.dir_path, 'vm-templates')
self.create_dir_if_not_exists(vm_templates_path)
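# Illustrative sketch, not from the original source; the pool directory is
# hypothetical: constructing Pool(vm, '/var/lib/qubes') ensures this layout:
#   /var/lib/qubes/appvms/
#   /var/lib/qubes/servicevms/
#   /var/lib/qubes/vm-templates/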
def vmdir_path(self, vm, pool_dir):
""" Returns the path to vmdir depending on the type of the VM.
The default QubesOS file storage saves the vm images in three
different directories depending on the ``QubesVM`` type:
* ``appvms`` for ``QubesAppVm`` or ``QubesHvm``
* ``vm-templates`` for ``QubesTemplateVm`` or ``QubesTemplateHvm``
* ``servicevms`` for any subclass of ``QubesNetVm``
Args:
vm: a QubesVM
pool_dir: the root directory of the pool
Returns:
string (str) absolute path to the directory where the vm files
are stored
"""
if vm.is_appvm():
subdir = 'appvms'
elif vm.is_template():
subdir = 'vm-templates'
elif vm.is_netvm():
subdir = 'servicevms'
elif vm.is_disposablevm():
subdir = 'appvms'
return os.path.join(pool_dir, subdir, vm.template.name + '-dvm')
else:
raise QubesException(vm.type() + ' unknown vm type')
return os.path.join(pool_dir, subdir, vm.name)
def create_dir_if_not_exists(self, path):
""" Check if a directory exists in if not create it.
This method does not create any parent directories.
"""
if not os.path.exists(path):
os.mkdir(path)
| gpl-2.0 | 4,979,779,882,463,710,000 | 32.910314 | 91 | 0.583047 | false | 3.872983 | true | false | false |
| eloquence/unisubs | apps/teams/models.py | 1 | 128528 |
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from collections import defaultdict
from itertools import groupby
from math import ceil
import csv
import datetime
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.files import File
from django.db import models
from django.db.models import query, Q
from django.db.models.signals import post_save, post_delete, pre_delete
from django.http import Http404
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, ugettext
from haystack import site
from haystack.query import SQ
import teams.moderation_const as MODERATION
from caching import ModelCacheManager
from comments.models import Comment
from auth.models import UserLanguage, CustomUser as User
from auth.providers import get_authentication_provider
from messages import tasks as notifier
from subtitles import shims
from subtitles.signals import language_deleted
from teams.moderation_const import WAITING_MODERATION, UNMODERATED, APPROVED
from teams.permissions_const import (
TEAM_PERMISSIONS, PROJECT_PERMISSIONS, ROLE_OWNER, ROLE_ADMIN, ROLE_MANAGER,
ROLE_CONTRIBUTOR
)
from teams import tasks
from teams import workflows
from teams.signals import api_subtitles_approved, api_subtitles_rejected
from utils import DEFAULT_PROTOCOL
from utils import translation
from utils.amazon import S3EnabledImageField, S3EnabledFileField
from utils.panslugify import pan_slugify
from utils.searching import get_terms
from utils.text import fmt
from videos.models import (Video, VideoUrl, SubtitleVersion, SubtitleLanguage,
Action)
from videos.tasks import video_changed_tasks
from subtitles.models import (
SubtitleVersion as NewSubtitleVersion,
SubtitleLanguage as NewSubtitleLanguage,
SubtitleNoteBase,
ORIGIN_IMPORTED
)
from subtitles import pipeline
from functools import partial
logger = logging.getLogger(__name__)
celery_logger = logging.getLogger('celery.task')
BILLING_CUTOFF = getattr(settings, 'BILLING_CUTOFF', None)
# Teams
class TeamQuerySet(query.QuerySet):
def add_members_count(self):
"""Add _members_count field to this query
This can be used to order/filter the query and also avoids an extra query
when Team.members_count() is called.
"""
select = {
'_members_count': (
'SELECT COUNT(1) '
'FROM teams_teammember tm '
'WHERE tm.team_id=teams_team.id'
)
}
return self.extra(select=select)
def add_videos_count(self):
"""Add _videos_count field to this query
This can be used to order/filter the query and also avoids an extra query
when Team.videos_count() is called.
"""
select = {
'_videos_count': (
'SELECT COUNT(1) '
'FROM teams_teamvideo tv '
'WHERE tv.team_id=teams_team.id'
)
}
return self.extra(select=select)
def add_user_is_member(self, user):
"""Add user_is_member field to this query """
if not user.is_authenticated():
return self.extra(select={'user_is_member': 0})
select = {
'user_is_member': (
'EXISTS (SELECT 1 '
'FROM teams_teammember tm '
'WHERE tm.team_id=teams_team.id '
'AND tm.user_id=%s)'
)
}
return self.extra(select=select, select_params=[user.id])
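# Hedged usage sketch, not from the original source: the add_* helpers
# annotate each row with an extra SELECT subquery, so a caller can do
#   qs = Team.objects.all().add_members_count()
#   qs = qs.extra(order_by=['-_members_count'])
# and sort by member count without issuing one COUNT query per team.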
class TeamManager(models.Manager):
def get_query_set(self):
"""Return a QS of all non-deleted teams."""
return TeamQuerySet(Team).filter(deleted=False)
def for_user(self, user, exclude_private=False):
"""Return the teams visible for the given user.
If exclude_private is True, then we will exclude private teams, even
if the user can apply to them.
"""
# policies where we should show the team, even if they're not visible
visible_policies = [Team.OPEN, Team.APPLICATION]
q = models.Q(is_visible=True)
if not exclude_private:
q |= models.Q(membership_policy__in=visible_policies)
if user.is_authenticated():
user_teams = TeamMember.objects.filter(user=user)
q |= models.Q(id__in=user_teams.values('team_id'))
return self.get_query_set().filter(q)
def with_recent_billing_record(self, day_range):
"""Find teams that have had a new video recently"""
start_date = (datetime.datetime.now() -
datetime.timedelta(days=day_range))
team_ids = list(BillingRecord.objects
.order_by()
.filter(created__gt=start_date)
.values_list('team_id', flat=True)
.distinct())
return Team.objects.filter(id__in=team_ids)
def needs_new_video_notification(self, notify_interval):
return (self.filter(
notify_interval=notify_interval,
teamvideo__created__gt=models.F('last_notification_time'))
.distinct())
class Team(models.Model):
APPLICATION = 1
INVITATION_BY_MANAGER = 2
INVITATION_BY_ALL = 3
OPEN = 4
INVITATION_BY_ADMIN = 5
MEMBERSHIP_POLICY_CHOICES = (
(OPEN, _(u'Open')),
(APPLICATION, _(u'Application')),
(INVITATION_BY_ALL, _(u'Invitation by any team member')),
(INVITATION_BY_MANAGER, _(u'Invitation by manager')),
(INVITATION_BY_ADMIN, _(u'Invitation by admin')),
)
VP_MEMBER = 1
VP_MANAGER = 2
VP_ADMIN = 3
VIDEO_POLICY_CHOICES = (
(VP_MEMBER, _(u'Any team member')),
(VP_MANAGER, _(u'Managers and admins')),
(VP_ADMIN, _(u'Admins only'))
)
TASK_ASSIGN_CHOICES = (
(10, 'Any team member'),
(20, 'Managers and admins'),
(30, 'Admins only'),
)
TASK_ASSIGN_NAMES = dict(TASK_ASSIGN_CHOICES)
TASK_ASSIGN_IDS = dict([choice[::-1] for choice in TASK_ASSIGN_CHOICES])
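# Note (an editorial observation): dict([choice[::-1] for choice in ...])
# inverts the choices, e.g. TASK_ASSIGN_IDS['Admins only'] == 30 and
# TASK_ASSIGN_NAMES[30] == 'Admins only'.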
SUBTITLE_CHOICES = (
(10, 'Anyone'),
(20, 'Any team member'),
(30, 'Only managers and admins'),
(40, 'Only admins'),
)
SUBTITLE_NAMES = dict(SUBTITLE_CHOICES)
SUBTITLE_IDS = dict([choice[::-1] for choice in SUBTITLE_CHOICES])
NOTIFY_DAILY = 'D'
NOTIFY_HOURLY = 'H'
NOTIFY_INTERVAL_CHOICES = (
(NOTIFY_DAILY, _('Daily')),
(NOTIFY_HOURLY, _('Hourly')),
)
name = models.CharField(_(u'name'), max_length=250, unique=True)
slug = models.SlugField(_(u'slug'), unique=True)
description = models.TextField(_(u'description'), blank=True, help_text=_('All urls will be converted to links. Line breaks and HTML not supported.'))
logo = S3EnabledImageField(verbose_name=_(u'logo'), blank=True,
upload_to='teams/logo/',
default='',
thumb_sizes=[(280, 100), (100, 100)])
square_logo = S3EnabledImageField(verbose_name=_(u'square logo'),
blank=True,
default='',
upload_to='teams/square-logo/',
thumb_sizes=[(100, 100), (48, 48)])
is_visible = models.BooleanField(_(u'videos public?'), default=True)
videos = models.ManyToManyField(Video, through='TeamVideo', verbose_name=_('videos'))
users = models.ManyToManyField(User, through='TeamMember', related_name='teams', verbose_name=_('users'))
points = models.IntegerField(default=0, editable=False)
applicants = models.ManyToManyField(User, through='Application', related_name='applicated_teams', verbose_name=_('applicants'))
created = models.DateTimeField(auto_now_add=True)
highlight = models.BooleanField(default=False)
video = models.ForeignKey(Video, null=True, blank=True, related_name='intro_for_teams', verbose_name=_(u'Intro Video'))
application_text = models.TextField(blank=True)
page_content = models.TextField(_(u'Page content'), blank=True, help_text=_(u'You can use markdown. This will replace Description.'))
is_moderated = models.BooleanField(default=False)
header_html_text = models.TextField(blank=True, default='', help_text=_(u"HTML that appears at the top of the teams page."))
last_notification_time = models.DateTimeField(editable=False, default=datetime.datetime.now)
notify_interval = models.CharField(max_length=1,
choices=NOTIFY_INTERVAL_CHOICES,
default=NOTIFY_DAILY)
auth_provider_code = models.CharField(_(u'authentication provider code'),
max_length=24, blank=True, default="")
# code value from one the TeamWorkflow subclasses
# Since other apps can add workflow types, let's use this system to avoid
# conflicts:
# - Core types are defined in the teams app and 1 char long
# - Extention types are defined on other apps. They are 2 chars long,
# with the first one being unique to the app.
workflow_type = models.CharField(max_length=2, default='O')
# Enabling Features
projects_enabled = models.BooleanField(default=False)
# Deprecated field that enables the tasks workflow
workflow_enabled = models.BooleanField(default=False)
# Policies and Permissions
membership_policy = models.IntegerField(_(u'membership policy'),
choices=MEMBERSHIP_POLICY_CHOICES,
default=OPEN)
video_policy = models.IntegerField(_(u'video policy'),
choices=VIDEO_POLICY_CHOICES,
default=VP_MEMBER)
# The values below here are mostly specific to the tasks workflow and will
# probably be deleted.
task_assign_policy = models.IntegerField(_(u'task assignment policy'),
choices=TASK_ASSIGN_CHOICES,
default=TASK_ASSIGN_IDS['Any team member'])
subtitle_policy = models.IntegerField(_(u'subtitling policy'),
choices=SUBTITLE_CHOICES,
default=SUBTITLE_IDS['Anyone'])
translate_policy = models.IntegerField(_(u'translation policy'),
choices=SUBTITLE_CHOICES,
default=SUBTITLE_IDS['Anyone'])
max_tasks_per_member = models.PositiveIntegerField(_(u'maximum tasks per member'),
default=None, null=True, blank=True)
task_expiration = models.PositiveIntegerField(_(u'task expiration (days)'),
default=None, null=True, blank=True)
deleted = models.BooleanField(default=False)
partner = models.ForeignKey('Partner', null=True, blank=True,
related_name='teams')
objects = TeamManager()
all_objects = models.Manager() # For accessing deleted teams, if necessary.
cache = ModelCacheManager()
class Meta:
ordering = ['name']
verbose_name = _(u'Team')
verbose_name_plural = _(u'Teams')
def __init__(self, *args, **kwargs):
models.Model.__init__(self, *args, **kwargs)
self._member_cache = {}
def save(self, *args, **kwargs):
creating = self.pk is None
super(Team, self).save(*args, **kwargs)
self.cache.invalidate()
if creating:
# create a default project
self.default_project
# setup our workflow
self.new_workflow.setup_team()
def __unicode__(self):
return self.name or self.slug
def is_tasks_team(self):
return self.workflow_enabled
@property
def new_workflow(self):
if not hasattr(self, '_new_workflow'):
self._new_workflow = workflows.TeamWorkflow.get_workflow(self)
return self._new_workflow
def is_old_style(self):
return self.workflow_type == "O"
def get_tasks_page_url(self):
return reverse('teams:team_tasks', kwargs={
'slug': self.slug,
})
def languages(self, members_joined_since=None):
"""Returns the languages spoken by the member of the team
"""
if members_joined_since:
users = self.members_since(members_joined_since)
else:
users = self.users.all()
return UserLanguage.objects.filter(user__in=users).values_list('language', flat=True)
def active_users(self, since=None, published=True):
sv = NewSubtitleVersion.objects.filter(video__in=self.videos.all())
if published:
sv = sv.filter(Q(visibility_override='public') | Q(visibility='public'))
if since:
sv = sv.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=since))
return sv.exclude(author__username="anonymous").values_list('author', 'subtitle_language')
def get_default_message(self, name):
return fmt(Setting.MESSAGE_DEFAULTS.get(name, ''), team=self)
def get_messages(self, names):
"""Fetch messages from the settings objects
This method fetches the messages assocated with names and interpolates
them to replace %(team)s with the team name.
Returns:
dict mapping names to message text
"""
messages = {
name: self.get_default_message(name)
for name in names
}
for setting in self.settings.with_names(names):
if setting.data:
messages[setting.key_name] = setting.data
return messages
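# Illustrative sketch, not from the original source, using a key name that
# appears elsewhere in this module:
#   team.get_messages(['messages_application'])
# returns {'messages_application': <team override or interpolated default>},
# preferring the team's Setting row and falling back to
# Setting.MESSAGE_DEFAULTS with %(team)s filled in.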
def render_message(self, msg):
"""Return a string of HTML represention a team header for a notification.
TODO: Get this out of the model and into a templatetag or something.
"""
author_page = msg.author.get_absolute_url() if msg.author else ''
context = {
'team': self,
'msg': msg,
'author': msg.author,
'author_page': author_page,
'team_page': self.get_absolute_url(),
"STATIC_URL": settings.STATIC_URL,
}
return render_to_string('teams/_team_message.html', context)
def is_open(self):
"""Return whether this team's membership is open to the public."""
return self.membership_policy == self.OPEN
def is_by_application(self):
"""Return whether this team's membership is by application only."""
return self.membership_policy == self.APPLICATION
def get_workflow(self):
"""Return the workflow for the given team.
A workflow will always be returned. If one isn't specified for the team
a default (unsaved) one will be populated with default values and
returned.
TODO: Refactor this behaviour into something less confusing.
"""
return Workflow.get_for_target(self.id, 'team')
@property
def auth_provider(self):
"""Return the authentication provider class for this Team, or None.
No DB queries are used, so this is safe to call many times.
"""
if not self.auth_provider_code:
return None
else:
return get_authentication_provider(self.auth_provider_code)
# Thumbnails
def logo_thumbnail(self):
"""URL for a kind-of small version of this team's logo, or None."""
if self.logo:
return self.logo.thumb_url(100, 100)
def logo_thumbnail_medium(self):
"""URL for a medium version of this team's logo, or None."""
if self.logo:
return self.logo.thumb_url(280, 100)
def square_logo_thumbnail(self):
"""URL for this team's square logo, or None."""
if self.square_logo:
return self.square_logo.thumb_url(100, 100)
def square_logo_thumbnail_small(self):
"""URL for a small version of this team's square logo, or None."""
if self.square_logo:
return self.square_logo.thumb_url(48, 48)
# URLs
@models.permalink
def get_absolute_url(self):
return ('teams:dashboard', [self.slug])
def get_site_url(self):
"""Return the full, absolute URL for this team, including http:// and the domain."""
return '%s://%s%s' % (DEFAULT_PROTOCOL,
Site.objects.get_current().domain,
self.get_absolute_url())
# Membership and roles
def get_member(self, user):
"""Get a TeamMember object for a user or None."""
if not user.is_authenticated():
return None
if user.id in self._member_cache:
return self._member_cache[user.id]
try:
member = self.members.get(user=user)
except TeamMember.DoesNotExist:
member = None
self._member_cache[user.id] = member
return member
def user_is_member(self, user):
members = self.cache.get('members')
if members is None:
members = list(self.members.values_list('user_id', flat=True))
self.cache.set('members', members)
return user.id in members
def uncache_member(self, user):
try:
del self._member_cache[user.id]
except KeyError:
pass
def user_can_view_videos(self, user):
return self.is_visible or self.user_is_member(user)
def _is_role(self, user, role=None):
"""Return whether the given user has the given role in this team.
Safe to use with null or unauthenticated users.
If no role is given, simply return whether the user is a member of this team at all.
TODO: Change this to use the stuff in teams.permissions.
"""
if not user or not user.is_authenticated():
return False
qs = self.members.filter(user=user)
if role:
qs = qs.filter(role=role)
return qs.exists()
def can_bulk_approve(self, user):
return self.is_owner(user) or self.is_admin(user)
def is_owner(self, user):
"""
Return whether the given user is an owner of this team.
"""
return self._is_role(user, TeamMember.ROLE_OWNER)
def is_admin(self, user):
"""Return whether the given user is an admin of this team."""
return self._is_role(user, TeamMember.ROLE_ADMIN)
def is_manager(self, user):
"""Return whether the given user is a manager of this team."""
return self._is_role(user, TeamMember.ROLE_MANAGER)
def is_member(self, user):
"""Return whether the given user is a member of this team."""
return self._is_role(user)
def is_contributor(self, user, authenticated=True):
"""Return whether the given user is a contributor of this team, False otherwise."""
return self._is_role(user, TeamMember.ROLE_CONTRIBUTOR)
def can_see_video(self, user, team_video=None):
"""I have no idea.
TODO: Figure out what this thing is, and if it's still necessary.
"""
if not user.is_authenticated():
return False
return self.is_member(user)
def fetch_video_actions(self, video_language=None):
"""Fetch the Action objects for this team's videos
Args:
video_language: only actions for videos with this
primary_audio_language_code
"""
video_q = TeamVideo.objects.filter(team=self).values_list('video_id')
if video_language is not None:
video_q = video_q.filter(
video__primary_audio_language_code=video_language)
return Action.objects.filter(video_id__in=video_q)
# moderation
# Moderation
def moderates_videos(self):
"""Return whether this team moderates videos in some way, False otherwise.
Moderation means the team restricts who can create subtitles and/or
translations.
"""
if self.subtitle_policy != Team.SUBTITLE_IDS['Anyone']:
return True
if self.translate_policy != Team.SUBTITLE_IDS['Anyone']:
return True
return False
def video_is_moderated_by_team(self, video):
"""Return whether this team moderates the given video."""
return video.moderated_by == self
# Item counts
@property
def members_count(self):
"""Return the number of members of this team.
Caches the result in-object for performance.
"""
if not hasattr(self, '_members_count'):
setattr(self, '_members_count', self.users.count())
return self._members_count
def members_count_since(self, joined_since):
"""Return the number of members of this team who joined the last n days.
"""
return self.users.filter(date_joined__gt=datetime.datetime.now() - datetime.timedelta(days=joined_since)).count()
def members_since(self, joined_since):
""" Returns the members who joined the team the last n days
"""
return self.users.filter(date_joined__gt=datetime.datetime.now() - datetime.timedelta(days=joined_since))
@property
def videos_count(self):
"""Return the number of videos of this team.
Caches the result in-object for performance.
"""
if not hasattr(self, '_videos_count'):
setattr(self, '_videos_count', self.teamvideo_set.count())
return self._videos_count
def videos_count_since(self, added_since):
"""Return the number of videos of this team added in the last n days.
"""
return self.teamvideo_set.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=added_since)).count()
def videos_since(self, added_since):
"""Returns the videos of this team added the last n days.
"""
return self.videos.filter(created__gt=datetime.datetime.now() - datetime.timedelta(days=added_since))
def unassigned_tasks(self, sort=None):
qs = Task.objects.filter(team=self, deleted=False, completed=None, assignee=None, type=Task.TYPE_IDS['Approve'])
if sort is not None:
qs = qs.order_by(sort)
return qs
def get_task(self, task_pk):
return Task.objects.get(pk=task_pk)
def get_tasks(self, task_pks):
return Task.objects.filter(pk__in=task_pks).select_related('new_subtitle_version', 'new_subtitle_version__subtitle_language', 'team_video', 'team_video__video', 'team_video__video__teamvideo', 'workflow')
def _count_tasks(self):
qs = Task.objects.filter(team=self, deleted=False, completed=None)
# Quick check: if there are more than 1000 tasks, return 1001 and let the
# UI display "> 1000".
if qs[1000:1001].exists():
return 1001
else:
return qs.count()
@property
def tasks_count(self):
"""Return the number of incomplete, undeleted tasks of this team.
Caches the result in-object for performance.
Note: the count is capped at 1001 tasks. If a team has more than
that, we generally just want to display "> 1000". Use
get_tasks_count_display() to do that.
"""
if not hasattr(self, '_tasks_count'):
setattr(self, '_tasks_count', self._count_tasks())
return self._tasks_count
def get_tasks_count_display(self):
"""Get a string to display for our tasks count."""
if self.tasks_count <= 1000:
return unicode(self.tasks_count)
else:
return ugettext('> 1000')
# Applications (people applying to join)
def application_message(self):
"""Return the membership application message for this team, or '' if none exists."""
try:
return self.settings.get(key=Setting.KEY_IDS['messages_application']).data
except Setting.DoesNotExist:
return ''
@property
def applications_count(self):
"""Return the number of open membership applications to this team.
Caches the result in-object for performance.
"""
if not hasattr(self, '_applications_count'):
setattr(self, '_applications_count', self.applications.count())
return self._applications_count
# Language pairs
def _lang_pair(self, lp, suffix):
return SQ(content="{0}_{1}_{2}".format(lp[0], lp[1], suffix))
def get_videos_for_languages_haystack(self, language=None,
num_completed_langs=None,
project=None, user=None, query=None,
sort=None, exclude_language=None):
qs = self.get_videos_for_user(user)
if project:
qs = qs.filter(project_pk=project.pk)
if query:
for term in get_terms(query):
qs = qs.auto_query(qs.query.clean(term).decode('utf-8'))
if language:
qs = qs.filter(video_completed_langs=language)
if exclude_language:
qs = qs.exclude(video_completed_langs=exclude_language)
if num_completed_langs is not None:
qs = qs.filter(num_completed_langs=num_completed_langs)
qs = qs.order_by({
'name': 'video_title_exact',
'-name': '-video_title_exact',
'subs': 'num_completed_langs',
'-subs': '-num_completed_langs',
'time': 'team_video_create_date',
'-time': '-team_video_create_date',
}.get(sort or '-time'))
return qs
def get_videos_for_user(self, user):
from teams.search_indexes import TeamVideoLanguagesIndex
is_member = (user and user.is_authenticated()
and self.members.filter(user=user).exists())
if is_member:
return TeamVideoLanguagesIndex.results_for_members(self).filter(team_id=self.id)
else:
return TeamVideoLanguagesIndex.results().filter(team_id=self.id)
# Projects
@property
def default_project(self):
"""Return the default project for this team.
If it doesn't already exist it will be created.
TODO: Move the creation into a signal on the team to avoid creating
multiple default projects here?
"""
try:
return Project.objects.get(team=self, slug=Project.DEFAULT_NAME)
except Project.DoesNotExist:
p = Project(team=self, name=Project.DEFAULT_NAME)
p.save()
return p
@property
def has_projects(self):
"""Return whether this team has projects other than the default one."""
return self.project_set.count() > 1
# Readable/writeable language codes
def get_writable_langs(self):
"""Return a list of language code strings that are writable for this team.
This value may come from memcache.
"""
return TeamLanguagePreference.objects.get_writable(self)
def get_readable_langs(self):
"""Return a list of language code strings that are readable for this team.
This value may come from memcache.
"""
return TeamLanguagePreference.objects.get_readable(self)
def get_team_languages(self, since=None):
query_sl = NewSubtitleLanguage.objects.filter(video__in=self.videos.all())
new_languages = []
if since:
query_sl = query_sl.filter(id__in=NewSubtitleVersion.objects.filter(video__in=self.videos.all(),
created__gt=datetime.datetime.now() - datetime.timedelta(days=since)).order_by('subtitle_language').values_list('subtitle_language', flat=True).distinct())
new_languages = list(NewSubtitleLanguage.objects.filter(video__in=self.videos_since(since)).values_list('language_code', 'subtitles_complete'))
query_sl = query_sl.values_list('language_code', 'subtitles_complete')
languages = list(query_sl)
def first_member(x):
return x[0]
complete_languages = map(first_member, filter(lambda x: x[1], languages))
incomplete_languages = map(first_member, filter(lambda x: not x[1], languages))
new_languages = map(first_member, new_languages)
if since:
return (complete_languages, incomplete_languages, new_languages)
else:
return (complete_languages, incomplete_languages)
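# Return-shape sketch (hypothetical codes): with since=None this returns
# (complete, incomplete), e.g. (['en', 'fr'], ['de']); with since=N it
# appends the codes of languages added in that window as a third list.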
# This needs to be constructed after the model definition since we need a
# reference to the class itself.
Team._meta.permissions = TEAM_PERMISSIONS
# Project
class ProjectManager(models.Manager):
def for_team(self, team_identifier):
"""Return all non-default projects for the given team with the given identifier.
The team_identifier passed may be an actual Team object, or a string
containing a team slug, or the primary key of a team as an integer.
"""
if hasattr(team_identifier, "pk"):
team = team_identifier
elif isinstance(team_identifier, int):
team = Team.objects.get(pk=team_identifier)
elif isinstance(team_identifier, basestring):
team = Team.objects.get(slug=team_identifier)
return Project.objects.filter(team=team).exclude(name=Project.DEFAULT_NAME)
class Project(models.Model):
# All team videos belong to a project, whether the team has enabled them
# or not. The default project is just a convenience UI that pretends to be
# part of the team. If this ever gets changed, you need to change migrations/0044
DEFAULT_NAME = "_root"
team = models.ForeignKey(Team)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(blank=True)
name = models.CharField(max_length=255, null=False)
description = models.TextField(blank=True, null=True, max_length=2048)
guidelines = models.TextField(blank=True, null=True, max_length=2048)
slug = models.SlugField(blank=True)
order = models.PositiveIntegerField(default=0)
workflow_enabled = models.BooleanField(default=False)
objects = ProjectManager()
def __unicode__(self):
if self.is_default_project:
return u"---------"
return u"%s" % (self.name)
def display(self, default_project_label=None):
if self.is_default_project and default_project_label is not None:
return default_project_label
return self.__unicode__()
def save(self, slug=None, *args, **kwargs):
self.modified = datetime.datetime.now()
if slug is not None:
self.slug = pan_slugify(slug)
elif not self.slug:
self.slug = pan_slugify(self.name)
super(Project, self).save(*args, **kwargs)
@property
def is_default_project(self):
"""Return whether this project is a default project for a team."""
return self.name == Project.DEFAULT_NAME
def get_site_url(self):
"""Return the full, absolute URL for this project, including http:// and the domain."""
return '%s://%s%s' % (DEFAULT_PROTOCOL, Site.objects.get_current().domain, self.get_absolute_url())
@models.permalink
def get_absolute_url(self):
return ('teams:project_video_list', [self.team.slug, self.slug])
@property
def videos_count(self):
"""Return the number of videos in this project.
Caches the result in-object for performance.
"""
if not hasattr(self, '_videos_count'):
setattr(self, '_videos_count', TeamVideo.objects.filter(project=self).count())
return self._videos_count
def _count_tasks(self):
qs = Task.objects.filter(team_video__project=self, deleted=False, completed=None)
# Quick check: if there are more than 1000 tasks, return 1001 and let
# the UI display "> 1000".
if qs[1000:1001].exists():
return 1001
else:
return qs.count()
@property
def tasks_count(self):
"""Return the number of incomplete, undeleted tasks in this project.
Caches the result in-object for performance.
"""
if not hasattr(self, '_tasks_count'):
setattr(self, '_tasks_count', self._count_tasks())
return self._tasks_count
class Meta:
unique_together = (
("team", "name",),
("team", "slug",),
)
permissions = PROJECT_PERMISSIONS
# TeamVideo
class TeamVideo(models.Model):
THUMBNAIL_SIZE = (288, 162)
team = models.ForeignKey(Team)
video = models.OneToOneField(Video)
description = models.TextField(blank=True,
help_text=_(u'Use this space to explain why you or your team need to '
u'caption or subtitle this video. Adding a note makes '
u'volunteers more likely to help out!'))
thumbnail = S3EnabledImageField(upload_to='teams/video_thumbnails/', null=True, blank=True,
help_text=_(u'We automatically grab thumbnails for certain sites, e.g. Youtube'),
thumb_sizes=(THUMBNAIL_SIZE, (120,90),))
all_languages = models.BooleanField(_('Need help with all languages'), default=False,
help_text=_(u'If you check this, other languages will not be displayed.'))
added_by = models.ForeignKey(User, null=True)
# this is an auto_now_add-like field, but set on the model save so the
# admin doesn't throw a fit
created = models.DateTimeField(blank=True)
partner_id = models.CharField(max_length=100, blank=True, default="")
project = models.ForeignKey(Project)
class Meta:
unique_together = (('team', 'video'),)
def __unicode__(self):
return unicode(self.video)
@models.permalink
def get_absolute_url(self):
return ('teams:team_video', [self.pk])
def get_tasks_page_url(self):
return "%s?team_video=%s" % (self.team.get_tasks_page_url(), self.pk)
def get_thumbnail(self):
if self.thumbnail:
return self.thumbnail.thumb_url(*TeamVideo.THUMBNAIL_SIZE)
video_thumb = self.video.get_thumbnail(fallback=False)
if video_thumb:
return video_thumb
return "%simages/video-no-thumbnail-medium.png" % settings.STATIC_URL
def _original_language(self):
if not hasattr(self, 'original_language_code'):
sub_lang = self.video.subtitle_language()
setattr(self, 'original_language_code', None if not sub_lang else sub_lang.language)
return getattr(self, 'original_language_code')
def save(self, *args, **kwargs):
if not hasattr(self, "project"):
self.project = self.team.default_project
assert self.project.team == self.team, \
"%s: Team (%s) is not equal to project's (%s) team (%s)"\
% (self, self.team, self.project, self.project.team)
if not self.pk:
self.created = datetime.datetime.now()
self.video.cache.invalidate()
self.video.clear_team_video_cache()
super(TeamVideo, self).save(*args, **kwargs)
def is_checked_out(self, ignore_user=None):
'''Return whether this video is checked out in a task.
If a user is given, checkouts by that user will be ignored. This
provides a way to ask "can user X check out or work on this task?".
This is similar to the writelocking done on Videos and
SubtitleLanguages.
'''
tasks = self.task_set.filter(
# Find all tasks for this video which:
deleted=False, # - Aren't deleted
assignee__isnull=False, # - Are assigned to someone
language="", # - Aren't specific to a language
completed__isnull=True, # - Are unfinished
)
if ignore_user:
tasks = tasks.exclude(assignee=ignore_user)
return tasks.exists()
# Convenience functions
def subtitles_started(self):
"""Return whether subtitles have been started for this video."""
from subtitles.models import SubtitleLanguage
return (SubtitleLanguage.objects.having_nonempty_versions()
.filter(video=self.video)
.exists())
def subtitles_finished(self):
"""Return whether at least one set of subtitles has been finished for this video."""
qs = (self.video.newsubtitlelanguage_set.having_public_versions()
.filter(subtitles_complete=True))
for lang in qs:
if lang.is_synced():
return True
return False
def get_workflow(self):
"""Return the appropriate Workflow for this TeamVideo."""
return Workflow.get_for_team_video(self)
def move_to(self, new_team, project=None):
"""
Moves this TeamVideo to a new team.
This method expects you to have run the correct permissions checks.
"""
old_team = self.team
if old_team == new_team and project == self.project:
return
within_team = (old_team == new_team)
# these imports are here to avoid circular imports, hacky
from teams.signals import api_teamvideo_new
from teams.signals import video_moved_from_team_to_team
from videos import metadata_manager
# For now, we'll just delete any tasks associated with the moved video.
if not within_team:
self.task_set.update(deleted=True)
# We move the video by just switching the team, instead of deleting and
# recreating it.
self.team = new_team
# projects are always team dependent:
if project:
self.project = project
else:
self.project = new_team.default_project
self.save()
if not within_team:
# We need to make any as-yet-unmoderated versions public.
# TODO: Dedupe this and the team video delete signal.
video = self.video
video.newsubtitleversion_set.extant().update(visibility='public')
video.is_public = new_team.is_visible
video.moderated_by = new_team if new_team.moderates_videos() else None
video.save()
TeamVideoMigration.objects.create(from_team=old_team,
to_team=new_team,
to_project=self.project)
# Update search data and other things
video_changed_tasks.delay(video.pk)
# Create any necessary tasks.
autocreate_tasks(self)
# fire a http notification that a new video has hit this team:
api_teamvideo_new.send(self)
video_moved_from_team_to_team.send(sender=self,
destination_team=new_team, video=self.video)
def get_task_for_editor(self, language_code):
if not hasattr(self, '_editor_task'):
self._editor_task = self._get_task_for_editor(language_code)
return self._editor_task
def _get_task_for_editor(self, language_code):
task_set = self.task_set.incomplete().filter(language=language_code)
# 2533: We can get 2 review tasks if we include translate/transcribe
# tasks in the results. This is because when we have a task id and
# the user clicks endorse, we do the following:
# - save the subtitles
# - save the task, setting subtitle_version to the version that we
# just saved
#
# However, the task code creates a task on both of those steps. I'm not
# sure exactly what the old editor does to make this not happen, but
# it's safest to just not send task_id in that case
task_set = task_set.filter(type__in=(Task.TYPE_IDS['Review'],
Task.TYPE_IDS['Approve']))
# This assumes there is only 1 incomplete task at a time; hopefully
# that's a good enough assumption to hold until we dump tasks for the
# collab model.
tasks = list(task_set[:1])
if tasks:
return tasks[0]
else:
return None
@staticmethod
def get_videos_non_language_ids(team, language_code, non_empty_language_code=False):
if non_empty_language_code:
return TeamVideo.objects.filter(
team=team).exclude(
video__primary_audio_language_code__gt=language_code).values_list('id', flat=True)
return TeamVideo.objects.filter(
team=team).exclude(
video__primary_audio_language_code=language_code).values_list('id', flat=True)
class TeamVideoMigration(models.Model):
from_team = models.ForeignKey(Team, related_name='+')
to_team = models.ForeignKey(Team, related_name='+')
to_project = models.ForeignKey(Project, related_name='+')
datetime = models.DateTimeField()
def __init__(self, *args, **kwargs):
if 'datetime' not in kwargs:
kwargs['datetime'] = self.now()
models.Model.__init__(self, *args, **kwargs)
@staticmethod
def now():
# Make now a function so we can patch it in the unittests
return datetime.datetime.now()
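# Test-time sketch: because now() is a plain staticmethod, it can be
# patched without touching the datetime module itself (mock library and
# fixture objects a, b, p are assumed here, not defined in this file):
#
#   >>> from mock import patch
#   >>> with patch.object(TeamVideoMigration, 'now') as mock_now:
#   ...     mock_now.return_value = datetime.datetime(2013, 1, 1)
#   ...     m = TeamVideoMigration(from_team=a, to_team=b, to_project=p)
#   >>> m.datetime
#   datetime.datetime(2013, 1, 1, 0, 0)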
def _create_translation_tasks(team_video, subtitle_version=None):
"""Create any translation tasks that should be autocreated for this video.
subtitle_version should be the original SubtitleVersion that these tasks
will probably be translating from.
"""
preferred_langs = TeamLanguagePreference.objects.get_preferred(team_video.team)
for lang in preferred_langs:
# Don't create tasks for languages that are already complete.
sl = team_video.video.subtitle_language(lang)
if sl and sl.is_complete_and_synced():
continue
# Don't create tasks for languages that already have one. This includes
# review/approve tasks and such.
# Doesn't matter if it's complete or not.
task_exists = Task.objects.not_deleted().filter(
team=team_video.team, team_video=team_video, language=lang
).exists()
if task_exists:
continue
# Otherwise, go ahead and create it.
task = Task(team=team_video.team, team_video=team_video,
language=lang, type=Task.TYPE_IDS['Translate'])
# we should only update the team video after all tasks for
# this video are saved, else we end up with a lot of
# wasted tasks
task.save(update_team_video_index=False)
tasks.update_one_team_video.delay(team_video.pk)
def autocreate_tasks(team_video):
workflow = Workflow.get_for_team_video(team_video)
existing_subtitles = team_video.video.completed_subtitle_languages(public_only=True)
# We may need to create a transcribe task, if there are no existing subs.
if workflow.autocreate_subtitle and not existing_subtitles:
if not team_video.task_set.not_deleted().exists():
original_language = team_video.video.primary_audio_language_code
Task(team=team_video.team,
team_video=team_video,
subtitle_version=None,
language= original_language or '',
type=Task.TYPE_IDS['Subtitle']
).save()
# If there are existing subtitles, we may need to create translate tasks.
#
# TODO: This sets the "source version" for the translations to an arbitrary
# language's version. In practice this probably won't be a problem
# because most teams will transcribe one language and then send to a
# new team for translation, but we can probably be smarter about this
# if we spend some time.
if workflow.autocreate_translate and existing_subtitles:
_create_translation_tasks(team_video)
def team_video_save(sender, instance, created, **kwargs):
"""Update the Solr index for this team video.
TODO: Rename this to something more specific.
"""
tasks.update_one_team_video.delay(instance.id)
def team_video_delete(sender, instance, **kwargs):
"""Perform necessary actions for when a TeamVideo is deleted.
TODO: Split this up into separate signals.
"""
from videos import metadata_manager
# not using an async task for this since the async task
# could easily execute way after the instance is gone,
# and backend.remove requires the instance.
tv_search_index = site.get_index(TeamVideo)
tv_search_index.backend.remove(instance)
try:
video = instance.video
# we need to publish all unpublished subs for this video:
NewSubtitleVersion.objects.filter(video=video,
visibility='private').update(visibility='public')
video.is_public = True
video.moderated_by = None
video.save()
metadata_manager.update_metadata(video.pk)
video.update_search_index()
except Video.DoesNotExist:
pass
if instance.video_id is not None:
Video.cache.invalidate_by_pk(instance.video_id)
def on_language_deleted(sender, **kwargs):
"""When a language is deleted, delete all tasks associated with it."""
team_video = sender.video.get_team_video()
if not team_video:
return
Task.objects.filter(team_video=team_video,
language=sender.language_code).delete()
# check if there are no more source languages for the video, and in that
# case delete all transcribe tasks. Don't delete:
# - transcribe tasks that have already been started
# - review tasks
# - approve tasks
if not sender.video.has_public_version():
# filtering on new_subtitle_version=None excludes all 3 cases where we
# don't want to delete tasks
Task.objects.filter(team_video=team_video,
new_subtitle_version=None).delete()
def team_video_autocreate_task(sender, instance, created, raw, **kwargs):
"""Create subtitle/translation tasks for a newly added TeamVideo, if necessary."""
if created and not raw:
autocreate_tasks(instance)
def team_video_add_video_moderation(sender, instance, created, raw, **kwargs):
"""Set the .moderated_by attribute on a newly created TeamVideo's Video, if necessary."""
if created and not raw and instance.team.moderates_videos():
instance.video.moderated_by = instance.team
instance.video.save()
def team_video_rm_video_moderation(sender, instance, **kwargs):
"""Clear the .moderated_by attribute on a newly deleted TeamVideo's Video, if necessary."""
try:
# when removing a video, this will be triggered by the FK constraint
# and the video will already be removed
instance.video.moderated_by = None
instance.video.save()
except Video.DoesNotExist:
pass
post_save.connect(team_video_save, TeamVideo, dispatch_uid="teams.teamvideo.team_video_save")
post_save.connect(team_video_autocreate_task, TeamVideo, dispatch_uid='teams.teamvideo.team_video_autocreate_task')
post_save.connect(team_video_add_video_moderation, TeamVideo, dispatch_uid='teams.teamvideo.team_video_add_video_moderation')
post_delete.connect(team_video_delete, TeamVideo, dispatch_uid="teams.teamvideo.team_video_delete")
post_delete.connect(team_video_rm_video_moderation, TeamVideo, dispatch_uid="teams.teamvideo.team_video_rm_video_moderation")
language_deleted.connect(on_language_deleted, dispatch_uid="teams.subtitlelanguage.language_deleted")
# TeamMember
class TeamMemberManager(models.Manager):
use_for_related_fields = True
def create_first_member(self, team, user):
"""Make sure that new teams always have an 'owner' member."""
tm = TeamMember(team=team, user=user, role=ROLE_OWNER)
tm.save()
return tm
def admins(self):
return self.filter(role__in=(ROLE_OWNER, ROLE_ADMIN))
class TeamMember(models.Model):
ROLE_OWNER = ROLE_OWNER
ROLE_ADMIN = ROLE_ADMIN
ROLE_MANAGER = ROLE_MANAGER
ROLE_CONTRIBUTOR = ROLE_CONTRIBUTOR
ROLES = (
(ROLE_OWNER, _("Owner")),
(ROLE_MANAGER, _("Manager")),
(ROLE_ADMIN, _("Admin")),
(ROLE_CONTRIBUTOR, _("Contributor")),
)
team = models.ForeignKey(Team, related_name='members')
user = models.ForeignKey(User, related_name='team_members')
role = models.CharField(max_length=16, default=ROLE_CONTRIBUTOR, choices=ROLES, db_index=True)
created = models.DateTimeField(default=datetime.datetime.now, null=True,
blank=True)
objects = TeamMemberManager()
def __unicode__(self):
return u'%s' % self.user
def save(self, *args, **kwargs):
super(TeamMember, self).save(*args, **kwargs)
Team.cache.invalidate_by_pk(self.team_id)
def delete(self):
super(TeamMember, self).delete()
Team.cache.invalidate_by_pk(self.team_id)
def project_narrowings(self):
"""Return any project narrowings applied to this member."""
return self.narrowings.filter(project__isnull=False)
def language_narrowings(self):
"""Return any language narrowings applied to this member."""
return self.narrowings.filter(project__isnull=True)
def project_narrowings_fast(self):
"""Return any project narrowings applied to this member.
Caches the result in-object for speed.
"""
return [n for n in self.narrowings_fast() if n.project]
def language_narrowings_fast(self):
"""Return any language narrowings applied to this member.
Caches the result in-object for speed.
"""
return [n for n in self.narrowings_fast() if n.language]
def narrowings_fast(self):
"""Return any narrowings (both project and language) applied to this member.
Caches the result in-object for speed.
"""
if hasattr(self, '_cached_narrowings'):
if self._cached_narrowings is not None:
return self._cached_narrowings
self._cached_narrowings = self.narrowings.all()
return self._cached_narrowings
def has_max_tasks(self):
"""Return whether this member has the maximum number of tasks."""
max_tasks = self.team.max_tasks_per_member
if max_tasks:
if self.user.task_set.incomplete().filter(team=self.team).count() >= max_tasks:
return True
return False
def is_manager(self):
"""Test if the user is a manager or above."""
return self.role in (ROLE_OWNER, ROLE_ADMIN, ROLE_MANAGER)
def is_admin(self):
"""Test if the user is an admin or owner."""
return self.role in (ROLE_OWNER, ROLE_ADMIN)
class Meta:
unique_together = (('team', 'user'),)
def clear_tasks(sender, instance, *args, **kwargs):
"""Unassign all tasks assigned to a user.
Used when deleting a user from a team.
"""
tasks = instance.team.task_set.incomplete().filter(assignee=instance.user)
tasks.update(assignee=None)
pre_delete.connect(clear_tasks, TeamMember, dispatch_uid='teams.members.clear-tasks-on-delete')
# MembershipNarrowing
class MembershipNarrowing(models.Model):
"""Represent narrowings that can be made on memberships.
A single MembershipNarrowing can apply to a project or a language, but not both.
"""
member = models.ForeignKey(TeamMember, related_name="narrowings")
project = models.ForeignKey(Project, null=True, blank=True)
language = models.CharField(max_length=24, blank=True,
choices=translation.ALL_LANGUAGE_CHOICES)
added_by = models.ForeignKey(TeamMember, related_name="narrowing_includer", null=True, blank=True)
created = models.DateTimeField(auto_now_add=True, blank=True)
modified = models.DateTimeField(auto_now=True, blank=True)
def __unicode__(self):
if self.project:
return u"Permission restriction for %s to project %s " % (self.member, self.project)
else:
return u"Permission restriction for %s to language %s " % (self.member, self.language)
def save(self, *args, **kwargs):
# Cannot have duplicate narrowings for a language.
if self.language:
duplicate_exists = MembershipNarrowing.objects.filter(
member=self.member, language=self.language
).exclude(id=self.id).exists()
assert not duplicate_exists, "Duplicate language narrowing detected!"
# Cannot have duplicate narrowings for a project.
if self.project:
duplicate_exists = MembershipNarrowing.objects.filter(
member=self.member, project=self.project
).exclude(id=self.id).exists()
assert not duplicate_exists, "Duplicate project narrowing detected!"
super(MembershipNarrowing, self).save(*args, **kwargs)
Team.cache.invalidate_by_pk(self.member.team_id)
def delete(self):
super(MembershipNarrowing, self).delete()
Team.cache.invalidate_by_pk(self.member.team_id)
class TeamSubtitleNote(SubtitleNoteBase):
team = models.ForeignKey(Team, related_name='+')
class ApplicationInvalidException(Exception):
pass
class ApplicationManager(models.Manager):
def can_apply(self, team, user):
"""
A user can apply either if he is not a member of the team yet, the
team hasn't said no to the user (either application denied or removed the user'
and if no applications are pending.
"""
sour_application_exists = self.filter(team=team, user=user, status__in=[
Application.STATUS_MEMBER_REMOVED, Application.STATUS_DENIED,
Application.STATUS_PENDING]).exists()
if sour_application_exists:
return False
return not team.is_member(user)
def open(self, team=None, user=None):
qs = self.filter(status=Application.STATUS_PENDING)
if team:
qs = qs.filter(team=team)
if user:
qs = qs.filter(user=user)
return qs
# Application
class Application(models.Model):
team = models.ForeignKey(Team, related_name='applications')
user = models.ForeignKey(User, related_name='team_applications')
note = models.TextField(blank=True)
# Integer statuses; a pending application has not been acted upon yet
# (see STATUSES below for the full set).
STATUS_PENDING, STATUS_APPROVED, STATUS_DENIED, STATUS_MEMBER_REMOVED,\
STATUS_MEMBER_LEFT = xrange(0, 5)
STATUSES = (
(STATUS_PENDING, u"Pending"),
(STATUS_APPROVED, u"Approved"),
(STATUS_DENIED, u"Denied"),
(STATUS_MEMBER_REMOVED, u"Member Removed"),
(STATUS_MEMBER_LEFT, u"Member Left"),
)
STATUSES_IDS = dict([choice[::-1] for choice in STATUSES])
status = models.PositiveIntegerField(default=STATUS_PENDING, choices=STATUSES)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(blank=True, null=True)
# free text keeping a log of changes to this application
history = models.TextField(blank=True, null=True)
objects = ApplicationManager()
class Meta:
unique_together = (('team', 'user', 'status'),)
def approve(self, author, interface):
"""Approve the application.
This will create an appropriate TeamMember if this application has
not been already acted upon
"""
if self.status not in (Application.STATUS_PENDING, Application.STATUS_MEMBER_LEFT):
raise ApplicationInvalidException("Application has already been acted upon")
member, created = TeamMember.objects.get_or_create(team=self.team, user=self.user)
if created:
notifier.team_member_new.delay(member.pk)
self.modified = datetime.datetime.now()
self.status = Application.STATUS_APPROVED
self.save(author=author, interface=interface)
return self
def deny(self, author, interface):
"""
Marks the application as not approved, then
Queue a Celery task that will handle properly denying this
application.
"""
if self.status != Application.STATUS_PENDING:
raise ApplicationInvalidException("Application is not pending")
self.modified = datetime.datetime.now()
self.status = Application.STATUS_DENIED
self.save(author=author, interface=interface)
notifier.team_application_denied.delay(self.pk)
return self
def on_member_leave(self, author, interface):
"""
Marks the appropriate status, but users can still
reapply to a team if they so desire later.
"""
self.status = Application.STATUS_MEMBER_LEFT
self.save(author=author, interface=interface)
def on_member_removed(self, author, interface):
"""
Marks the appropriate status so that user's cannot reapply
to a team after being removed.
"""
self.status = Application.STATUS_MEMBER_REMOVED
self.save(author=author, interface=interface)
def _generate_history_line(self, new_status, author=None, interface=None):
author = author or "?"
interface = interface or "web UI"
new_status = new_status if new_status is not None else Application.STATUS_PENDING
status = None
for value, name in Application.STATUSES:
if value == new_status:
status = name
assert status is not None, "Unknown application status: %r" % new_status
return u"%s by %s from %s (%s)\n" % (status, author, interface, datetime.datetime.now())
def save(self, dispatches_http_callback=True, author=None, interface=None, *args, **kwargs):
"""
Saves the model, but also appends a line on the history for that
model, like these:
- CoolGuy Approved through the web UI.
- Arthur Left team through the web UI.
This way, we can keep one application per user per team, never
delete them (so the messages stay current), and we can still
track history.
"""
self.history = (self.history or "") + self._generate_history_line(self.status, author, interface)
super(Application, self).save(*args, **kwargs)
if dispatches_http_callback:
from teams.signals import api_application_new
api_application_new.send(self)
def __unicode__(self):
return "Application: %s - %s - %s" % (self.team.slug, self.user.username, self.get_status_display())
# Invites
class InviteExpiredException(Exception):
pass
class InviteManager(models.Manager):
def pending_for(self, team, user):
return self.filter(team=team, user=user, approved=None)
def acted_on(self, team, user):
return self.filter(team=team, user=user, approved__isnull=False)
class Invite(models.Model):
team = models.ForeignKey(Team, related_name='invitations')
user = models.ForeignKey(User, related_name='team_invitations')
note = models.TextField(blank=True, max_length=200)
author = models.ForeignKey(User)
role = models.CharField(max_length=16, choices=TeamMember.ROLES,
default=TeamMember.ROLE_CONTRIBUTOR)
# None -> not acted upon
# True -> Approved
# False -> Rejected
approved = models.NullBooleanField(default=None)
objects = InviteManager()
def accept(self):
"""Accept this invitation.
Creates an appropriate TeamMember record, sends a notification and
deletes itself.
"""
if self.approved is not None:
raise InviteExpiredException("")
self.approved = True
member, created = TeamMember.objects.get_or_create(
team=self.team, user=self.user, role=self.role)
if created:
notifier.team_member_new.delay(member.pk)
self.save()
return True
def deny(self):
"""Deny this invitation.
Could be useful to send a notification here in the future.
"""
if self.approved is not None:
raise InviteExpiredException("")
self.approved = False
self.save()
def message_json_data(self, data, msg):
data['can-reply'] = False
return data
# Workflows
class Workflow(models.Model):
REVIEW_CHOICES = (
(00, "Don't require review"),
(10, 'Peer must review'),
(20, 'Manager must review'),
(30, 'Admin must review'),
)
REVIEW_NAMES = dict(REVIEW_CHOICES)
REVIEW_IDS = dict([choice[::-1] for choice in REVIEW_CHOICES])
APPROVE_CHOICES = (
(00, "Don't require approval"),
(10, 'Manager must approve'),
(20, 'Admin must approve'),
)
APPROVE_NAMES = dict(APPROVE_CHOICES)
APPROVE_IDS = dict([choice[::-1] for choice in APPROVE_CHOICES])
team = models.ForeignKey(Team)
project = models.ForeignKey(Project, blank=True, null=True)
team_video = models.ForeignKey(TeamVideo, blank=True, null=True)
autocreate_subtitle = models.BooleanField(default=False)
autocreate_translate = models.BooleanField(default=False)
review_allowed = models.PositiveIntegerField(
choices=REVIEW_CHOICES, verbose_name='reviewers', default=0)
approve_allowed = models.PositiveIntegerField(
choices=APPROVE_CHOICES, verbose_name='approvers', default=0)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
class Meta:
unique_together = ('team', 'project', 'team_video')
@classmethod
def _get_target_team(cls, id, type):
"""Return the team for the given target.
The target is identified by id (its PK as an integer) and type (a string
of 'team_video', 'project', or 'team').
"""
if type == 'team_video':
return TeamVideo.objects.select_related('team').get(pk=id).team
elif type == 'project':
return Project.objects.select_related('team').get(pk=id).team
else:
return Team.objects.get(pk=id)
@classmethod
def get_for_target(cls, id, type, workflows=None):
'''Return the most specific Workflow for the given target.
If target object does not exist, None is returned.
If workflows is given, it should be a QS or List of all Workflows for
the TeamVideo's team. This will let you look it up yourself once and
use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
'''
if not workflows:
team = Workflow._get_target_team(id, type)
workflows = list(Workflow.objects.filter(team=team.id)
.select_related('project', 'team',
'team_video'))
else:
team = workflows[0].team
default_workflow = Workflow(team=team)
if not workflows:
return default_workflow
if type == 'team_video':
try:
return [w for w in workflows
if w.team_video and w.team_video.id == id][0]
except IndexError:
# If there's no video-specific workflow for this video, there
# might be a workflow for its project, so we'll start looking
# for that instead.
team_video = TeamVideo.objects.get(pk=id)
id, type = team_video.project_id, 'project'
if type == 'project':
try:
return [w for w in workflows
if w.project and w.project.workflow_enabled
and w.project.id == id and not w.team_video][0]
except IndexError:
# If there's no project-specific workflow for this project,
# there might be one for its team, so we'll fall through.
pass
if not team.workflow_enabled:
return default_workflow
return [w for w in workflows
if (not w.project) and (not w.team_video)][0]
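# Resolution-order sketch for get_for_target (hypothetical pk):
#
#   >>> Workflow.get_for_target(123, 'team_video')
#
# tries, in order: a workflow bound to that video, then one bound to its
# project (if the project has workflows enabled), then the team-wide
# workflow, and finally an unsaved default Workflow(team=team).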
@classmethod
def get_for_team_video(cls, team_video, workflows=None):
'''Return the most specific Workflow for the given team_video.
If workflows is given, it should be a QuerySet or List of all Workflows
for the TeamVideo's team. This will let you look it up yourself once
and use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
NOTE: This function caches the workflow for performance reasons. If the
workflow changes within the space of a single request, the
_cached_workflow attribute should be cleared.
'''
if not hasattr(team_video, '_cached_workflow'):
team_video._cached_workflow = Workflow.get_for_target(
team_video.id, 'team_video', workflows)
return team_video._cached_workflow
@classmethod
def get_for_project(cls, project, workflows=None):
'''Return the most specific Workflow for the given project.
If workflows is given, it should be a QuerySet or List of all Workflows
for the Project's team. This will let you look it up yourself once
and use it in many of these calls to avoid hitting the DB each time.
If workflows is not given it will be looked up with one DB query.
'''
return Workflow.get_for_target(project.id, 'project', workflows)
@classmethod
def add_to_team_videos(cls, team_videos):
'''Add the appropriate Workflow objects to each TeamVideo as .workflow.
This will only perform one DB query, and it will add the most specific
workflow possible to each TeamVideo.
This only exists for performance reasons.
'''
if not team_videos:
return []
workflows = list(Workflow.objects.filter(team=team_videos[0].team))
for tv in team_videos:
tv.workflow = Workflow.get_for_team_video(tv, workflows)
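# Usage sketch (hypothetical queryset); avoids one Workflow query per
# video when rendering a list of team videos:
#
#   >>> tvs = list(team.teamvideo_set.all()[:20])
#   >>> Workflow.add_to_team_videos(tvs)
#   >>> tvs[0].workflow.review_enabled
#   False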
def get_specific_target(self):
"""Return the most specific target that this workflow applies to."""
return self.team_video or self.project or self.team
def __unicode__(self):
target = self.get_specific_target()
return u'Workflow %s for %s (%s %d)' % (
self.pk, target, target.__class__.__name__, target.pk)
# Convenience functions for checking if a step of the workflow is enabled.
@property
def review_enabled(self):
"""Return whether any form of review is enabled for this workflow."""
return bool(self.review_allowed)
@property
def approve_enabled(self):
"""Return whether any form of approval is enabled for this workflow."""
return bool(self.approve_allowed)
@property
def requires_review_or_approval(self):
"""Return whether a given workflow requires review or approval."""
return self.approve_enabled or self.review_enabled
@property
def requires_tasks(self):
"""Return whether a given workflow requires the use of tasks."""
return (self.requires_review_or_approval or self.autocreate_subtitle
or self.autocreate_translate)
# Tasks
class TaskManager(models.Manager):
def not_deleted(self):
"""Return a QS of tasks that are not deleted."""
return self.get_query_set().filter(deleted=False)
def incomplete(self):
"""Return a QS of tasks that are not deleted or completed."""
return self.not_deleted().filter(completed=None)
def complete(self):
"""Return a QS of tasks that are not deleted, but are completed."""
return self.not_deleted().filter(completed__isnull=False)
def _type(self, types, completed=None, approved=None):
"""Return a QS of tasks that are not deleted and are one of the given types.
types should be a list of strings matching a label in Task.TYPE_CHOICES.
completed should be one of:
* True (only show completed tasks)
* False (only show incomplete tasks)
* None (don't filter on completion status)
approved should be either None or a string matching a label in
Task.APPROVED_CHOICES.
"""
type_ids = [Task.TYPE_IDS[type] for type in types]
qs = self.not_deleted().filter(type__in=type_ids)
if completed is False:
qs = qs.filter(completed=None)
elif completed is True:
qs = qs.filter(completed__isnull=False)
if approved:
qs = qs.filter(approved=Task.APPROVED_IDS[approved])
return qs
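# Usage sketch: the convenience methods below are thin wrappers over
# _type(). For example, incomplete_review_or_approve() is equivalent to:
#
#   >>> Task.objects._type(['Review', 'Approve'], completed=False)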
def incomplete_subtitle(self):
"""Return a QS of subtitle tasks that are not deleted or completed."""
return self._type(['Subtitle'], False)
def incomplete_translate(self):
"""Return a QS of translate tasks that are not deleted or completed."""
return self._type(['Translate'], False)
def incomplete_review(self):
"""Return a QS of review tasks that are not deleted or completed."""
return self._type(['Review'], False)
def incomplete_approve(self):
"""Return a QS of approve tasks that are not deleted or completed."""
return self._type(['Approve'], False)
def incomplete_subtitle_or_translate(self):
"""Return a QS of subtitle or translate tasks that are not deleted or completed."""
return self._type(['Subtitle', 'Translate'], False)
def incomplete_review_or_approve(self):
"""Return a QS of review or approve tasks that are not deleted or completed."""
return self._type(['Review', 'Approve'], False)
def complete_subtitle(self):
"""Return a QS of subtitle tasks that are not deleted, but are completed."""
return self._type(['Subtitle'], True)
def complete_translate(self):
"""Return a QS of translate tasks that are not deleted, but are completed."""
return self._type(['Translate'], True)
def complete_review(self, approved=None):
"""Return a QS of review tasks that are not deleted, but are completed.
If approved is given the tasks are further filtered on their .approved
attribute. It must be a string matching one of the labels in
Task.APPROVED_CHOICES, like 'Rejected'.
"""
return self._type(['Review'], True, approved)
def complete_approve(self, approved=None):
"""Return a QS of approve tasks that are not deleted, but are completed.
If approved is given the tasks are further filtered on their .approved
attribute. It must be a string matching one of the labels in
Task.APPROVED_CHOICES, like 'Rejected'.
"""
return self._type(['Approve'], True, approved)
def complete_subtitle_or_translate(self):
"""Return a QS of subtitle or translate tasks that are not deleted, but are completed."""
return self._type(['Subtitle', 'Translate'], True)
def complete_review_or_approve(self, approved=None):
"""Return a QS of review or approve tasks that are not deleted, but are completed.
If approved is given the tasks are further filtered on their .approved
attribute. It must be a string matching one of the labels in
Task.APPROVED_CHOICES, like 'Rejected'.
"""
return self._type(['Review', 'Approve'], True, approved)
def all_subtitle(self):
"""Return a QS of subtitle tasks that are not deleted."""
return self._type(['Subtitle'])
def all_translate(self):
"""Return a QS of translate tasks that are not deleted."""
return self._type(['Translate'])
def all_review(self):
"""Return a QS of review tasks that are not deleted."""
return self._type(['Review'])
def all_approve(self):
"""Return a QS of tasks that are not deleted."""
return self._type(['Approve'])
def all_subtitle_or_translate(self):
"""Return a QS of subtitle or translate tasks that are not deleted."""
return self._type(['Subtitle', 'Translate'])
def all_review_or_approve(self):
"""Return a QS of review or approve tasks that are not deleted."""
return self._type(['Review', 'Approve'])
class Task(models.Model):
TYPE_CHOICES = (
(10, 'Subtitle'),
(20, 'Translate'),
(30, 'Review'),
(40, 'Approve'),
)
TYPE_NAMES = dict(TYPE_CHOICES)
TYPE_IDS = dict([choice[::-1] for choice in TYPE_CHOICES])
APPROVED_CHOICES = (
(10, 'In Progress'),
(20, 'Approved'),
(30, 'Rejected'),
)
APPROVED_NAMES = dict(APPROVED_CHOICES)
APPROVED_IDS = dict([choice[::-1] for choice in APPROVED_CHOICES])
APPROVED_FINISHED_IDS = (20, 30)
type = models.PositiveIntegerField(choices=TYPE_CHOICES)
team = models.ForeignKey(Team)
team_video = models.ForeignKey(TeamVideo)
language = models.CharField(max_length=16,
choices=translation.ALL_LANGUAGE_CHOICES,
blank=True, db_index=True)
assignee = models.ForeignKey(User, blank=True, null=True)
subtitle_version = models.ForeignKey(SubtitleVersion, blank=True, null=True)
new_subtitle_version = models.ForeignKey(NewSubtitleVersion,
blank=True, null=True)
# The original source version being reviewed or approved.
#
# For example, if person A creates two versions while working on a subtitle
# task:
#
# v1 v2
# --o---o
# s s
#
# and then the reviewer and approver make some edits
#
# v1 v2 v3 v4 v5
# --o---o---o---o---o
# s s r r a
# *
#
# the review_base_version will be v2. Once approved, if an edit is made it
# needs to be approved as well, and the same thing happens:
#
# v1 v2 v3 v4 v5 v6 v7
# --o---o---o---o---o---o---o
# s s r r a e a
# *
#
# This is used when rejecting versions, and may be used elsewhere in the
# future as well.
review_base_version = models.ForeignKey(SubtitleVersion, blank=True,
null=True,
related_name='tasks_based_on')
new_review_base_version = models.ForeignKey(NewSubtitleVersion, blank=True,
null=True,
related_name='tasks_based_on_new')
deleted = models.BooleanField(default=False)
# TODO: Remove this field.
public = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
completed = models.DateTimeField(blank=True, null=True)
expiration_date = models.DateTimeField(blank=True, null=True)
# Arbitrary priority for tasks. Some teams might calculate this
# on complex criteria and expect us to be able to sort tasks on it.
# Higher numbers mean higher priority
priority = models.PositiveIntegerField(blank=True, default=0, db_index=True)
# Review and Approval -specific fields
approved = models.PositiveIntegerField(choices=APPROVED_CHOICES,
null=True, blank=True)
body = models.TextField(blank=True, default="")
objects = TaskManager()
def __unicode__(self):
return u'Task %s (%s) for %s' % (self.id or "unsaved",
self.get_type_display(),
self.team_video)
@property
def summary(self):
"""
Return a brief summary of the task
"""
output = unicode(self.team_video)
if self.body:
output += unicode(self.body.split('\n', 1)[0].strip()[:20])
return output
@staticmethod
def now():
"""datetime.datetime.now as a method
This lets us patch it in the unittests.
"""
return datetime.datetime.now()
def is_subtitle_task(self):
return self.type == Task.TYPE_IDS['Subtitle']
def is_translate_task(self):
return self.type == Task.TYPE_IDS['Translate']
def is_review_task(self):
return self.type == Task.TYPE_IDS['Review']
def is_approve_task(self):
return self.type == Task.TYPE_IDS['Approve']
@property
def workflow(self):
'''Return the most specific workflow for this task's TeamVideo.'''
return Workflow.get_for_team_video(self.team_video)
@staticmethod
def add_cached_video_urls(tasks):
"""Add the cached_video_url attribute to a list of atkss
cached_video_url is the URL as a string for the video.
"""
team_video_pks = [t.team_video_id for t in tasks]
video_urls = (VideoUrl.objects
.filter(video__teamvideo__id__in=team_video_pks)
.filter(primary=True))
video_url_map = dict((vu.video_id, vu.effective_url)
for vu in video_urls)
for t in tasks:
t.cached_video_url = video_url_map.get(t.team_video.video_id)
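# Usage sketch (hypothetical task list): a single query fetches the
# primary VideoUrl for every task's video instead of one query per task:
#
#   >>> task_list = list(Task.objects.incomplete()[:50])
#   >>> Task.add_cached_video_urls(task_list)
#   >>> task_list[0].cached_video_url
#   u'http://example.com/video.mp4'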
def _add_comment(self, lang_ct=None):
"""Add a comment on the SubtitleLanguage for this task with the body as content."""
if self.body.strip():
if lang_ct is None:
lang_ct = ContentType.objects.get_for_model(NewSubtitleLanguage)
comment = Comment(
content=self.body,
object_pk=self.new_subtitle_version.subtitle_language.pk,
content_type=lang_ct,
submit_date=self.completed,
user=self.assignee,
)
comment.save()
notifier.send_video_comment_notification.delay(
comment.pk, version_pk=self.new_subtitle_version.pk)
def future(self):
"""Return whether this task expires in the future."""
return (self.expiration_date is not None and
self.expiration_date > self.now())
# Functions related to task completion.
def _send_back(self, sends_notification=True):
"""Handle "rejection" of this task.
This will:
* Create a new task with the appropriate type (translate or subtitle).
* Try to reassign it to the previous assignee, leaving it unassigned
if that's not possible.
* Send a notification unless sends_notification is given as False.
NOTE: This function does not modify the *current* task in any way.
"""
# when sending back, instead of always sending back
# to the first step (translate/subtitle) go to the
# step before this one:
# Translate/Subtitle -> Review -> Approve
# also, only approve and review tasks can be sent back.
if self.type == Task.TYPE_IDS['Approve'] and self.workflow.review_enabled:
type = Task.TYPE_IDS['Review']
else:
is_primary = (self.new_subtitle_version
.subtitle_language
.is_primary_audio_language())
if is_primary:
type = Task.TYPE_IDS['Subtitle']
else:
type = Task.TYPE_IDS['Translate']
# let's guess which assignee we should use by finding the last
# user who did this task type
previous_task = Task.objects.complete().filter(
team_video=self.team_video, language=self.language, team=self.team, type=type
).order_by('-completed')[:1]
if previous_task:
assignee = previous_task[0].assignee
else:
assignee = None
# The target assignee may have left the team in the mean time.
if not self.team.members.filter(user=assignee).exists():
assignee = None
task = Task(team=self.team, team_video=self.team_video,
language=self.language, type=type,
assignee=assignee)
task.new_subtitle_version = self.new_subtitle_version
task.set_expiration()
task.save()
if sends_notification:
# notify original submitter (assignee of self)
notifier.reviewed_and_sent_back.delay(self.pk)
return task
def complete_approved(self, user):
"""Mark a review/approve task as Approved and complete it.
:param user: user who is approving the task
:returns: next task in the workflow.
"""
self.assignee = user
self.approved = Task.APPROVED_IDS['Approved']
return self.complete()
def complete_rejected(self, user):
"""Mark a review/approve task as Rejected and complete it.
:param user: user who is rejecting the task
:returns: next task in the workflow.
"""
self.assignee = user
self.approved = Task.APPROVED_IDS['Rejected']
return self.complete()
def complete(self):
'''Mark as complete and return the next task in the process if applicable.'''
self.completed = self.now()
self.save()
return { 'Subtitle': self._complete_subtitle,
'Translate': self._complete_translate,
'Review': self._complete_review,
'Approve': self._complete_approve,
}[Task.TYPE_NAMES[self.type]]()
def _can_publish_directly(self, subtitle_version):
from teams.permissions import can_publish_edits_immediately
type = {Task.TYPE_IDS['Subtitle']: 'Review',
Task.TYPE_IDS['Translate']: 'Review',
Task.TYPE_IDS['Review']: 'Approve'}.get(self.type)
tasks = (Task.objects._type([type], True, 'Approved')
.filter(language=self.language))
return (can_publish_edits_immediately(self.team_video,
self.assignee,
self.language) and
subtitle_version and
subtitle_version.previous_version() and
subtitle_version.previous_version().is_public() and
subtitle_version.subtitle_language.is_complete_and_synced() and
tasks.exists())
def _find_previous_assignee(self, type):
"""Find the previous assignee for a new review/approve task for this video.
NOTE: This is different than finding out the person to send a task back
to! This is for saying "who reviewed this task last time?".
For now, we'll assign the review/approval task to whomever did it last
time (if it was indeed done), but only if they're still eligible to
perform it now.
"""
from teams.permissions import can_review, can_approve
if type == 'Approve':
# Check if this is a post-publish edit.
# According to #1039 we don't want to auto-assign the assignee
version = self.get_subtitle_version()
if (version and
version.is_public() and
version.subtitle_language.is_complete_and_synced()):
return None
type = Task.TYPE_IDS['Approve']
can_do = can_approve
elif type == 'Review':
type = Task.TYPE_IDS['Review']
can_do = partial(can_review, allow_own=True)
else:
return None
last_task = self.team_video.task_set.complete().filter(
language=self.language, type=type
).order_by('-completed')[:1]
if last_task:
candidate = last_task[0].assignee
if candidate and can_do(self.team_video, candidate, self.language):
return candidate
def _complete_subtitle(self):
"""Handle the messy details of completing a subtitle task."""
sv = self.get_subtitle_version()
# TL;DR: take a look at #1206 to know why I did this
if self.workflow.requires_review_or_approval and not self._can_publish_directly(sv):
if self.workflow.review_enabled:
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Review'],
assignee=self._find_previous_assignee('Review'))
task.set_expiration()
task.save()
elif self.workflow.approve_enabled:
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Approve'],
assignee=self._find_previous_assignee('Approve'))
task.set_expiration()
task.save()
else:
# Subtitle task is done, and there is no approval or review
# required, so we mark the version as approved.
sv.publish()
# We need to make sure this is updated correctly here.
from videos import metadata_manager
metadata_manager.update_metadata(self.team_video.video.pk)
if self.workflow.autocreate_translate:
# TODO: Switch to autocreate_task?
_create_translation_tasks(self.team_video, sv)
task = None
return task
def _complete_translate(self):
"""Handle the messy details of completing a translate task."""
sv = self.get_subtitle_version()
# TL;DR: take a look at #1206 to know why I did this
if self.workflow.requires_review_or_approval and not self._can_publish_directly(sv):
if self.workflow.review_enabled:
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Review'],
assignee=self._find_previous_assignee('Review'))
task.set_expiration()
task.save()
elif self.workflow.approve_enabled:
# The review step may be disabled. If so, we check the approve step.
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Approve'],
assignee=self._find_previous_assignee('Approve'))
task.set_expiration()
task.save()
else:
sv.publish()
# We need to make sure this is updated correctly here.
from videos import metadata_manager
metadata_manager.update_metadata(self.team_video.video.pk)
task = None
return task
def _complete_review(self):
"""Handle the messy details of completing a review task."""
approval = self.approved == Task.APPROVED_IDS['Approved']
sv = self.get_subtitle_version()
if approval:
self._ensure_language_complete(sv.subtitle_language)
self._add_comment()
task = None
if self.workflow.approve_enabled:
# Approval is enabled, so...
if approval:
# If the reviewer thought these subtitles were good we create
# the next task.
task = Task(team=self.team, team_video=self.team_video,
new_subtitle_version=sv,
new_review_base_version=sv,
language=self.language, type=Task.TYPE_IDS['Approve'],
assignee=self._find_previous_assignee('Approve'))
task.set_expiration()
task.save()
# Notify the appropriate users.
notifier.reviewed_and_pending_approval.delay(self.pk)
else:
# Otherwise we send the subtitles back for improvement.
task = self._send_back()
else:
# Approval isn't enabled, so the ruling of this Review task
# determines whether the subtitles go public.
if approval:
# Make these subtitles public!
self.new_subtitle_version.publish()
# If the subtitles are okay, go ahead and autocreate translation
# tasks if necessary.
if self.workflow.autocreate_translate:
_create_translation_tasks(self.team_video, sv)
# Notify the appropriate users and external services.
notifier.reviewed_and_published.delay(self.pk)
else:
# Send the subtitles back for improvement.
task = self._send_back()
# Before we go, we need to record who reviewed these subtitles, so if
# necessary we can "send back" to them later.
if self.assignee:
sv.set_reviewed_by(self.assignee)
return task
def do_complete_approve(self, lang_ct=None):
return self._complete_approve(lang_ct=lang_ct)
def _complete_approve(self, lang_ct=None):
"""Handle the messy details of completing an approve task."""
approval = self.approved == Task.APPROVED_IDS['Approved']
sv = self.get_subtitle_version()
if approval:
self._ensure_language_complete(sv.subtitle_language)
self._add_comment(lang_ct=lang_ct)
if approval:
# The subtitles are acceptable, so make them public!
self.new_subtitle_version.publish()
# Create translation tasks if necessary.
if self.workflow.autocreate_translate:
_create_translation_tasks(self.team_video, sv)
task = None
# Notify the appropriate users.
notifier.approved_notification.delay(self.pk, approval)
else:
# Send the subtitles back for improvement.
task = self._send_back()
# Before we go, we need to record who approved these subtitles, so if
# necessary we can "send back" to them later.
if self.assignee:
sv.set_approved_by(self.assignee)
if approval:
api_subtitles_approved.send(sv)
else:
api_subtitles_rejected.send(sv)
return task
def _ensure_language_complete(self, subtitle_language):
if not subtitle_language.subtitles_complete:
subtitle_language.subtitles_complete = True
subtitle_language.save()
def get_perform_url(self):
"""Return a URL for whatever dialog is used to perform this task."""
return reverse('teams:perform_task', args=(self.team.slug, self.id))
def tasks_page_perform_link_text(self):
"""Get the link text for perform link on the tasks page."""
if self.assignee:
return _('Resume')
else:
return _('Start now')
def get_widget_url(self):
"""Get the URL to edit the video for this task. """
return reverse("subtitles:subtitle-editor", kwargs={
"video_id": self.team_video.video.video_id,
"language_code": self.language
})
def needs_start_dialog(self):
"""Check if this task needs the start dialog.
The only time we need it is when a user is starting a
transcribe/translate task. We don't need it for review/approval, or
if the task is being resumed.
"""
# We use the start dialog to select two things:
# - primary audio language
# - language of the subtitles
return (self.language == '' or
self.team_video.video.primary_audio_language_code == '')
def get_reviewer(self):
"""For Approve tasks, return the last user to Review these subtitles.
May be None if this task is not an Approve task, or if we can't figure
out the last reviewer for any reason.
"""
if self.get_type_display() == 'Approve':
previous = Task.objects.complete().filter(
team_video=self.team_video,
language=self.language,
team=self.team,
type=Task.TYPE_IDS['Review']).order_by('-completed')[:1]
if previous:
return previous[0].assignee
def set_expiration(self):
"""Set the expiration_date of this task. Does not save().
Requires that self.team and self.assignee be set correctly.
"""
if not self.assignee or not self.team.task_expiration:
self.expiration_date = None
else:
limit = datetime.timedelta(days=self.team.task_expiration)
self.expiration_date = self.now() + limit
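# Worked example (hypothetical values): with team.task_expiration == 3
# and now() == 2013-01-01, expiration_date becomes 2013-01-04.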
def get_subtitle_version(self):
""" Gets the subtitle version related to this task.
If the task has a subtitle_version attached, return it and
if not, try to find it throught the subtitle language of the video.
Note: we need this since we don't attach incomplete subtitle_version
to the task (and if we do we need to set the status to unmoderated and
that causes the version to get published).
"""
# autocreate sets the subtitle_version to another
# language's subtitle_version and that was breaking
# not only the interface but the new upload method.
if (self.new_subtitle_version and
self.new_subtitle_version.language_code == self.language):
return self.new_subtitle_version
if not hasattr(self, "_subtitle_version"):
language = self.team_video.video.subtitle_language(self.language)
self._subtitle_version = (language.get_tip(public=False)
if language else None)
return self._subtitle_version
def is_blocked(self):
"""Return whether this task is "blocked".
"Blocked" means that it's a translation task but the source language
isn't ready to be translated yet.
"""
subtitle_version = self.get_subtitle_version()
if not subtitle_version:
return False
source_language = subtitle_version.subtitle_language.get_translation_source_language()
if not source_language:
return False
can_perform = (source_language and
source_language.is_complete_and_synced())
if self.get_type_display() != 'Translate':
if self.get_type_display() in ('Review', 'Approve'):
# review and approve tasks will be blocked if they're
# a translation and they have a draft and the source
# language no longer has a published version
if not can_perform or source_language.language_code == self.language:
return True
return not can_perform
def save(self, update_team_video_index=True, *args, **kwargs):
is_review_or_approve = self.get_type_display() in ('Review', 'Approve')
if self.language:
if self.language not in translation.ALL_LANGUAGE_CODES:
raise ValidationError(
"Subtitle Language should be a valid code.")
result = super(Task, self).save(*args, **kwargs)
if update_team_video_index:
tasks.update_one_team_video.delay(self.team_video.pk)
Video.cache.invalidate_by_pk(self.team_video.video_id)
return result
# Settings
class SettingManager(models.Manager):
use_for_related_fields = True
def guidelines(self):
"""Return a QS of settings related to team guidelines."""
keys = [key for key, name in Setting.KEY_CHOICES
if name.startswith('guidelines_')]
return self.get_query_set().filter(key__in=keys)
def messages(self):
"""Return a QS of settings related to team messages."""
keys = [key for key, name in Setting.KEY_CHOICES
if name.startswith('messages_')]
return self.get_query_set().filter(key__in=keys)
def messages_guidelines(self):
"""Return a QS of settings related to team messages or guidelines."""
return self.get_query_set().filter(key__in=Setting.MESSAGE_KEYS)
def with_names(self, names):
return self.filter(key__in=[Setting.KEY_IDS[name] for name in names])
def all_messages(self):
messages = {}
for key in Setting.MESSAGE_KEYS:
name = Setting.KEY_NAMES[key]
messages[name] = self.instance.get_default_message(name)
messages.update({
s.key_name: s.data
for s in self.messages_guidelines()
if s.data
})
return messages
class Setting(models.Model):
KEY_CHOICES = (
(100, 'messages_invite'),
(101, 'messages_manager'),
(102, 'messages_admin'),
(103, 'messages_application'),
(104, 'messages_joins'),
(200, 'guidelines_subtitle'),
(201, 'guidelines_translate'),
(202, 'guidelines_review'),
# 300s means if this team will block those notifications
(300, 'block_invitation_sent_message'),
(301, 'block_application_sent_message'),
(302, 'block_application_denided_message'),
(303, 'block_team_member_new_message'),
(304, 'block_team_member_leave_message'),
(305, 'block_task_assigned_message'),
(306, 'block_reviewed_and_published_message'),
(307, 'block_reviewed_and_pending_approval_message'),
(308, 'block_reviewed_and_sent_back_message'),
(309, 'block_approved_message'),
(310, 'block_new_video_message'),
# 400 is for text displayed on web pages
(401, 'pagetext_welcome_heading'),
)
KEY_NAMES = dict(KEY_CHOICES)
KEY_IDS = dict([choice[::-1] for choice in KEY_CHOICES])
MESSAGE_KEYS = [
key for key, name in KEY_CHOICES
if name.startswith('messages_') or name.startswith('guidelines_')
or name.startswith('pagetext_')
]
MESSAGE_DEFAULTS = {
'pagetext_welcome_heading': _("Help %(team)s reach a world audience"),
}
key = models.PositiveIntegerField(choices=KEY_CHOICES)
data = models.TextField(blank=True)
team = models.ForeignKey(Team, related_name='settings')
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
objects = SettingManager()
class Meta:
unique_together = (('key', 'team'),)
def __unicode__(self):
return u'%s - %s' % (self.team, self.key_name)
@property
def key_name(self):
"""Return the key name for this setting.
TODO: Remove this and replace with get_key_display()?
"""
return Setting.KEY_NAMES[self.key]
# TeamLanguagePreferences
class TeamLanguagePreferenceManager(models.Manager):
def _generate_writable(self, team):
"""Return the set of language codes that are writeable for this team."""
unwritable = self.for_team(team).filter(allow_writes=False, preferred=False).values("language_code")
unwritable = set([x['language_code'] for x in unwritable])
return translation.ALL_LANGUAGE_CODES - unwritable
def _generate_readable(self, team):
"""Return the set of language codes that are readable for this team."""
unreadable = self.for_team(team).filter(allow_reads=False, preferred=False).values("language_code")
unreadable = set([x['language_code'] for x in unreadable])
return translation.ALL_LANGUAGE_CODES - unreadable
def _generate_preferred(self, team):
"""Return the set of language codes that are preferred for this team."""
preferred = self.for_team(team).filter(preferred=True).values("language_code")
return set([x['language_code'] for x in preferred])
def for_team(self, team):
"""Return a QS of all language preferences for the given team."""
return self.get_query_set().filter(team=team)
def on_changed(self, sender, instance, *args, **kwargs):
"""Perform any necessary actions when a language preference changes.
TODO: Refactor this out of the manager...
"""
from teams.cache import invalidate_lang_preferences
invalidate_lang_preferences(instance.team)
def get_readable(self, team):
"""Return the set of language codes that are readable for this team.
This value may come from memcache if possible.
"""
from teams.cache import get_readable_langs
return get_readable_langs(team)
def get_writable(self, team):
"""Return the set of language codes that are writeable for this team.
This value may come from memcache if possible.
"""
from teams.cache import get_writable_langs
return get_writable_langs(team)
def get_preferred(self, team):
"""Return the set of language codes that are preferred for this team.
This value may come from memcache if possible.
"""
from teams.cache import get_preferred_langs
return get_preferred_langs(team)
class TeamLanguagePreference(models.Model):
"""Represent language preferences for a given team.
First, TLPs may mark a language as "preferred". If that's the case then the
other attributes of this model are irrelevant and can be ignored.
"Preferred" languages will have translation tasks automatically created for
them when subtitles are added.
If preferred is False, the TLP describes a *restriction* on the language
instead. Writing in that language may be prevented, or both reading and
writing may be prevented.
(Note: "writing" means not only writing new subtitles but also creating
tasks, etc)
This is how the restriction settings should interact. TLP means that we
have created a TeamLanguagePreference for that team and language.
| Action | NO | allow_read=True, | allow_read=False, |
| | TLP | allow_write=False | allow_write=False |
========================================================================================
| assignable as tasks | X | | |
| assignable as narrowing | X | | |
| listed on the widget for viewing | X | X | |
| listed on the widget for improving | X | | |
| returned from the api read operations | X | X | |
| upload / write operations from the api | X | | |
| show up on the start dialog | X | | |
+----------------------------------------+-----+-------------------+-------------------+
Remember, this table only applies if preferred=False. If the language is
preferred the "restriction" attributes are effectively garbage. Maybe we
should make the column nullable to make this more clear?
allow_read=True, allow_write=True, preferred=False is invalid. Just remove
the row altogether.
"""
team = models.ForeignKey(Team, related_name="lang_preferences")
language_code = models.CharField(max_length=16)
allow_reads = models.BooleanField(default=False)
allow_writes = models.BooleanField(default=False)
preferred = models.BooleanField(default=False)
objects = TeamLanguagePreferenceManager()
class Meta:
unique_together = ('team', 'language_code')
def clean(self, *args, **kwargs):
if self.allow_reads and self.allow_writes:
raise ValidationError("No sense in having all allowed, just remove the preference for this language.")
if self.preferred and (self.allow_reads or self.allow_writes):
raise ValidationError("Cannot restrict a preferred language.")
super(TeamLanguagePreference, self).clean(*args, **kwargs)
def __unicode__(self):
return u"%s preference for team %s" % (self.language_code, self.team)
post_save.connect(TeamLanguagePreference.objects.on_changed, TeamLanguagePreference)
# TeamNotificationSettings
class TeamNotificationSettingManager(models.Manager):
def notify_team(self, team_pk, event_name, **kwargs):
"""Notify the given team of a given event.
Finds the matching notification settings for this team, instantiates
the notifier class, and sends the appropriate notification.
If the notification settings has an email target, sends an email.
If the http settings are filled, then sends the request.
This can be run as a Celery task, as it requires no objects to be passed.
"""
try:
team = Team.objects.get(pk=team_pk)
except Team.DoesNotExist:
logger.error("A pk for a non-existent team was passed in.",
extra={"team_pk": team_pk, "event_name": event_name})
return
try:
if team.partner:
notification_settings = self.get(partner=team.partner)
else:
notification_settings = self.get(team=team)
except TeamNotificationSetting.DoesNotExist:
return
notification_settings.notify(event_name, **kwargs)
class TeamNotificationSetting(models.Model):
"""Info on how a team should be notified of changes to its videos.
For now, a team can be notified by having an HTTP request sent with the
payload as the notification information. This cannot be hardcoded since
teams might have different URLs for each environment.
Some teams have strict requirements on mapping video ids to their internal
values, and also their own language codes. Therefore we need to configure
a class that can do the correct mapping.
TODO: allow email notifications
"""
EVENT_VIDEO_NEW = "video-new"
EVENT_VIDEO_EDITED = "video-edited"
EVENT_LANGUAGE_NEW = "language-new"
EVENT_LANGUAGE_EDITED = "language-edit"
EVENT_LANGUAGE_DELETED = "language-deleted"
EVENT_SUBTITLE_NEW = "subs-new"
EVENT_SUBTITLE_APPROVED = "subs-approved"
EVENT_SUBTITLE_REJECTED = "subs-rejected"
EVENT_APPLICATION_NEW = 'application-new'
team = models.OneToOneField(Team, related_name="notification_settings",
null=True, blank=True)
partner = models.OneToOneField('Partner',
related_name="notification_settings", null=True, blank=True)
# the URL to post the callback notifying partners of new video activity
request_url = models.URLField(blank=True, null=True)
basic_auth_username = models.CharField(max_length=255, blank=True, null=True)
basic_auth_password = models.CharField(max_length=255, blank=True, null=True)
# not being used, here to avoid extra migrations in the future
email = models.EmailField(blank=True, null=True)
# integers mapping to classes, see unisubs-integration/notificationsclasses.py
notification_class = models.IntegerField(default=1,)
objects = TeamNotificationSettingManager()
def get_notification_class(self):
try:
from ted.notificationclasses import NOTIFICATION_CLASS_MAP
return NOTIFICATION_CLASS_MAP[self.notification_class]
except ImportError:
logger.exception("Apparently unisubs-integration is not installed")
def notify(self, event_name, **kwargs):
"""Resolve the notification class for this setting and fires notfications."""
notification_class = self.get_notification_class()
if not notification_class:
logger.error("Could not find notification class %s" % self.notification_class)
return
notification = notification_class(self.team, self.partner,
event_name, **kwargs)
if self.request_url:
success, content = notification.send_http_request(
self.request_url,
self.basic_auth_username,
self.basic_auth_password
)
return success, content
# FIXME: spec and test this, for now just return
return
def __unicode__(self):
if self.partner:
return u'NotificationSettings for partner %s' % self.partner
return u'NotificationSettings for team %s' % self.team
class BillingReport(models.Model):
# use BillingRecords to signify completed work
TYPE_BILLING_RECORD = 2
# use approval tasks to signify completed work
TYPE_APPROVAL = 3
# Like TYPE_APPROVAL, but centered on the users who subtitle/review the
# work
TYPE_APPROVAL_FOR_USERS = 4
TYPE_CHOICES = (
(TYPE_BILLING_RECORD, 'Crowd sourced'),
(TYPE_APPROVAL, 'Professional services'),
(TYPE_APPROVAL_FOR_USERS, 'On-demand translators'),
)
teams = models.ManyToManyField(Team, related_name='billing_reports')
start_date = models.DateField()
end_date = models.DateField()
csv_file = S3EnabledFileField(blank=True, null=True,
upload_to='teams/billing/')
processed = models.DateTimeField(blank=True, null=True)
type = models.IntegerField(choices=TYPE_CHOICES,
default=TYPE_BILLING_RECORD)
def __unicode__(self):
if hasattr(self, 'id') and self.id is not None:
team_count = self.teams.all().count()
else:
team_count = 0
return "%s teams (%s - %s)" % (team_count,
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
def _get_approved_tasks(self):
return Task.objects.complete_approve().filter(
approved=Task.APPROVED_IDS['Approved'],
team__in=self.teams.all(),
completed__range=(self.start_date, self.end_date))
def _report_date(self, when):
return when.strftime('%Y-%m-%d %H:%M:%S')
def generate_rows_type_approval(self):
header = (
'Team',
'Video Title',
'Video ID',
'Project',
'Language',
'Minutes',
'Original',
'Translation?',
'Approver',
'Date',
)
rows = [header]
for approve_task in self._get_approved_tasks():
video = approve_task.team_video.video
project = approve_task.team_video.project.name if approve_task.team_video.project else 'none'
version = approve_task.new_subtitle_version
language = version.subtitle_language
subtitle_task = (Task.objects.complete_subtitle_or_translate()
.filter(team_video=approve_task.team_video,
language=approve_task.language)
.order_by('-completed'))[0]
rows.append((
approve_task.team.name,
video.title_display(),
video.video_id,
project,
approve_task.language,
get_minutes_for_version(version, False),
language.is_primary_audio_language(),
subtitle_task.type==Task.TYPE_IDS['Translate'],
unicode(approve_task.assignee),
self._report_date(approve_task.completed),
))
return rows
def generate_rows_type_approval_for_users(self):
header = (
'User',
'Task Type',
'Team',
'Video Title',
'Video ID',
'Project',
'Language',
'Minutes',
'Original',
'Approver',
'Note',
'Date',
'Pay Rate',
)
data_rows = []
for approve_task in self._get_approved_tasks():
video = approve_task.team_video.video
project = approve_task.team_video.project.name if approve_task.team_video.project else 'none'
version = approve_task.get_subtitle_version()
language = version.subtitle_language
all_tasks = [approve_task]
try:
all_tasks.append((Task.objects.complete_subtitle_or_translate()
.filter(team_video=approve_task.team_video,
language=approve_task.language)
.order_by('-completed'))[0])
except IndexError:
# no subtitling task, probably the review task was manually
# created.
pass
try:
all_tasks.append((Task.objects.complete_review()
.filter(team_video=approve_task.team_video,
language=approve_task.language)
.order_by('-completed'))[0])
except IndexError:
# review not enabled
pass
for task in all_tasks:
data_rows.append((
unicode(task.assignee),
task.get_type_display(),
approve_task.team.name,
video.title_display(),
video.video_id,
project,
language.language_code,
get_minutes_for_version(version, False),
language.is_primary_audio_language(),
unicode(approve_task.assignee),
unicode(task.body),
self._report_date(task.completed),
task.assignee.pay_rate_code,
))
data_rows.sort(key=lambda row: row[0])
return [header] + data_rows
def generate_rows_type_billing_record(self):
rows = []
for i,team in enumerate(self.teams.all()):
rows = rows + BillingRecord.objects.csv_report_for_team(team,
self.start_date, self.end_date, add_header=i == 0)
return rows
def generate_rows(self):
if self.type == BillingReport.TYPE_BILLING_RECORD:
rows = self.generate_rows_type_billing_record()
elif self.type == BillingReport.TYPE_APPROVAL:
rows = self.generate_rows_type_approval()
elif self.type == BillingReport.TYPE_APPROVAL_FOR_USERS:
rows = self.generate_rows_type_approval_for_users()
else:
raise ValueError("Unknown type: %s" % self.type)
return rows
def convert_unicode_to_utf8(self, rows):
def _convert(value):
if isinstance(value, unicode):
return value.encode("utf-8")
else:
return value
return [tuple(_convert(v) for v in row) for row in rows]
def process(self):
"""
Generate the rows (including headers), save them to a temporary file,
then set that file as the csv_file property; when the S3 storage backend
is in use, this takes care of exporting the report to S3.
"""
try:
rows = self.generate_rows()
except StandardError:
logger.error("Error generating billing report: (id: %s)", self.id)
self.csv_file = None
else:
self.csv_file = self.make_csv_file(rows)
self.processed = datetime.datetime.utcnow()
self.save()
def make_csv_file(self, rows):
rows = self.convert_unicode_to_utf8(rows)
fn = '/tmp/bill-%s-teams-%s-%s-%s-%s.csv' % (
self.teams.all().count(),
self.start_str, self.end_str,
self.get_type_display(), self.pk)
with open(fn, 'w') as f:
writer = csv.writer(f)
writer.writerows(rows)
return File(open(fn, 'r'))
@property
def start_str(self):
return self.start_date.strftime("%Y%m%d")
@property
def end_str(self):
return self.end_date.strftime("%Y%m%d")
class BillingReportGenerator(object):
def __init__(self, all_records, add_header=True):
if add_header:
self.rows = [self.header()]
else:
self.rows = []
all_records = list(all_records)
self.make_language_number_map(all_records)
self.make_languages_without_records(all_records)
for video, records in groupby(all_records, lambda r: r.video):
records = list(records)
if video:
for lang in self.languages_without_records.get(video.id, []):
self.rows.append(
self.make_row_for_lang_without_record(video, lang))
for r in records:
self.rows.append(self.make_row(video, r))
def header(self):
return [
'Video Title',
'Video ID',
'Project',
'Language',
'Minutes',
'Original',
'Language number',
'Team',
'Created',
'Source',
'User',
]
def make_row(self, video, record):
return [
(video and video.title_display()) or "----",
(video and video.video_id) or "deleted",
(record.project.name if record.project else 'none'),
(record.new_subtitle_language and record.new_subtitle_language.language_code) or "----",
record.minutes,
record.is_original,
self.language_number_map.get(record.id) or "----",
record.team.slug,
record.created.strftime('%Y-%m-%d %H:%M:%S'),
record.source,
record.user.username,
]
def make_language_number_map(self, records):
self.language_number_map = {}
videos = set(r.video for r in records)
video_counts = dict((v and v.id, 0) for v in videos)
qs = (BillingRecord.objects
.filter(video__in=videos)
.order_by('created'))
for record in qs:
vid = record.video and record.video.id
video_counts[vid] += 1
self.language_number_map[record.id] = video_counts[vid]
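# e.g. (hypothetical data): three records for the same video, ordered by
# `created`, get language numbers 1, 2 and 3; records whose video was
# deleted all share the None key in video_counts.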
def make_languages_without_records(self, records):
self.languages_without_records = {}
videos = [r.video for r in records]
language_ids = [r.new_subtitle_language_id for r in records]
no_billing_record_where = """\
NOT EXISTS (
SELECT 1
FROM teams_billingrecord br
WHERE br.new_subtitle_language_id = subtitles_subtitlelanguage.id
)"""
qs = (NewSubtitleLanguage.objects
.filter(video__in=videos, subtitles_complete=True)
.exclude(id__in=language_ids)
.extra(where=[no_billing_record_where]))
for lang in qs:
vid = lang.video_id
if vid not in self.languages_without_records:
self.languages_without_records[vid] = [lang]
else:
self.languages_without_records[vid].append(lang)
def make_row_for_lang_without_record(self, video, language):
return [
video.title_display(),
video.video_id,
'none',
language.language_code,
0,
language.is_primary_audio_language(),
0,
'unknown',
language.created.strftime('%Y-%m-%d %H:%M:%S'),
'unknown',
'unknown',
]
class BillingRecordManager(models.Manager):
def data_for_team(self, team, start, end):
return self.filter(team=team, created__gte=start, created__lte=end)
def csv_report_for_team(self, team, start, end, add_header=True):
all_records = self.data_for_team(team, start, end)
generator = BillingReportGenerator(all_records, add_header)
return generator.rows
def insert_records_for_translations(self, billing_record):
"""
If you've translated from an incomplete language, and later on that
language is completed, we must check whether any translations are now
complete and therefore should have billing records created for them.
"""
translations = billing_record.new_subtitle_language.get_dependent_subtitle_languages()
inserted = []
for translation in translations:
version = translation.get_tip(public=False)
if version:
inserted.append(self.insert_record(version))
return filter(bool, inserted)
def insert_record(self, version):
"""
Figures out if this version qualifies for a billing record, and
if so creates one. This should be self-contained, i.e. safe to call
for any version. No records should be created if not needed, and it
won't create multiples.
If this language has translations it will check if any of those are now
eligible for BillingRecords and create one accordingly.
"""
from teams.models import BillingRecord
celery_logger.debug('insert billing record')
language = version.subtitle_language
video = language.video
tv = video.get_team_video()
if not tv:
celery_logger.debug('not a team video')
return
if not language.is_complete_and_synced(public=False):
celery_logger.debug('language not complete')
return
try:
# we already have a record
previous_record = BillingRecord.objects.get(video=video,
new_subtitle_language=language)
# make sure we update it
celery_logger.debug('a billing record for this language exists')
previous_record.is_original = \
video.primary_audio_language_code == language.language_code
previous_record.save()
return
except BillingRecord.DoesNotExist:
pass
if NewSubtitleVersion.objects.filter(
subtitle_language=language,
created__lt=BILLING_CUTOFF).exclude(
pk=version.pk).exists():
celery_logger.debug('an older version exists')
return
is_original = language.is_primary_audio_language()
source = version.origin
team = tv.team
project = tv.project
new_record = BillingRecord.objects.create(
video=video,
project = project,
new_subtitle_version=version,
new_subtitle_language=language,
is_original=is_original, team=team,
created=version.created,
source=source,
user=version.author)
from_translations = self.insert_records_for_translations(new_record)
return new_record, from_translations
def get_minutes_for_version(version, round_up_to_integer):
"""
Return the number of minutes the subtitles specified in version
"""
subs = version.get_subtitles()
if len(subs) == 0:
return 0
for sub in subs:
if sub.start_time is not None:
start_time = sub.start_time
break
# we shouldn't have an end time set without a start time, but handle
# it just in case
if sub.end_time is not None:
start_time = sub.end_time
break
else:
return 0
for sub in reversed(subs):
if sub.end_time is not None:
end_time = sub.end_time
break
# we shouldn't have a start time set without an end time, but handle
# it just in case
if sub.start_time is not None:
end_time = sub.start_time
break
else:
return 0
duration_seconds = (end_time - start_time) / 1000.0
minutes = duration_seconds/60.0
if round_up_to_integer:
minutes = int(ceil(minutes))
return minutes
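# Worked example (hypothetical timings, in milliseconds): if the first timed
# cue has start_time=1000 and the last has end_time=61000, the duration is
# (61000 - 1000) / 1000.0 = 60 seconds, i.e. 1.0 minute (rounded up to an
# integer only when round_up_to_integer is True).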
class BillingRecord(models.Model):
# The billing record should still exist if the video is deleted
video = models.ForeignKey(Video, blank=True, null=True, on_delete=models.SET_NULL)
project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.SET_NULL)
subtitle_version = models.ForeignKey(SubtitleVersion, null=True,
blank=True, on_delete=models.SET_NULL)
new_subtitle_version = models.ForeignKey(NewSubtitleVersion, null=True,
blank=True, on_delete=models.SET_NULL)
subtitle_language = models.ForeignKey(SubtitleLanguage, null=True,
blank=True, on_delete=models.SET_NULL)
new_subtitle_language = models.ForeignKey(NewSubtitleLanguage, null=True,
blank=True, on_delete=models.SET_NULL)
minutes = models.FloatField(blank=True, null=True)
is_original = models.BooleanField(default=False)
team = models.ForeignKey(Team)
created = models.DateTimeField()
source = models.CharField(max_length=255)
user = models.ForeignKey(User)
objects = BillingRecordManager()
class Meta:
unique_together = ('video', 'new_subtitle_language')
def __unicode__(self):
return "%s - %s" % (self.video and self.video.video_id,
self.new_subtitle_language and self.new_subtitle_language.language_code)
def save(self, *args, **kwargs):
if self.minutes is None:
self.minutes = self.get_minutes()
assert self.minutes is not None
return super(BillingRecord, self).save(*args, **kwargs)
def get_minutes(self):
return get_minutes_for_version(self.new_subtitle_version, True)
class Partner(models.Model):
name = models.CharField(_(u'name'), max_length=250, unique=True)
slug = models.SlugField(_(u'slug'), unique=True)
can_request_paid_captions = models.BooleanField(default=False)
# The `admins` field specifies users who can do just about anything within
# the partner realm.
admins = models.ManyToManyField('auth.CustomUser',
related_name='managed_partners', blank=True, null=True)
def __unicode__(self):
return self.name
def is_admin(self, user):
return user in self.admins.all()
|
agpl-3.0
| -5,704,561,719,830,586,000
| 36.958653
| 232
| 0.608513
| false
| 4.212928
| false
| false
| false
|
codedsk/hubcheck
|
hubcheck/pageobjects/po_time_overview_page.py
|
1
|
1349
|
from hubcheck.pageobjects.po_time_base_page import TimeBasePage
from hubcheck.pageobjects.basepageelement import Link
class TimeOverviewPage(TimeBasePage):
"""time overview page"""
def __init__(self,browser,catalog,groupid=None):
super(TimeOverviewPage,self).__init__(browser,catalog)
self.path = "/time/overview"
# load hub's classes
TimeOverviewPage_Locators = self.load_class('TimeOverviewPage_Locators')
TimeOverview = self.load_class('TimeOverview')
# update this object's locator
self.locators.update(TimeOverviewPage_Locators.locators)
# setup page object's components
self.overview = TimeOverview(self,{'base':'overview'})
def get_active_hubs_count(self):
return self.overview.get_active_hubs_count()
def get_active_tasks_count(self):
return self.overview.get_active_tasks_count()
def get_total_hours_count(self):
return self.overview.get_total_hours_count()
def goto_hubs(self):
self.overview.goto_hubs()
def goto_tasks(self):
self.overview.goto_tasks()
def goto_records(self):
self.overview.goto_records()
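# A minimal usage sketch (hypothetical browser/catalog fixtures; goto_page()
# is assumed to be inherited from TimeBasePage):
#   po = TimeOverviewPage(browser, catalog)
#   po.goto_page()
#   hubs = po.get_active_hubs_count()
#   po.goto_records()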
class TimeOverviewPage_Locators_Base(object):
"""locators for TimeOverviewPage object"""
locators = {
'overview' : "css=#plg_time_overview",
}
|
mit
| 2,818,290,263,119,417,000
| 28.977778
| 80
| 0.673091
| false
| 3.768156
| false
| false
| false
|
madeso/prettygood
|
dotnet/Tagger/TagValidator.py
|
1
|
2567
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MusicBrainz;
using PrettyGood.Util;
namespace Tagger
{
class TagValidator
{
public bool validate(IdTag tag)
{
Artist artist = null;
if (string.IsNullOrEmpty(tag.Artist) == false) artist = getArtist(tag.Artist);
if (artist == null) return false;
Release album = null;
if (string.IsNullOrEmpty(tag.Album) == false) album = getRelease(artist, tag.Album);
Track track = null;
if (string.IsNullOrEmpty(tag.Title))
{
// looking a track up by number requires an album; bail out if we never found one
if (album == null) return false;
int num = int.Parse(tag.TrackNumber.RemoveLeadingZeros());
num %= 100;
track = album.GetTracks()[num];
}
else
{
foreach (var t in Track.Query(tag.Title, artist.GetName()))
{
track = t;
break;
}
}
if (track == null) return false;
if (album == null)
{
foreach (var r in track.GetReleases())
{
album = r;
break;
}
}
tag.Artist = artist.GetName();
tag.Album = album.GetTitle();
tag.TrackNumber = track.GetTrackNumber(album).ToString();
tag.TotalTracks = album.GetTracks().Count.ToString();
//tag.Year = album.GetReleaseRelations()[0].BeginDate;
return true;
}
private Release getRelease(Artist artist, string a)
{
string album = a.ToLower();
foreach (Release r in artist.GetReleases())
{
if (album == r.GetTitle().ToLower()) return r;
}
return null;
}
private Artist getArtist(string art)
{
string artist = art.ToLower();
if (artists.ContainsKey(artist)) return artists[artist];
Artist info = null;
System.Threading.Thread.Sleep(500);
foreach (Artist a in Artist.Query(artist))
{
string name = a.GetName();
if (artist.Contains(name.ToLower()))
{
info = a;
break;
}
}
artists.Add(artist, info);
return info;
}
Dictionary<string, Artist> artists = new Dictionary<string, Artist>();
}
}
|
mit
| -570,241,561,569,758,850
| 27.186813
| 96
| 0.475244
| false
| 4.47644
| false
| false
| false
|
clasnake/recommender
|
similarity.py
|
1
|
6369
|
from __future__ import division
from math import sqrt
def sim_distance(prefs, item1, item2):
# get the list of shared items
si = {}
for item in prefs[item1]:
if item in prefs[item2]:
si[item] = 1
# if they have no shared items, return 0
if len(si) == 0: return 0
# Add the squares of all the differences
sum_of_squares = sum(
[pow(prefs[item1][item] - prefs[item2][item], 2) for item in prefs[item1] if item in prefs[item2]])
return 1 / (1 + sqrt(sum_of_squares))
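# Worked example (hypothetical ratings, not from the original code):
#   prefs = {'A': {'x': 1.0, 'y': 2.0}, 'B': {'x': 1.0, 'y': 5.0}}
#   sim_distance(prefs, 'A', 'B')  # sqrt(0 + 9) = 3 -> 1 / (1 + 3) = 0.25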
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs, p1, p2):
# Get the list of mutually rated items
si = {}
for item in prefs[p1]:
if item in prefs[p2]: si[item] = 1
# if there are no ratings in common, return 0
if len(si) == 0: return 0
# Sum calculations
n = len(si)
# Sums of all the preferences
sum1 = float(sum([prefs[p1][it] for it in si]))
sum2 = float(sum([prefs[p2][it] for it in si]))
# Sums of the squares
sum1Sq = float(sum([pow(prefs[p1][it], 2) for it in si]))
sum2Sq = float(sum([pow(prefs[p2][it], 2) for it in si]))
# Sum of the products
pSum = float(sum([prefs[p1][it] * prefs[p2][it] for it in si]))
# Calculate r (Pearson score)
num = float(pSum - (sum1 * sum2 / n))
den = float(sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n)))
if den == 0: return 0
r = float(num / den)
return round(r, 7)
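# Worked example (hypothetical ratings): two raters who rank two shared items
# in the same order are perfectly correlated.
#   prefs = {'A': {'m1': 4.0, 'm2': 3.5}, 'B': {'m1': 5.0, 'm2': 3.0}}
#   sim_pearson(prefs, 'A', 'B')  # -> 1.0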
def sim_pearson1(prefs, person1, person2):
#get the list of shared items
si = {}
for item in prefs[person1]:
if item in prefs[person2]:
si[item] = 1
#if they have no shared items, return 0
if len(si) == 0: return 0
#find the number of elements
n = len(si)
#add up all the prefs
sum1 = sum([prefs[person1][item] for item in si])
sum2 = sum([prefs[person2][item] for item in si])
#calculate the mean of the critics of p1 and p2
mean1 = sum1 / n
mean2 = sum2 / n
#calculate the covariance
covariance = sum([(prefs[person1][item] - mean1) * (prefs[person2][item] - mean2) for item in si]) / n
#calculate the standard_deviation
sd1 = sqrt(sum([pow(prefs[person1][item] - mean1, 2) for item in si]) / n)
sd2 = sqrt(sum([pow(prefs[person2][item] - mean2, 2) for item in si]) / n)
if sd1 * sd2 == 0: return 0
#calculate the pearson correlation improved
pearson = (covariance / (sd1 * sd2))
return pearson
def sim_pearson_improved(prefs, person1, person2):
#get the list of shared items
si = {}
for item in prefs[person1]:
if item in prefs[person2]:
si[item] = 1
#if they have no shared items, return 0
if len(si) == 0: return 0
#find the number of elements
n = len(si)
#get the count of items rated by person
count1 = 0
count2 = 0
for person in prefs[person1]:
count1 += 1
for item in prefs[person2]:
count2 += 1
totalCount = count1 + count2 - n
#add up all the prefs
sum1 = sum([prefs[person1][item] for item in si])
sum2 = sum([prefs[person2][item] for item in si])
#calculate the mean of the critics of p1 and p2
mean1 = sum1 / n
mean2 = sum2 / n
#calculate the covariance
covariance = sum([(prefs[person1][item] - mean1) * (prefs[person2][item] - mean2) for item in si]) / n
#calculate the standard_deviation
sd1 = sqrt(sum([pow(prefs[person1][item] - mean1, 2) for item in si]) / n)
sd2 = sqrt(sum([pow(prefs[person2][item] - mean2, 2) for item in si]) / n)
if sd1 * sd2 == 0: return 0
#calculate the pearson correlation improved
pearson = (covariance / (sd1 * sd2)) * (float(n) / float(totalCount))
#print n,count,float(n)/float(count),pearson
return pearson
def sim_cosine(prefs, item1, item2):
si = {}
for i in prefs[item1]:
if i in prefs[item2]:
si[i] = 1
#print si
if len(si) == 0: return 0
x = sqrt(sum([prefs[item1][it] ** 2 for it in si]))
y = sqrt(sum([prefs[item2][it] ** 2 for it in si]))
xy = sum([prefs[item1][it] * prefs[item2][it] for it in si])
cos = xy / (x * y)
return cos
def sim_cosine_improved(prefs, item1, item2):
si = {}
for i in prefs[item1]:
if i in prefs[item2]:
si[i] = 1
#print si
n = len(si)
if n == 0: return 0
count1 = 0
count2 = 0
for item in prefs[item1]:
count1 += 1
for item in prefs[item2]:
count2 += 1
totalCount = count1 + count2 - n
x = sqrt(sum([prefs[item1][it] ** 2 for it in si]))
y = sqrt(sum([prefs[item2][it] ** 2 for it in si]))
xy = sum([prefs[item1][it] * prefs[item2][it] for it in si])
cos = xy / (x * y)
return cos * (float(n) / float(totalCount))
def sim_Jaccard(s1, s2, length):
count = 0
for i in range(0, length):
if s1[i] == '1' and s2[i] == '1':
count += 1
if s1[i] == '1\n' and s2[i] == '1\n':
count += 1
return count / (length - count)
def sim_itemType(s1, s2, length):
count = 0
for i in range(0, length):
if s1[i] == '1' and s2[i] == '1':
count += 1
if s1[i] == '1\n' and s2[i] == '1\n':
count += 1
return count / 5
def sim_cosine_improved_tag(prefs, item1, item2, movieTags):
common = 0
for i in movieTags[item1]:
if i in movieTags[item2]:
common += 1
if common >= 5:
return 0.8
else:
si = {}
for i in prefs[item1]:
if i in prefs[item2]:
si[i] = 1
#print si
n = len(si)
if n == 0: return 0
count1 = 0
count2 = 0
for item in prefs[item1]:
count1 += 1
for item in prefs[item2]:
count2 += 1
totalCount = count1 + count2 - n
x = sqrt(sum([prefs[item1][it] ** 2 for it in si]))
y = sqrt(sum([prefs[item2][it] ** 2 for it in si]))
xy = sum([prefs[item1][it] * prefs[item2][it] for it in si])
cos = xy / (x * y)
return cos * (float(n) / float(totalCount))
#def sim_pearson_improved_typeAdded(prefs,item1,item2):
# pearson_improved=sim_pearson_improved(prefs,item1,item2)
# item_type=itemSimSet[item1][item2]
# return 0.9*(pearson_improved+1)/2.0+0.1*item_type
|
mit
| 2,447,981,508,787,601,000
| 27.560538
| 107
| 0.566808
| false
| 3.040095
| false
| false
| false
|
fossdevil/Assignments
|
Machine Learning/Assignment3Final/ML4.py
|
1
|
3746
|
import numpy as np
import scipy
import matplotlib.pyplot as plt
import random
# N points in d dimensions
def generatePoints(n,d):
points = []
for i in range(0,n):
point = np.random.normal(0,1,d);
p = point**2;
den = np.sqrt(sum(p));
point = list(point/den);
points.append(point);
return points;
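# Sanity check (hypothetical call): each point is a Gaussian vector scaled to
# unit norm, so the sample is uniform on the unit sphere in R^d.
#   pts = generatePoints(5, 3)
#   assert all(abs(np.linalg.norm(p) - 1.0) < 1e-9 for p in pts)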
def interPointDistance(points,n,d):
distMat = []
distance = 0;
for i in range(0,n):
disti = []
for j in range(0,n):
distance = np.linalg.norm(list(np.asarray(points[i])-np.asarray(points[j])));
disti.append(distance);
distMat.append(disti);
return distMat;
def projection(points,subspace,n):
projPoint = []
subspacet = np.asmatrix(subspace);
subspace = subspacet.T;
for i in range(0,n):
inv = np.linalg.inv(np.dot(subspacet,subspace));
proj = np.dot(np.dot(np.dot(subspace,inv),subspacet),points[i]);
projPoint.append(proj);
return projPoint;
def subspaceGen(n,d):
subspace = [];
subv = np.zeros(d);
r = np.arange(0,d);
k = list(random.sample(r,n));
j = 0;
for i in range(0,n):
subv = np.zeros(d);
subv[k[j]] = 1;
j = j+1;
subspace.append(subv);
return subspace;
n = 50;
d = 200;
points50 = generatePoints(n,d);
distMat = interPointDistance(points50,n,d);
print("Please open file \"Solution4.txt\":");
filename = "Solution4.txt"
target = open(filename,'w');
target.write("The interpoint distance Matrix is as follows:\n");
for i in range(0,n):
target.write(str(distMat[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
subspaces1 = np.asmatrix(subspaceGen(1,d));
subspaces2 = np.asmatrix(subspaceGen(2,d));
subspaces3 = np.asmatrix(subspaceGen(3,d));
subspaces10 = np.asmatrix(subspaceGen(10,d));
subspaces50 = np.asmatrix(subspaceGen(50,d));
projPoint1 = projection(points50,subspaces1,n);
projPoint2 = projection(points50,subspaces2,n);
projPoint3 = projection(points50,subspaces3,n);
projPoint10 = projection(points50,subspaces10,n);
projPoint50 = projection(points50,subspaces50,n);
distMat1 = interPointDistance(projPoint1,n,d);
distMat2 = interPointDistance(projPoint2,n,d);
distMat3 = interPointDistance(projPoint3,n,d);
distMat10 = interPointDistance(projPoint10,n,d);
distMat50 = interPointDistance(projPoint50,n,d);
num = np.sqrt(1.0/200);
diff1 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat1));
num = np.sqrt(2.0/200);
diff2 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat2));
num = np.sqrt(3.0/200);
diff3 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat3));
num = np.sqrt(10.0/200);
diff10 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat10));
num = np.sqrt(50.0/200);
diff50 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat50));
target.write("Difference matrix is as follows:\n");
target.write("For k = 1");
target.write("\n");
for i in range(0,n):
target.write(str(diff1[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 2");
target.write("\n");
for i in range(0,n):
target.write(str(diff2[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 3");
target.write("\n");
for i in range(0,n):
target.write(str(diff3[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 10");
target.write("\n");
for i in range(0,n):
target.write(str(diff10[i]));
target.write("\n");
target.write("\n");
target.write("\n");
target.write("\n");
target.write("For k = 50");
target.write("\n");
for i in range(0,n):
target.write(str(diff50[i]));
target.write("\n");
target.close();
|
mit
| 5,465,868,872,613,046,000
| 26.544118
| 82
| 0.652429
| false
| 2.77071
| false
| false
| false
|
4383/street-workout-database
|
sport/web/commons/templatetags/common_tags.py
|
1
|
3392
|
__author__ = 'herve.beraud'
from datetime import datetime, timedelta
from django import template
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.timesince import timesince
from community.models import InformationMessage
from exercises.models import Category
from exercises.models import MuscleGroup
from exercises.models import Muscle
register = template.Library()
@register.inclusion_tag('common_tags/show_exercises_menu.html')
def show_exercises_menu():
categories = Category.objects.filter(active=True).count()
muscles_groups = MuscleGroup.objects.filter(active=True).count()
muscles = Muscle.objects.filter(active=True).count()
return {'categories': categories, 'muscles_group': muscles_groups, 'muscles': muscles}
@register.inclusion_tag('common_tags/image_gallery.html')
def images_gallery(images):
return {"images": images}
@register.inclusion_tag('common_tags/grid-list-gallery.html')
def grid_list_gallery(items,
display_level=True,
display_menu=True,
shortcut_menu=True,
semantic_type="exercise",
margin_bottom=False
):
return {"items": items,
"display_level": display_level,
"display_menu": display_menu,
"shortcut_menu": shortcut_menu,
"semantic_type": semantic_type,
"margin_bottom": margin_bottom
}
@register.inclusion_tag('common_tags/video_gallery.html')
def videos_gallery(videos):
return {"videos": videos}
@register.inclusion_tag('common_tags/grid-list-gallery-menu.html')
def grid_list_gallery_menu():
return {}
@register.inclusion_tag('common_tags/display_information_message.html', takes_context=True)
def display_information_message(context):
expiration_date = datetime.today() + timedelta(days=365)
cookie_date_format = "%a, %d %b %Y %I:%M:%S GMT"
try:
information_message = InformationMessage.objects.filter(
active=True,
display_date__lte=datetime.now(), expiration_date__gt=datetime.now()).latest('publish_date')
request = context['request']
if information_message.display_once:
try:
already_read_information_message_id = int(request.COOKIES.get('information_message_id'))
if already_read_information_message_id == information_message.id:
information_message = None
# Cookie not found
except TypeError:
pass
except ObjectDoesNotExist:
information_message = None
return {"information_message": information_message, "expiration_date": expiration_date.strftime(cookie_date_format)}
@register.simple_tag
def current_version():
return settings.CURRENT_VERSION
@register.simple_tag
def current_revision():
return settings.CURRENT_REVISION
@register.simple_tag
def last_update_date_since():
now = datetime.now()
update = datetime.fromtimestamp(settings.LAST_UPDATE_DATE)
return timesince(update, now)
@register.simple_tag
def last_update_date():
return datetime.fromtimestamp(settings.LAST_UPDATE_DATE)
@register.simple_tag
def last_update_status():
return settings.LAST_UPDATE_STATUS
@register.simple_tag
def debugging():
return settings.DEBUG
|
gpl-2.0
| 5,402,247,503,451,819,000
| 29.558559
| 120
| 0.680425
| false
| 3.916859
| false
| false
| false
|
openprocurement/openprocurement.auctions.dgf
|
openprocurement/auctions/dgf/views/other/question.py
|
1
|
3280
|
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
apply_patch,
context_unpack,
get_now,
json_view,
opresource,
save_auction,
)
from openprocurement.auctions.core.validation import (
validate_question_data,
validate_patch_question_data,
)
from openprocurement.auctions.core.views.mixins import AuctionQuestionResource
@opresource(name='dgfOtherAssets:Auction Questions',
collection_path='/auctions/{auction_id}/questions',
path='/auctions/{auction_id}/questions/{question_id}',
auctionsprocurementMethodType="dgfOtherAssets",
description="Auction questions")
class AuctionQuestionResource(AuctionQuestionResource):
@json_view(content_type="application/json", validators=(validate_question_data,), permission='create_question')
def collection_post(self):
"""Post a question
"""
auction = self.request.validated['auction']
if auction.status != 'active.tendering' or get_now() < auction.enquiryPeriod.startDate or get_now() > auction.enquiryPeriod.endDate:
self.request.errors.add('body', 'data', 'Can add question only in enquiryPeriod')
self.request.errors.status = 403
return
question = self.request.validated['question']
if any([i.status != 'active' for i in auction.lots if i.id == question.relatedItem]):
self.request.errors.add('body', 'data', 'Can add question only in active lot status')
self.request.errors.status = 403
return
auction.questions.append(question)
if save_auction(self.request):
self.LOGGER.info('Created auction question {}'.format(question.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_question_create'}, {'question_id': question.id}))
self.request.response.status = 201
route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, question_id=question.id, _query={})
return {'data': question.serialize("view")}
@json_view(content_type="application/json", permission='edit_auction', validators=(validate_patch_question_data,))
def patch(self):
"""Post an Answer
"""
auction = self.request.validated['auction']
if auction.status != 'active.tendering':
self.request.errors.add('body', 'data', 'Can\'t update question in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
if any([i.status != 'active' for i in auction.lots if i.id == self.request.context.relatedItem]):
self.request.errors.add('body', 'data', 'Can update question only in active lot status')
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
self.LOGGER.info('Updated auction question {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_question_patch'}))
return {'data': self.request.context.serialize(auction.status)}
|
apache-2.0
| -685,575,204,288,687,500
| 51.063492
| 141
| 0.651829
| false
| 3.995128
| false
| false
| false
|
tommy-u/chaco
|
chaco/multi_line_plot.py
|
1
|
16640
|
""" Defines the MultiLinePlot class.
"""
from __future__ import with_statement
# Standard library imports
import warnings
from math import ceil, floor
# Major library imports
import numpy as np
from numpy import argsort, array, invert, isnan, take, transpose
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, LineStyle
from traits.api import Float, List, Str, Trait, \
Bool, Callable, Property, cached_property, Instance, Array
from traitsui.api import Item, View, ScrubberEditor, HGroup
from array_data_source import ArrayDataSource
from base import arg_find_runs, bin_search
from base_xy_plot import BaseXYPlot
class MultiLinePlot(BaseXYPlot):
""" A plot consisting of multiple lines.
The data to be plotted must come from a two-dimensional array with shape M by N
stored in a MultiArrayDataSource object. M is the number of lines to be plotted,
and N is the number of points in each line.
Constructor Parameters
----------------------
index : instance of an ArrayDataSource
These are the 'x' or abscissa coordinates.
yindex : instance of ArrayDataSource
These are the 'y' coordinates.
value : instance of a MultiArrayDataSource
Note that the `scale`, `offset` and `normalized_amplitude` attributes of the
MultiLinePlot control the projection of the traces into the (x,y)
plot. In the simplest case, `scale=1` and `offset=0`, and `normalized_amplitude`
controls the scaling of the traces relative to their base y value.
global_min, global_max : float
The minimum and maximum values of the data in `value`. For large
arrays, computing these could take excessive time, so they must be
provided when an instance is created.
normalized_amplitude : Float
color : ColorTrait
color_func : Callable or None
If not None, this Callable overrides `color`. The argument to `color_func`
will be the integer index of the trace to be rendered. `color_func` must
return an RGBA 4-tuple.
Default: None
orientation : str
Must be 'v' or 'h' (for 'vertical' or 'horizontal', respectively). This is
the orientation of the index axis (i.e. the 'x' axis).
Default: 'h'
fast_clip : bool
If True, traces whose *base* 'y' coordinate is outside the value axis range
are not plotted, even if some of the data in the curve extends into the plot
region.
Default: False
line_width : float
Width of the plotted lines.
line_style :
The style of the trace lines in the plot.
The following are from the original LinePlot code, and are untested:
selected_color
selected_line_style
"""
# M and N appearing in the comments are as defined in the docstring.
yindex = Instance(ArrayDataSource)
# amplitude = Float(0.0)
# `scale` and `offset` provide a more general transformation, but are currently
# untested.
scale = Float(1.0)
offset = Float(0.0)
fast_clip = Bool(False)
# The color of the lines.
color = black_color_trait
# A function that returns the color of lines. Overrides `color` if not None.
color_func = Trait(None, None, Callable)
# The color to use to highlight the line when selected.
selected_color = ColorTrait("lightyellow")
# The style of the selected line.
selected_line_style = LineStyle("solid")
# The name of the key in self.metadata that holds the selection mask
metadata_name = Str("selections")
# The thickness of the line.
line_width = Float(1.0)
# The line dash style.
line_style = LineStyle
use_global_bounds = Bool(True)
# Minimum value in the `value` data source. This must be provided
# in the call to the constructor.
global_min = Float
# Maximum value in the `value` data source. This must be provided
# in the call to the constructor.
global_max = Float
# Normalized amplitude is the value exposed to the user.
normalized_amplitude = Float(-0.5)
amplitude_scale = Property(Float, depends_on=['global_min', 'global_max', 'data',
'use_global_bounds', 'yindex'])
amplitude = Property(Float, depends_on=['normalized_amplitude',
'amplitude_scale'])
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
# The projected 2D numpy array.
_trace_data = Property(Array, depends_on=['index', 'index.data_changed',
'value', 'value.data_changed', 'yindex', 'yindex.data_changed',
'amplitude', 'scale', 'offset'])
# Cached list of non-NaN arrays of (x,y) data-space points; regardless of
# self.orientation, this is always stored as (index_pt, value_pt). This is
# different from the default BaseXYPlot definition.
_cached_data_pts = List
# Cached list of non-NaN arrays of (x,y) screen-space points.
_cached_screen_pts = List
#------------------------------------------------------------------------
#
#------------------------------------------------------------------------
def trait_view(self, obj):
"""Create a minimalist View, with just the amplitude and color attributes."""
# Minimalist Traits UI View for customizing the plot: only the trace amplitude
# and line color are exposed.
view = View(
HGroup(
Item('use_global_bounds'),
# Item('normalized_amplitude'),
# Item('normalized_amplitude', editor=RangeEditor()),
Item('normalized_amplitude',
editor=ScrubberEditor(increment=0.2, hover_color=0xFFFFFF, active_color=0xA0CD9E,
border_color=0x0000FF)),
),
Item("color", label="Trace color", style="simple"),
width=480,
title="Trace Plot Line Attributes",
buttons=["OK", "Cancel"])
return view
#------------------------------------------------------------------------
#
#------------------------------------------------------------------------
# See base_xy_plot.py for these:
## def hittest(self, screen_pt, threshold=7.0):
## def interpolate(self, index_value):
def get_screen_points(self):
self._gather_points()
scrn_pts_list = [[self.map_screen(ary) for ary in line]
for line in self._cached_data_pts]
return scrn_pts_list
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
@cached_property
def _get_amplitude_scale(self):
"""
If the amplitude is set to this value, the largest trace deviation from
its base y coordinate will be half the y coordinate spacing, so the
peak-to-peak extent of a trace equals the spacing.
"""
# Note: Like the rest of the current code, this ignores the `scale` attribute.
if self.yindex is not None:
coordinates = self.yindex.get_data()
else:
coordinates = []
if len(coordinates) > 1:
dy = coordinates[1] - coordinates[0]
if dy == 0:
dy = 1.0
else:
# default coordinate spacing if there is only 1 coordinate
dy = 1.0
if self.use_global_bounds:
max_abs = max(abs(self.global_min), abs(self.global_max))
else:
data = self.value._data
max_abs = np.max(np.abs(data))
if max_abs == 0:
amp_scale = 0.5 * dy
else:
amp_scale = 0.5 * dy / max_abs
return amp_scale
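# e.g. (hypothetical numbers): coordinate spacing dy = 2.0 and max |data| =
# 4.0 give amp_scale = 0.5 * 2.0 / 4.0 = 0.25, so normalized_amplitude = 1.0
# scales +/-4.0 data to +/-1.0, i.e. half the coordinate spacing on either
# side of a trace's base line.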
@cached_property
def _get_amplitude(self):
amplitude = self.normalized_amplitude * self.amplitude_scale
return amplitude
@cached_property
def _get__trace_data(self):
"""Compute the transformed data."""
# Get the array from `value`
data = self.value._data
coordinates = self.yindex.get_data()
channel_data = self.scale*(self.amplitude*data + coordinates[:,np.newaxis]) \
+ self.offset
return channel_data
def _gather_points(self):
"""
Collects the data points that are within the bounds of the plot and
caches them.
"""
if self._cache_valid:
return
if not self.index or not self.value:
return
index = self.index.get_data()
varray = self._trace_data
if varray.size == 0:
self._cached_data_pts = []
self._cache_valid = True
return
coordinates = self.yindex.get_data()
if self.fast_clip:
coord_min = float(coordinates[0])
coord_max = coordinates[-1]
slice_min = max(0,ceil((varray.shape[0]-1)*(self.value_range.low - coord_min)/(coord_max - coord_min)))
slice_max = min(varray.shape[0], 1+floor((varray.shape[0]-1)*(self.value_range.high - coord_min)/(coord_max - coord_min)))
varray = varray[slice_min:slice_max]
# FIXME: The y coordinates must also be sliced to match varray.
# Check to see if the data is completely outside the view region.
outside = False
# Check x coordinates.
low, high = self.index.get_bounds()
if low > self.index_range.high or high < self.index_range.low:
outside = True
# Check y coordinates. Use varray because it is based on the yindex,
# but has been shifted up or down depending on the values.
ylow, yhigh = varray.min(), varray.max()
if ylow > self.value_range.high or yhigh < self.value_range.low:
outside = True
if outside:
self._cached_data_pts = []
self._cache_valid = True
return
if len(index) == 0 or varray.shape[0] == 0 or varray.shape[1] == 0 \
or len(index) != varray.shape[1]:
self._cached_data_pts = []
self._cache_valid = True
return
size_diff = varray.shape[1] - len(index)
if size_diff > 0:
warnings.warn('Chaco.LinePlot: value.shape[1] %d - len(index) %d = %d\n' \
% (varray.shape[1], len(index), size_diff))
index_max = len(index)
varray = varray[:,:index_max]
else:
index_max = varray.shape[1]
index = index[:index_max]
# Split the index and value raw data into non-NaN chunks.
# nan_mask is a boolean M by N array.
nan_mask = invert(isnan(varray)) & invert(isnan(index))
blocks_list = []
for nm in nan_mask:
blocks = [b for b in arg_find_runs(nm, "flat") if nm[b[0]] != 0]
blocks_list.append(blocks)
line_points = []
for k, blocks in enumerate(blocks_list):
points = []
for block in blocks:
start, end = block
block_index = index[start:end]
block_value = varray[k, start:end]
index_mask = self.index_mapper.range.mask_data(block_index)
runs = [r for r in arg_find_runs(index_mask, "flat") \
if index_mask[r[0]] != 0]
# Check to see if our data view region is between two points in the
# index data. If so, then we have to reverse map our current view
# into the appropriate index and draw the bracketing points.
if runs == []:
data_pt = self.map_data((self.x_mapper.low_pos, self.y_mapper.low_pos))
if self.index.sort_order == "none":
indices = argsort(index)
sorted_index = take(index, indices)
sorted_value = take(varray[k], indices)
sort = 1
else:
sorted_index = index
sorted_value = varray[k]
if self.index.sort_order == "ascending":
sort = 1
else:
sort = -1
ndx = bin_search(sorted_index, data_pt, sort)
if ndx == -1:
# bin_search can return -1 if data_pt is outside the bounds
# of the source data
continue
z = transpose(array((sorted_index[ndx:ndx+2],
sorted_value[ndx:ndx+2])))
points.append(z)
else:
# Expand the width of every group of points so we draw the lines
# up to their next point, outside the plot area
data_end = len(index_mask)
for run in runs:
start, end = run
if start != 0:
start -= 1
if end != data_end:
end += 1
run_data = transpose(array((block_index[start:end],
block_value[start:end])))
points.append(run_data)
line_points.append(points)
self._cached_data_pts = line_points
self._cache_valid = True
return
# See base_xy_plot.py for:
## def _downsample(self):
## def _downsample_vectorized(self):
def _render(self, gc, line_points, selected_points=None):
if len(line_points) == 0:
return
with gc:
gc.set_antialias(True)
gc.clip_to_rect(self.x, self.y, self.width, self.height)
render = self._render_normal
if selected_points is not None:
gc.set_stroke_color(self.selected_color_)
gc.set_line_width(self.line_width+10.0)
gc.set_line_dash(self.selected_line_style_)
render(gc, selected_points)
if self.color_func is not None:
# Existence of self.color_func overrides self.color.
color_func = self.color_func
else:
color_func = lambda k: self.color_
tmp = list(enumerate(line_points))
# Note: the list is reversed for testing with _render_filled.
for k, points in reversed(tmp):
color = color_func(k)
# Apply the alpha
alpha = color[-1] if len(color) == 4 else 1
color = color[:3] + (alpha * self.alpha,)
gc.set_stroke_color(color)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
render(gc, points)
# Draw the default axes, if necessary
self._draw_default_axes(gc)
def _render_normal(self, gc, points):
for ary in points:
if len(ary) > 0:
gc.begin_path()
gc.lines(ary)
gc.stroke_path()
return
def _render_icon(self, gc, x, y, width, height):
with gc:
gc.set_stroke_color(self.color_)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
gc.set_antialias(0)
gc.move_to(x, y+height/2)
gc.line_to(x+width, y+height/2)
gc.stroke_path()
def _alpha_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _color_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _line_style_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _line_width_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _amplitude_changed(self):
self.value.data_changed = True
self.invalidate_draw()
self.request_redraw()
return
def __getstate__(self):
state = super(MultiLinePlot,self).__getstate__()
for key in ['traits_view']:
if state.has_key(key):
del state[key]
return state
|
bsd-3-clause
| -2,342,800,567,750,205,400
| 34.031579
| 134
| 0.534796
| false
| 4.275437
| false
| false
| false
|
pylover/network-interfaces
|
network_interfaces/stanza.py
|
1
|
4473
|
# -*- coding: utf-8 -*-
import re
from .helpers import clean_list, list_hash
__author__ = 'vahid'
class Stanza(object):
_type = None
_filename = None
_headers = None
def __init__(self, filename, *headers):
self._filename = filename
self._headers = list(headers)
def __repr__(self):
return ' '.join(self._headers)
def _headers_hash(self):
result = 0
for h in self._headers:
result ^= h.__hash__()
return result
def __hash__(self):
return \
self._type.__hash__() ^ \
self._headers_hash()
@classmethod
def is_stanza(cls, s):
return re.match(r'^(iface|mapping|auto|allow-|source).*', s)
@classmethod
def subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in s.subclasses()]
@classmethod
def create(cls, header, filename):
        cells = re.split(r'\s+', header)
cells = clean_list(cells)
stanza_type = cells[0]
subclasses = cls.subclasses()
# Checking for exact match
for subclass in subclasses:
if subclass._type and stanza_type == subclass._type:
return subclass(filename, *cells)
# Partial start match
for subclass in subclasses:
if subclass._type and stanza_type.startswith(subclass._type):
return subclass(filename, *cells)
def validate(self, allow_correction=False):
pass
class MultilineStanza(Stanza):
_items = None
def __init__(self, *args, **kwargs):
super(MultilineStanza, self).__init__(*args, **kwargs)
self._items = []
def __getattr__(self, item):
try:
return self[item]
except (KeyError, IndexError):
return super(MultilineStanza, self).__getattribute__(item)
#raise AttributeError('%s %s' % (object.__repr__(self), item))
def __setattr__(self, key, value):
if hasattr(self.__class__, key):
super(Stanza, self).__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, item):
if hasattr(self.__class__, item):
super(Stanza, self).__delattr__(item)
else:
del self[item]
def __contains__(self, item):
return self.__getitem_internal(item) is not None
def __getitem__(self, item):
if not isinstance(item, str):
raise TypeError(type(item))
result = self.__getitem_internal(item)
if not result:
raise KeyError(item)
return ' '.join(result[1:])
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError(type(key))
        values = re.split(r'\s+', value)
cells = self.__getitem_internal(key)
if not cells:
self.add_entry(' '.join([key] + values))
else:
del cells[1:]
cells += values
def __delitem__(self, item):
if not isinstance(item, str):
raise TypeError(type(item))
self.__delitem_internal(item)
def __repr__(self):
items = [(i[0], ' '.join(i[1:]).strip()) for i in self._items]
return '%s\n%s\n' % (
super(MultilineStanza, self).__repr__(),
'\n'.join([' %s %s' % (i[0], i[1]) for i in items if i[1]]))
def __hash__(self):
return super(MultilineStanza, self).__hash__() ^ self._items_hash()
def update(self, other):
if isinstance(other, dict):
for k, v in other.items():
self[k.replace('_', '-')] = v
else:
raise ValueError('A dict is required, but %s was passed.' % type(other))
def _items_hash(self):
result = 0
for i in self._items:
result ^= list_hash(i)
return result
def add_entry(self, l):
        cells = re.split(r'\s+', l)
cells = clean_list(cells)
if cells and cells not in self._items:
self._items.append(cells)
def __getitem_internal(self, item):
key = item.replace('_', '-')
for i in self._items:
if i[0] == key:
return i
return None
def __delitem_internal(self, item):
key = item.replace('_', '-')
for i in self._items:
if i[0] == key:
self._items.remove(i)
return
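# Illustrative usage sketch (not part of the original module; ``IfaceStanza``
# is a hypothetical subclass, since the concrete stanza types live elsewhere
# in this package). It shows the header dispatch in ``Stanza.create`` and the
# dash/underscore item access provided by ``MultilineStanza``:
#
#     class IfaceStanza(MultilineStanza):
#         _type = 'iface'
#
#     stanza = Stanza.create('iface eth0 inet static', '/etc/network/interfaces')
#     stanza.add_entry('address 10.0.0.2')
#     stanza.address                      # -> '10.0.0.2' (__getattr__ -> __getitem__)
#     stanza.netmask = '255.255.255.0'    # __setattr__ -> __setitem__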
|
gpl-3.0
| -2,740,219,951,952,730,600
| 27.673077
| 84
| 0.521127
| false
| 4.066364
| false
| false
| false
|
regardscitoyens/twitter-parlementaires
|
download_twitter.py
|
1
|
1413
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, json
from twitter import Twitter, OAuth
from twitterconfig import KEY, SECRET, OAUTH_TOKEN, OAUTH_SECRET
if len(sys.argv) < 3:
sys.stderr.write("Please input both Twitter list's owner_screen_name and slug\n")
exit(1)
LIST_USER, LIST_ID = sys.argv[1:3]
if not os.path.isdir(".cache"):
os.makedirs(".cache")
t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, KEY, SECRET))
accounts = {}
page = 1
args = {
"owner_screen_name": LIST_USER,
"include_entities": "false",
"skip_status": "true",
"count": 5000,
"cursor": -1
}
try:
    args["list_id"] = int(LIST_ID)
except ValueError:
args["slug"] = LIST_ID
while args["cursor"]:
res = t.lists.members(**args)
with open(os.path.join('.cache', 'twitter-%s-%s.json' % (LIST_USER, args["cursor"] if args["cursor"] != -1 else 0)), 'w') as f:
json.dump(res, f)
args["cursor"] = res.get('next_cursor', res.get('next_cursor_str', 0))
new = 0
for account in res['users']:
name = account['screen_name'].lower()
if name not in accounts:
accounts[name] = account
new += 1
print("[INFO/%s] page %s -> %s results including %s new ; new total: %s" % (LIST_ID, page, len(res['users']), new, len(accounts)))
page += 1
with open(os.path.join('.cache', 'twitter-%s.json' % LIST_ID), 'w') as f:
json.dump(accounts, f)
|
agpl-3.0
| -6,752,805,189,565,542,000
| 29.717391
| 134
| 0.602265
| false
| 3.025696
| false
| false
| false
|
senser/xmppBot
|
ZenPacks/community/xmppBot/Jabber/plugins/setjid.py
|
1
|
4107
|
"""Check if the sender is a valid zenoss admin. For access control"""
from Jabber.Plugins import Plugin
from Jabber.ZenAdapter import ZenAdapter
from Jabber.Options import Options
from optparse import OptionError
import transaction
class SetJid(Plugin):
name = 'mapuser'
capabilities = ['setjid', 'mapuser', 'help']
def call(self, args, sender, log, **kw):
log.debug('mapuser plugin running with %s' % args)
opts = self.options()
# parse the options
try:
(options, arguments) = opts.parse_args(args)
log.debug('Done parsing arguments. Options are "%s", arguments expanded to %s' % (options, arguments))
except OptionError, message:
return str(message)
if options.zenUser is None or options.jabberId is None:
return 'NO. -u and -j are both required.'
adapter = ZenAdapter()
jabberId = options.jabberId.lower()
haveUser = False
for user in adapter.userSettings():
if user.id.lower() == options.zenUser.lower():
haveUser = True
try:
currentId = user.getProperty('JabberId')
except AttributeError:
currentId = False
if currentId:
if options.jabberId == currentId.lower():
if options.force:
self.mapIds(jabberId, user)
return 'This user mapping already looks like this. Forced option was used, so I set it anyway.'
else:
return 'This user mapping already looks like this.'
if '/' in sender:
sender = sender.split('/')[0]
if currentId.lower() == sender.lower():
                        if options.force:
                            self.mapIds(jabberId, user)
                            return 'This is your Zenoss user id, and the mapping is already set correctly. However, the force option was used, so I set it anyway. Since this will probably break communication with me, you can change it back from the Zope interface.'
                        else:
                            return 'This is your Zenoss user id, and the mapping is already set correctly. Changing it will prevent you from communicating with me. If you really want to change it, do so from the Zenoss interface or -f.'
log.debug('Setting the jabberid mapping property to %s for zenuser %s' % (jabberId, user))
self.mapIds(jabberId, user)
break
if haveUser:
return 'JabberId for this user has been saved. Thanks.'
else:
return 'Sorry! I Could not find a Zenoss user by the name %s' % options.zenUser
def mapIds(self, jabberId, zenUser):
self.setPropertyIfNeeded(zenUser)
zenUser._updateProperty('JabberId', jabberId)
transaction.commit()
def setPropertyIfNeeded(self, zenUser):
if not zenUser.hasProperty('JabberId'):
zenUser.manage_addProperty('JabberId', '', 'string')
zenUser._setProperty('JabberId', '', 'string')
try:
zenUser.getProperty('JabberId')
except AttributeError:
zenUser.manage_addProperty('JabberId', '', 'string')
# unnecessary?
#zenUser._setProperty('JabberId', '', 'string')
def private(self):
return False
def options(self):
        parser = Options(description = 'Map a Jabber ID to a Zenoss user', prog = 'mapuser')
parser.add_option('-u', '--user', dest='zenUser', help='Zenoss username (must already exist in zenoss).')
parser.add_option('-j', '--jid', dest='jabberId', help='JabberID to map to the zenoss user.')
parser.add_option('-f', '--force', dest='force', action='store_true', help='Force association even if it could disallow your own user. USE WITH CAUTION.')
return parser
def help(self):
opts = self.options()
return str(opts.help())
|
gpl-2.0
| -3,075,416,412,988,753,400
| 45.146067
| 267
| 0.582177
| false
| 4.359873
| false
| false
| false
|
daymer/xWIKI_Karma
|
CustomModules/mysql-connector-python-2.1.7/lib/cpy_distutils.py
|
1
|
24414
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the DistUtils command 'build_ext'
"""
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.errors import DistutilsExecError
from distutils.util import get_platform
from distutils.dir_util import copy_tree
from distutils import log
from glob import glob
import os
import shlex
import struct
from subprocess import Popen, PIPE, STDOUT, check_call
import sys
import platform
import shutil
ARCH_64BIT = sys.maxsize > 2**32 # Works with Python 2.6 and greater
py_arch = '64-bit' if ARCH_64BIT else '32-bit'
CEXT_OPTIONS = [
('with-mysql-capi=', None,
"Location of MySQL C API installation or path to mysql_config"),
('extra-compile-args=', None,
"Extra compile args"),
('extra-link-args=', None,
"Extra link args")
]
CEXT_STATIC_OPTIONS = [
('static', None,
"Link C libraries statically with the C Extension"),
]
INSTALL_OPTIONS = [
('byte-code-only=', None,
"Remove Python .py files; leave byte code .pyc only"),
]
def win_dll_is64bit(dll_file):
"""Check if a Windows DLL is 64 bit or not
Returns True if the library dll_file is 64bit.
Raises ValueError when magic of header is invalid.
Raises IOError when file could not be read.
    Raises OSError when executed on a non-Windows platform.
Returns True or False.
"""
if os.name != 'nt':
        raise OSError("win_dll_is64bit only useful on Windows")
with open(dll_file, 'rb') as fp:
# IMAGE_DOS_HEADER
e_magic = fp.read(2)
if e_magic != b'MZ':
raise ValueError("Wrong magic in header")
fp.seek(60)
offset = struct.unpack("I", fp.read(4))[0]
# IMAGE_FILE_HEADER
fp.seek(offset)
file_header = fp.read(6)
(signature, machine) = struct.unpack("<4sH", file_header)
if machine == 0x014c: # IMAGE_FILE_MACHINE_I386
return False
elif machine in (0x8664, 0x2000): # IMAGE_FILE_MACHINE_I386/AMD64
return True
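# Illustrative sketch (hypothetical path, not from the original source): on a
# 64-bit MySQL Connector/C install one would expect
#     win_dll_is64bit(r'C:\Program Files\MySQL\Connector C 6.1\lib\libmysql.dll')
# to return True, and False for a 32-bit build of the same DLL.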
def unix_lib_is64bit(lib_file):
"""Check if a library on UNIX is 64 bit or not
This function uses the `file` command to check if a library on
UNIX-like platforms is 32 or 64 bit.
Returns True if the library is 64bit.
Raises ValueError when magic of header is invalid.
Raises IOError when file could not be read.
    Raises OSError when executed on a non-UNIX platform.
Returns True or False.
"""
if os.name != 'posix':
raise OSError("unix_lib_is64bit only useful on UNIX-like systems")
    if os.path.isdir(lib_file):
mysqlclient_libs = []
for root, _, files in os.walk(lib_file):
for filename in files:
filepath = os.path.join(root, filename)
if filename.startswith('libmysqlclient') and \
not os.path.islink(filepath) and \
'_r' not in filename and \
'.a' not in filename:
mysqlclient_libs.append(filepath)
if mysqlclient_libs:
break
# give priority to .so files instead of .a
mysqlclient_libs.sort()
lib_file = mysqlclient_libs[-1]
log.debug("# Using file command to test lib_file {0}".format(lib_file))
    if platform.uname()[0] == 'SunOS':
        cmd_list = ['file', lib_file]
    else:
        cmd_list = ['file', '-L', lib_file]
    prc = Popen(cmd_list, stdin=PIPE, stderr=STDOUT,
                stdout=PIPE, universal_newlines=True)
stdout = prc.communicate()[0]
stdout = stdout.split(':')[1]
log.debug("# lib_file {0} stdout: {1}".format(lib_file, stdout))
if 'x86_64' in stdout or 'x86-64' in stdout or '32-bit' not in stdout:
return True
return False
def parse_mysql_config_info(options, stdout):
log.debug("# stdout: {0}".format(stdout))
info = {}
for option, line in zip(options, stdout.split('\n')):
log.debug("# option: {0}".format(option))
log.debug("# line: {0}".format(line))
info[option] = line.strip()
ver = info['version']
    if '-' in ver:
        ver, _ = ver.split('-', 1)
info['version'] = tuple([int(v) for v in ver.split('.')[0:3]])
libs = shlex.split(info['libs'])
info['lib_dir'] = libs[0].replace('-L', '')
info['libs'] = [ lib.replace('-l', '') for lib in libs[1:] ]
if platform.uname()[0] == 'SunOS':
info['lib_dir'] = info['lib_dir'].replace('-R', '')
info['libs'] = [lib.replace('-R', '') for lib in info['libs']]
log.debug("# info['libs']: ")
for lib in info['libs']:
log.debug("# {0}".format(lib))
libs = shlex.split(info['libs_r'])
info['lib_r_dir'] = libs[0].replace('-L', '')
info['libs_r'] = [ lib.replace('-l', '') for lib in libs[1:] ]
info['include'] = [x.strip() for x in info['include'].split('-I')[1:]]
return info
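# Illustrative sketch (assumed sample output, not from the original source):
# with the option order used by get_mysql_config_info() below,
#     options = ['cflags', 'include', 'libs', 'libs_r', 'plugindir', 'version']
#     stdout = ("-I/usr/include/mysql -g\n"
#               "-I/usr/include/mysql\n"
#               "-L/usr/lib64/mysql -lmysqlclient -lpthread\n"
#               "-L/usr/lib64/mysql -lmysqlclient_r -lpthread\n"
#               "/usr/lib64/mysql/plugin\n"
#               "5.7.19\n")
# parse_mysql_config_info(options, stdout) would yield, among other keys:
#     info['version'] == (5, 7, 19)
#     info['lib_dir'] == '/usr/lib64/mysql'
#     info['libs'] == ['mysqlclient', 'pthread']
#     info['include'] == ['/usr/include/mysql']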
def get_mysql_config_info(mysql_config):
"""Get MySQL information using mysql_config tool
Returns a dict.
"""
options = ['cflags', 'include', 'libs', 'libs_r', 'plugindir', 'version']
cmd = [mysql_config] + [ "--{0}".format(opt) for opt in options ]
try:
proc = Popen(cmd, stdout=PIPE, universal_newlines=True)
stdout, _ = proc.communicate()
except OSError as exc:
raise DistutilsExecError("Failed executing mysql_config: {0}".format(
str(exc)))
info = parse_mysql_config_info(options, stdout)
# Try to figure out the architecture
info['arch'] = None
if os.name == 'posix':
if platform.uname()[0] == 'SunOS':
print("info['lib_dir']: {0}".format(info['lib_dir']))
print("info['libs'][0]: {0}".format(info['libs'][0]))
pathname = os.path.abspath(os.path.join(info['lib_dir'],
'lib',
info['libs'][0])) + '/*'
else:
pathname = os.path.join(info['lib_dir'],
'lib' + info['libs'][0]) + '*'
print("# Looking mysqlclient_lib at path: {0}".format(pathname))
log.debug("# searching mysqlclient_lib at: %s", pathname)
libs = glob(pathname)
mysqlclient_libs = []
for filepath in libs:
_, filename = os.path.split(filepath)
log.debug("# filename {0}".format(filename))
if filename.startswith('libmysqlclient') and \
not os.path.islink(filepath) and \
'_r' not in filename and \
'.a' not in filename:
mysqlclient_libs.append(filepath)
mysqlclient_libs.sort()
stdout = None
try:
log.debug("# mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
for mysqlclient_lib in mysqlclient_libs:
log.debug("#+ {0}".format(mysqlclient_lib))
log.debug("# tested mysqlclient_lib[-1]: "
"{0}".format(mysqlclient_libs[-1]))
if platform.uname()[0] == 'SunOS':
print("mysqlclient_lib: {0}".format(mysqlclient_libs[-1]))
cmd_list = ['file', mysqlclient_libs[-1]]
else:
cmd_list = ['file', '-L', mysqlclient_libs[-1]]
proc = Popen(cmd_list, stdout=PIPE,
universal_newlines=True)
stdout, _ = proc.communicate()
stdout = stdout.split(':')[1]
except OSError as exc:
raise DistutilsExecError(
"Although the system seems POSIX, the file-command could not "
"be executed: {0}".format(str(exc)))
if stdout:
if '64' in stdout:
info['arch'] = "x86_64"
else:
info['arch'] = "i386"
else:
            raise DistutilsExecError(
                "Failed getting output from the file-command"
            )
else:
raise DistutilsExecError(
"Cannot determine architecture on {0} systems".format(os.name))
return info
def remove_cext(distribution):
"""Remove the C Extension from the distribution
This function can be useful in Distutils commands for creating
pure Python modules.
"""
to_remove = []
for ext_mod in distribution.ext_modules:
if ext_mod.name == '_mysql_connector':
to_remove.append(ext_mod)
for ext_mod in to_remove:
distribution.ext_modules.remove(ext_mod)
class BuildExtDynamic(build_ext):
"""Build Connector/Python C Extension"""
description = "build Connector/Python C Extension"
user_options = build_ext.user_options + CEXT_OPTIONS
min_connector_c_version = None
arch = None
_mysql_config_info = None
def initialize_options(self):
build_ext.initialize_options(self)
self.extra_compile_args = None
self.extra_link_args = None
self.with_mysql_capi = None
def _finalize_connector_c(self, connc_loc):
"""Finalize the --with-connector-c command line argument
"""
platform = get_platform()
self._mysql_config_info = None
min_version = BuildExtDynamic.min_connector_c_version
err_invalid_loc = "MySQL C API location is invalid; was %s"
mysql_config = None
err_version = "MySQL C API {0}.{1}.{2} or later required".format(
*BuildExtDynamic.min_connector_c_version)
if not os.path.exists(connc_loc):
log.error(err_invalid_loc, connc_loc)
sys.exit(1)
if os.path.isdir(connc_loc):
# if directory, and no mysql_config is available, figure out the
        # lib/ and include/ folders from the filesystem
mysql_config = os.path.join(connc_loc, 'bin', 'mysql_config')
if os.path.isfile(mysql_config) and \
os.access(mysql_config, os.X_OK):
connc_loc = mysql_config
log.debug("# connc_loc: {0}".format(connc_loc))
else:
# Probably using MS Windows
myconfigh = os.path.join(connc_loc, 'include', 'my_config.h')
if not os.path.exists(myconfigh):
log.error("MySQL C API installation invalid "
"(my_config.h not found)")
sys.exit(1)
else:
with open(myconfigh, 'rb') as fp:
for line in fp.readlines():
if b'#define VERSION' in line:
version = tuple([
int(v) for v in
line.split()[2].replace(
b'"', b'').split(b'.')
])
if version < min_version:
                                log.error(err_version)
sys.exit(1)
break
# On Windows we check libmysql.dll
if os.name == 'nt':
lib = os.path.join(self.with_mysql_capi, 'lib',
'libmysql.dll')
connc_64bit = win_dll_is64bit(lib)
# On OSX we check libmysqlclient.dylib
elif 'macos' in platform:
lib = os.path.join(self.with_mysql_capi, 'lib',
'libmysqlclient.dylib')
connc_64bit = unix_lib_is64bit(lib)
# On other Unices we check libmysqlclient (follow symlinks)
elif os.name == 'posix':
connc_64bit = unix_lib_is64bit(connc_loc)
else:
raise OSError("Unsupported platform: %s" % os.name)
include_dirs = [os.path.join(connc_loc, 'include')]
if os.name == 'nt':
libraries = ['libmysql']
else:
            libraries = ['mysqlclient']
library_dirs = os.path.join(connc_loc, 'lib')
log.debug("# connc_64bit: {0}".format(connc_64bit))
if connc_64bit:
self.arch = 'x86_64'
else:
self.arch = 'i386'
# We were given the location of the mysql_config tool (not on Windows)
if not os.name == 'nt' and os.path.isfile(connc_loc) \
and os.access(connc_loc, os.X_OK):
mysql_config = connc_loc
# Check mysql_config
myc_info = get_mysql_config_info(mysql_config)
log.debug("# myc_info: {0}".format(myc_info))
if myc_info['version'] < min_version:
log.error(err_version)
sys.exit(1)
include_dirs = myc_info['include']
libraries = myc_info['libs']
library_dirs = myc_info['lib_dir']
self._mysql_config_info = myc_info
self.arch = self._mysql_config_info['arch']
connc_64bit = self.arch == 'x86_64'
for include_dir in include_dirs:
if not os.path.exists(include_dir):
log.error(err_invalid_loc, connc_loc)
sys.exit(1)
# Set up the build_ext class
self.include_dirs.extend(include_dirs)
self.libraries.extend(libraries)
self.library_dirs.append(library_dirs)
# We try to offer a nice message when the architecture of Python
# is not the same as MySQL Connector/C binaries.
print("# self.arch: {0}".format(self.arch))
if ARCH_64BIT != connc_64bit:
log.error("Python is {0}, but does not "
"match MySQL C API {1} architecture, "
"type: {2}"
"".format(py_arch,
'64-bit' if connc_64bit else '32-bit',
self.arch))
sys.exit(1)
def finalize_options(self):
self.set_undefined_options(
'install',
('extra_compile_args', 'extra_compile_args'),
('extra_link_args', 'extra_link_args'),
('with_mysql_capi', 'with_mysql_capi'))
build_ext.finalize_options(self)
print("# Python architecture: {0}".format(py_arch))
print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
if self.with_mysql_capi:
self._finalize_connector_c(self.with_mysql_capi)
def fix_compiler(self):
platform = get_platform()
cc = self.compiler
if not cc:
return
if 'macosx-10.9' in platform:
for needle in ['-mno-fused-madd']:
try:
cc.compiler.remove(needle)
cc.compiler_so.remove(needle)
except ValueError:
# We are removing, so OK when needle not there
pass
for name, args in cc.__dict__.items():
if not args or not isinstance(args, list):
continue
new_args = []
enum_args = enumerate(args)
for i, arg in enum_args:
if arg == '-arch':
# Skip not needed architecture
if args[i+1] != self.arch:
next(enum_args)
else:
new_args.append(arg)
else:
new_args.append(arg)
try:
                setattr(cc, name, new_args)
except AttributeError:
# Old class
cc.__dict__[name] = new_args
# Add system headers to Extensions extra_compile_args
sysheaders = [ '-isystem' + dir for dir in cc.include_dirs]
for ext in self.extensions:
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
# Add system headers
for sysheader in sysheaders:
if sysheader not in ext.extra_compile_args:
ext.extra_compile_args.append(sysheader)
# Stop warnings about unknown pragma
if os.name != 'nt':
ext.extra_compile_args.append('-Wno-unknown-pragmas')
def run(self):
"""Run the command"""
if os.name == 'nt':
for ext in self.extensions:
# Use the multithread, static version of the run-time library
ext.extra_compile_args.append("/MT")
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
build_ext.run(self)
else:
self.real_build_extensions = self.build_extensions
self.build_extensions = lambda: None
build_ext.run(self)
self.fix_compiler()
self.real_build_extensions()
class BuildExtStatic(BuildExtDynamic):
"""Build and Link libraries statically with the C Extensions"""
user_options = build_ext.user_options + CEXT_OPTIONS
def finalize_options(self):
install_obj = self.distribution.get_command_obj('install')
install_obj.with_mysql_capi = self.with_mysql_capi
install_obj.extra_compile_args = self.extra_compile_args
install_obj.extra_link_args = self.extra_link_args
install_obj.static = True
options_pairs = []
if not self.extra_compile_args:
options_pairs.append(('extra_compile_args', 'extra_compile_args'))
if not self.extra_link_args:
options_pairs.append(('extra_link_args', 'extra_link_args'))
if not self.with_mysql_capi:
options_pairs.append(('with_mysql_capi', 'with_mysql_capi'))
if options_pairs:
self.set_undefined_options('install', *options_pairs)
build_ext.finalize_options(self)
print("# Python architecture: {0}".format(py_arch))
print("# Python ARCH_64BIT: {0}".format(ARCH_64BIT))
self.connc_lib = os.path.join(self.build_temp, 'connc', 'lib')
self.connc_include = os.path.join(self.build_temp, 'connc', 'include')
if self.with_mysql_capi:
self._finalize_connector_c(self.with_mysql_capi)
def _finalize_connector_c(self, connc_loc):
if not os.path.isdir(connc_loc):
log.error("MySQL C API should be a directory")
sys.exit(1)
log.info("Copying MySQL libraries")
copy_tree(os.path.join(connc_loc, 'lib'), self.connc_lib)
log.info("Copying MySQL header files")
copy_tree(os.path.join(connc_loc, 'include'), self.connc_include)
# Remove all but static libraries to force static linking
if os.name == 'posix':
log.info("Removing non-static MySQL libraries from %s" % self.connc_lib)
for lib_file in os.listdir(self.connc_lib):
lib_file_path = os.path.join(self.connc_lib, lib_file)
if os.path.isfile(lib_file_path) and not lib_file.endswith('.a'):
os.unlink(os.path.join(self.connc_lib, lib_file))
def fix_compiler(self):
BuildExtDynamic.fix_compiler(self)
include_dirs = []
library_dirs = []
libraries = []
if os.name == 'posix':
include_dirs.append(self.connc_include)
library_dirs.append(self.connc_lib)
if self.with_mysql_capi:
libraries.append("mysqlclient")
# As we statically link and the "libmysqlclient.a" library
# carry no information what it depends on, we need to
# manually add library dependencies here.
if platform.system() not in ["Darwin", "Windows"]:
libraries.append("rt")
for ext in self.extensions:
ext.include_dirs.extend(include_dirs)
ext.library_dirs.extend(library_dirs)
ext.libraries.extend(libraries)
# Add extra compile args
if self.extra_compile_args:
ext.extra_compile_args.extend(self.extra_compile_args.split())
# Add extra link args
if self.extra_link_args:
ext.extra_link_args.extend(self.extra_link_args.split())
class InstallLib(install_lib):
user_options = install_lib.user_options + CEXT_OPTIONS + INSTALL_OPTIONS
boolean_options = ['byte-code-only']
def initialize_options(self):
install_lib.initialize_options(self)
self.byte_code_only = None
def finalize_options(self):
install_lib.finalize_options(self)
self.set_undefined_options('install',
('byte_code_only', 'byte_code_only'))
self.set_undefined_options('build', ('build_base', 'build_dir'))
def run(self):
self.build()
outfiles = self.install()
# (Optionally) compile .py to .pyc
if outfiles is not None and self.distribution.has_pure_modules():
self.byte_compile(outfiles)
if self.byte_code_only:
for source_file in outfiles:
if os.path.join('mysql', '__init__.py') in source_file:
continue
log.info("Removing %s", source_file)
os.remove(source_file)
class Install(install):
"""Install Connector/Python C Extension"""
description = "install MySQL Connector/Python"
user_options = install.user_options + CEXT_OPTIONS + INSTALL_OPTIONS + \
CEXT_STATIC_OPTIONS
boolean_options = ['byte-code-only', 'static']
need_ext = False
def initialize_options(self):
install.initialize_options(self)
self.extra_compile_args = None
self.extra_link_args = None
self.with_mysql_capi = None
self.byte_code_only = None
self.static = None
def finalize_options(self):
if self.static:
log.info("Linking C Extension statically with libraries")
self.distribution.cmdclass['build_ext'] = BuildExtStatic
if self.byte_code_only is None:
self.byte_code_only = False
build_ext_obj = self.distribution.get_command_obj('build_ext')
build_ext_obj.with_mysql_capi = self.with_mysql_capi
build_ext_obj.extra_compile_args = self.extra_compile_args
build_ext_obj.extra_link_args = self.extra_link_args
build_ext_obj.static = self.static
if self.with_mysql_capi:
self.need_ext = True
if not self.need_ext:
remove_cext(self.distribution)
install.finalize_options(self)
def run(self):
if not self.need_ext:
log.info("Not Installing MySQL C Extension")
else:
log.info("Installing MySQL C Extension")
install.run(self)
|
apache-2.0
| -3,967,664,341,499,863,600
| 35.712782
| 84
| 0.560908
| false
| 3.964599
| true
| false
| false
|
thorwhalen/ut
|
ml/skwrap/feature_extraction/dict_vectorizer.py
|
1
|
7588
|
__author__ = 'thor'
from sklearn.feature_extraction import DictVectorizer
from sklearn.externals import six
import numpy as np
from pandas import DataFrame
from collections import Counter
class IterDictVectorizer(DictVectorizer):
"""Transforms lists of feature-value mappings or rows of a dataframe to vectors.
It is like DictVectorizer (whose description was copied below), but:
(1) works with pandas DataFrame X input (rows become feature-value mappings dict)
(2) a minimum number of feature=value counts can be specified (by min_count)
(3) The fit is faster than with DictVectorizer (at least with DataFrame input)
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
min_count: positive float or int:
If min_count >= 1, min_count is the minimum number of feature=value count.
        If min_count < 1, min_count represents the minimum proportion of the data that should have feature=value
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
>>> from ut.ml.skwrap.feature_extraction import IterDictVectorizer
>>> from pandas import DataFrame
>>> v = IterDictVectorizer(sparse=False)
>>> D = DataFrame([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
self.min_count = min_count
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
feature_template = "{}" + self.separator + "{}"
if isinstance(X, DataFrame):
counts_of = dict()
for col, val in X.items():
counts_of[col] = Counter(val.dropna())
self.feature_counts_ = {}
_min_count = self.min_count
if self.min_count < 1:
_min_count *= len(X)
else:
_min_count = self.min_count
self.df_columns_ = set()
for k, v in counts_of.items():
for kk, vv in v.items():
if vv >= _min_count:
self.feature_counts_[feature_template.format(k, kk)] = vv
self.df_columns_.add(k)
feature_names = list(self.feature_counts_.keys())
else:
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = feature_template.format(f, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def transform(self, X, y=None):
if isinstance(X, DataFrame):
X = map(lambda x: x[1].dropna().to_dict(), X.iterrows())
return super(IterDictVectorizer, self).transform(X)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
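# Illustrative sketch (assumed toy data): with min_count, only feature=value
# pairs frequent enough in the DataFrame survive the fit. Here 'f=a' occurs
# twice and is kept, while 'f=b' occurs once and is dropped:
#
#     df = DataFrame([{'f': 'a'}, {'f': 'a'}, {'f': 'b'}])
#     v = IterDictVectorizer(sparse=False, min_count=2)
#     v.fit(df)
#     v.feature_names_   # -> ['f=a']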
class IterDictVectorizerWithText(object):
def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True, min_count=0,
text_vectorizers={}):
self.dict_vectorizer = IterDictVectorizer(
dtype=dtype, separator=separator, sparse=sparse, sort=sort, min_count=min_count
)
self.text_vectorizers = text_vectorizers
def fit(self, X, y=None):
# input validation
assert isinstance(X, DataFrame), "X must be a pandas DataFrame"
if not set(self.text_vectorizers.keys()).issubset(X.columns):
            raise RuntimeError("The following columns were specified in text_vectorizers, but were not in X:\n" +
                               " {}".format(set(self.text_vectorizers.keys()).difference(X.columns)))
# carry out the normal IterDictVectorizer.fit() for columns not in text_vectorizers
self.dict_vectorizer_cols_ = set(X.columns).difference(list(self.text_vectorizers.keys()))
self.dict_vectorizer.fit(X[self.dict_vectorizer_cols_])
self.vocabulary_ = self.dict_vectorizer.vocabulary_
# use the CounterVectorizers of text_vectorizers to fit the specified string columns
for col in set(X.columns).intersection(list(self.text_vectorizers.keys())):
self.text_vectorizers[col].fit(X[col])
offset = len(self.vocabulary_)
self.vocabulary_ = dict(self.vocabulary_,
                                    **{k: v + offset for k, v in self.text_vectorizers[col].vocabulary_.items()})
self.feature_names_ = list(self.vocabulary_.keys())
def transform(self, X, y=None):
X1 = self.dict_vectorizer.transform(X[self.dict_vectorizer_cols_])
        X2 = np.hstack([self.text_vectorizers[col].transform(X[col]) for col in self.text_vectorizers.keys()])
return np.hstack((X1, X2))
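# Illustrative sketch (assumed column names): delegate a free-text column to a
# CountVectorizer while the remaining columns go through IterDictVectorizer.
#
#     from sklearn.feature_extraction.text import CountVectorizer
#     v = IterDictVectorizerWithText(text_vectorizers={'notes': CountVectorizer()})
#     v.fit(DataFrame([{'color': 'red', 'notes': 'late delivery'},
#                      {'color': 'blue', 'notes': 'delivery ok'}]))
#     sorted(v.feature_names_)  # mixes 'color=...' and 'notes' vocabulary terms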
|
mit
| 8,670,410,577,151,383,000
| 37.912821
| 123
| 0.599895
| false
| 4.031881
| false
| false
| false
|
pebble/spacel-provision
|
src/test/provision/orbit/test_provider.py
|
1
|
1145
|
from mock import MagicMock
from spacel.provision.orbit.provider import ProviderOrbitFactory
from test import BaseSpaceAppTest, ORBIT_REGION
TEST_PROVIDER = 'test'
class TestProviderOrbitFactory(BaseSpaceAppTest):
def setUp(self):
super(TestProviderOrbitFactory, self).setUp()
self.provider = MagicMock()
self.orbit_factory = ProviderOrbitFactory({
TEST_PROVIDER: self.provider
})
self.orbit.regions[ORBIT_REGION].provider = TEST_PROVIDER
def test_get_orbit(self):
self.orbit_factory.orbit(self.orbit)
self.provider.orbit.assert_called_once_with(self.orbit,
regions=[ORBIT_REGION])
def test_get_orbit_provider_not_found(self):
self.orbit.regions[ORBIT_REGION].provider = 'does-not-exist'
self.orbit_factory.orbit(self.orbit)
self.provider.orbit.assert_not_called()
def test_get(self):
orbit_factory = ProviderOrbitFactory.get(None, None, None, None, None,
None)
self.assertEqual(2, len(orbit_factory._providers))
|
mit
| 3,872,497,890,873,925,000
| 34.78125
| 78
| 0.638428
| false
| 4.03169
| true
| false
| false
|
tensorflow/addons
|
tensorflow_addons/image/dense_image_warp.py
|
1
|
10213
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using per-pixel flow vectors."""
import tensorflow as tf
from tensorflow_addons.utils import types
from typing import Optional
@tf.function
def interpolate_bilinear(
grid: types.TensorLike,
query_points: types.TensorLike,
indexing: str = "ij",
name: Optional[str] = None,
) -> tf.Tensor:
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
query_points: a 3-D float `Tensor` of N points with shape
`[batch, N, 2]`.
indexing: whether the query points are specified as row and column (ij),
or Cartesian coordinates (xy).
name: a name for the operation (optional).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
ValueError: if the indexing mode is invalid, or if the shape of the
        inputs is invalid.
"""
if indexing != "ij" and indexing != "xy":
raise ValueError("Indexing mode must be 'ij' or 'xy'")
with tf.name_scope(name or "interpolate_bilinear"):
grid = tf.convert_to_tensor(grid)
query_points = tf.convert_to_tensor(query_points)
# grid shape checks
grid_static_shape = grid.shape
grid_shape = tf.shape(grid)
if grid_static_shape.dims is not None:
if len(grid_static_shape) != 4:
raise ValueError("Grid must be 4D Tensor")
if grid_static_shape[1] is not None and grid_static_shape[1] < 2:
raise ValueError("Grid height must be at least 2.")
if grid_static_shape[2] is not None and grid_static_shape[2] < 2:
raise ValueError("Grid width must be at least 2.")
# query_points shape checks
query_static_shape = query_points.shape
query_shape = tf.shape(query_points)
if query_static_shape.dims is not None:
if len(query_static_shape) != 3:
raise ValueError("Query points must be 3 dimensional.")
query_hw = query_static_shape[2]
if query_hw is not None and query_hw != 2:
raise ValueError("Query points last dimension must be 2.")
batch_size, height, width, channels = (
grid_shape[0],
grid_shape[1],
grid_shape[2],
grid_shape[3],
)
num_queries = query_shape[1]
query_type = query_points.dtype
grid_type = grid.dtype
alphas = []
floors = []
ceils = []
index_order = [0, 1] if indexing == "ij" else [1, 0]
unstacked_query_points = tf.unstack(query_points, axis=2, num=2)
for i, dim in enumerate(index_order):
with tf.name_scope("dim-" + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = grid_shape[i + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = tf.cast(size_in_indexing_dimension - 2, query_type)
min_floor = tf.constant(0.0, dtype=query_type)
floor = tf.math.minimum(
tf.math.maximum(min_floor, tf.math.floor(queries)), max_floor
)
int_floor = tf.cast(floor, tf.dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = tf.cast(queries - floor, grid_type)
min_alpha = tf.constant(0.0, dtype=grid_type)
max_alpha = tf.constant(1.0, dtype=grid_type)
alpha = tf.math.minimum(tf.math.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = tf.expand_dims(alpha, 2)
alphas.append(alpha)
flattened_grid = tf.reshape(grid, [batch_size * height * width, channels])
batch_offsets = tf.reshape(
tf.range(batch_size) * height * width, [batch_size, 1]
)
# This wraps tf.gather. We reshape the image data such that the
# batch, y, and x coordinates are pulled into the first dimension.
# Then we gather. Finally, we reshape the output back. It's possible this
# code would be made simpler by using tf.gather_nd.
def gather(y_coords, x_coords, name):
with tf.name_scope("gather-" + name):
linear_coordinates = batch_offsets + y_coords * width + x_coords
gathered_values = tf.gather(flattened_grid, linear_coordinates)
return tf.reshape(gathered_values, [batch_size, num_queries, channels])
# grab the pixel values in the 4 corners around each query point
top_left = gather(floors[0], floors[1], "top_left")
top_right = gather(floors[0], ceils[1], "top_right")
bottom_left = gather(ceils[0], floors[1], "bottom_left")
bottom_right = gather(ceils[0], ceils[1], "bottom_right")
# now, do the actual interpolation
with tf.name_scope("interpolate"):
interp_top = alphas[1] * (top_right - top_left) + top_left
interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
interp = alphas[0] * (interp_bottom - interp_top) + interp_top
return interp
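# Minimal usage sketch (assumed values): querying the center of a 2x2 grid
# returns the average of the four corner values.
#
#     grid = tf.constant([[[[0.0], [1.0]], [[2.0], [3.0]]]])  # [1, 2, 2, 1]
#     query = tf.constant([[[0.5, 0.5]]])                     # [1, 1, 2]
#     interpolate_bilinear(grid, query)                       # -> [[[1.5]]]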
def _get_dim(x, idx):
if x.shape.ndims is None:
return tf.shape(x)[idx]
return x.shape[idx] or tf.shape(x)[idx]
@tf.function
def dense_image_warp(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Image warping using per-pixel flow vectors.
Apply a non-linear warp to the image, where the warp is specified by a
dense flow field of offset vectors that define the correspondences of
pixel values in the output image back to locations in the source image.
Specifically, the pixel value at `output[b, j, i, c]` is
`images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c]`.
The locations specified by this formula do not necessarily map to an int
index. Therefore, the pixel value is obtained by bilinear
interpolation of the 4 nearest pixels around
`(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1])`. For locations outside
of the image, we use the nearest pixel values at the image boundary.
NOTE: The definition of the flow field above is different from that
of optical flow. This function expects the negative forward flow from
output image to source image. Given two images `I_1` and `I_2` and the
optical flow `F_12` from `I_1` to `I_2`, the image `I_1` can be
reconstructed by `I_1_rec = dense_image_warp(I_2, -F_12)`.
Args:
image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
name: A name for the operation (optional).
Note that image and flow can be of type `tf.half`, `tf.float32`, or
`tf.float64`, and do not necessarily have to be the same type.
Returns:
A 4-D float `Tensor` with shape`[batch, height, width, channels]`
and same type as input image.
Raises:
ValueError: if `height < 2` or `width < 2` or the inputs have the wrong
number of dimensions.
"""
with tf.name_scope(name or "dense_image_warp"):
image = tf.convert_to_tensor(image)
flow = tf.convert_to_tensor(flow)
batch_size, height, width, channels = (
_get_dim(image, 0),
_get_dim(image, 1),
_get_dim(image, 2),
_get_dim(image, 3),
)
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
grid_x, grid_y = tf.meshgrid(tf.range(width), tf.range(height))
stacked_grid = tf.cast(tf.stack([grid_y, grid_x], axis=2), flow.dtype)
batched_grid = tf.expand_dims(stacked_grid, axis=0)
query_points_on_grid = batched_grid - flow
query_points_flattened = tf.reshape(
query_points_on_grid, [batch_size, height * width, 2]
)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened)
interpolated = tf.reshape(interpolated, [batch_size, height, width, channels])
return interpolated
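# Minimal usage sketch: a zero flow field reproduces the input image (up to
# float rounding), since every query point lands exactly on its own pixel.
#
#     image = tf.random.uniform([1, 4, 4, 3])
#     flow = tf.zeros([1, 4, 4, 2])
#     dense_image_warp(image, flow)   # approximately equal to image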
@tf.function(experimental_implements="addons:DenseImageWarp")
def dense_image_warp_annotated(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Similar to dense_image_warp but annotated with experimental_implements.
IMPORTANT: This is a temporary function and will be removed after TensorFlow's
next release.
This annotation make the serialized function detectable by the TFLite MLIR
converter and allow the converter to convert it to corresponding TFLite op.
However, with the annotation, this function cannot be used with backprop
under `tf.GradientTape` objects.
"""
return dense_image_warp(image, flow, name)
|
apache-2.0
| 6,193,261,979,639,195,000
| 41.202479
| 87
| 0.61784
| false
| 3.840918
| false
| false
| false
|
Lana-B/Pheno4T
|
madanalysis/layout/histogram_core.py
|
1
|
2479
|
################################################################################
#
# Copyright (C) 2012-2013 Eric Conte, Benjamin Fuks
# The MadAnalysis development team, email: <ma5team@iphc.cnrs.fr>
#
# This file is part of MadAnalysis 5.
# Official website: <https://launchpad.net/madanalysis5>
#
# MadAnalysis 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MadAnalysis 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MadAnalysis 5. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
import logging
from math import sqrt
class HistogramCore:
def __init__(self):
import numpy
self.integral = 0
self.nevents = 0
self.nentries = 0
self.sumwentries = 0
self.sumw = 0
self.sumw2 = 0
self.sumwx = 0
self.sumw2x = 0
self.underflow = 0
self.overflow = 0
self.array = numpy.array([])
def ComputeIntegral(self):
self.integral = 0
for i in range(0,len(self.array)):
self.integral+=self.array[i]
self.integral += self.overflow
self.integral += self.underflow
def Print(self):
        logging.info('nevents='+str(self.nevents)+\
                     ' entries='+str(self.nentries))
logging.info('sumw='+str(self.sumw)+\
' sumw2='+str(self.sumw2)+\
' sumwx='+str(self.sumwx)+\
' sumw2x='+str(self.sumw2x))
logging.info('underflow='+str(self.underflow)+\
' overflow='+str(self.overflow))
def GetMean(self):
if self.sumw==0:
return 0.
else:
return self.sumwx / self.sumw
def GetRMS(self):
if self.sumw==0:
return 0.
else:
mean = self.GetMean()
return sqrt(abs(self.sumw2x/self.sumw - mean*mean))
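# Illustrative check (assumed numbers, not part of the original class): two
# unit-weight entries at x=1 and x=3 give sumw=2, sumwx=4, sumw2x=10, so
# GetMean() -> 2.0 and GetRMS() -> sqrt(10/2 - 2*2) = 1.0.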
|
gpl-3.0
| 4,464,193,754,576,342,000
| 27.494253
| 80
| 0.541751
| false
| 3.837461
| false
| false
| false
|
Pandaaaa906/ChemErpSystem
|
ERP/templatetags/erp_extras.py
|
1
|
1616
|
# -*- coding: utf-8 -*-
from django import template
from django.db.models.query import QuerySet
import datetime
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.db.models import Q
register = template.Library()
@register.inclusion_tag('sidebar_tree.html')
def children_tag(person):
if isinstance(person, QuerySet):
children = person
else:
children = person.children.all()
#zip(children,map(children.content_type,children)
return {'children': children}
@register.filter(name='has_group')
def has_group(user, group_name):
return user.groups.filter(name=group_name).exists()
@register.filter(name='get_staff')
def get_staff(group_name):
group = Group.objects.get(name=group_name)
users = group.user_set.all()
    return [user.id for user in users]
@register.filter(name='get_nameFromId')
def get_nameFromId(usrId):
if usrId:
user = User.objects.get(id=usrId)
return user.first_name+user.last_name
else:
return ""
@register.filter
def get_range( value ):
"""
Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
Results with the HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views
"""
return range( value )
@register.filter
def get_fields(model):
return model._meta.get_fields()
|
apache-2.0
| 684,355,398,414,032,100
| 25.080645
| 66
| 0.667698
| false
| 3.467811
| false
| false
| false
|
rplevka/robottelo
|
tests/upgrades/test_repository.py
|
1
|
9545
|
"""Test for Repository related Upgrade Scenarios
:Requirement: Upgraded Satellite
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Repositories
:Assignee: tpapaioa
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import os
from fabric.api import execute
from fabric.api import run
from nailgun import entities
from upgrade.helpers.docker import docker_execute_command
from upgrade_tests import post_upgrade
from upgrade_tests import pre_upgrade
from upgrade_tests.helpers.scenarios import create_dict
from upgrade_tests.helpers.scenarios import dockerize
from upgrade_tests.helpers.scenarios import get_entity_data
from upgrade_tests.helpers.scenarios import rpm1
from upgrade_tests.helpers.scenarios import rpm2
from robottelo import ssh
from robottelo.api.utils import create_sync_custom_repo
from robottelo.api.utils import promote
from robottelo.config import settings
from robottelo.logging import logger
from robottelo.upgrade_utility import create_repo
from robottelo.upgrade_utility import host_location_update
from robottelo.upgrade_utility import install_or_update_package
from robottelo.upgrade_utility import publish_content_view
UPSTREAM_USERNAME = 'rTtest123'
DOCKER_VM = settings.upgrade.docker_vm
FILE_PATH = '/var/www/html/pub/custom_repo/'
CUSTOM_REPO = f'https://{settings.server.hostname}/pub/custom_repo'
_, RPM1_NAME = os.path.split(rpm1)
_, RPM2_NAME = os.path.split(rpm2)
class TestScenarioRepositoryUpstreamAuthorizationCheck:
"""This test scenario is to verify the upstream username in post-upgrade for a custom
    repository which has an upstream username but no password set on it in pre-upgrade.
Test Steps:
1. Before Satellite upgrade, Create a custom repository and sync it.
2. Set the upstream username on same repository using foreman-rake.
3. Upgrade Satellite.
4. Check if the upstream username value is removed for same repository.
"""
@pre_upgrade
def test_pre_repository_scenario_upstream_authorization(self):
"""Create a custom repository and set the upstream username on it.
:id: preupgrade-11c5ceee-bfe0-4ce9-8f7b-67a835baf522
:steps:
1. Create a custom repository and sync it.
2. Set the upstream username on same repository using foreman-rake.
:expectedresults:
1. Upstream username should be set on repository.
:BZ: 1641785
"""
org = entities.Organization().create()
custom_repo = create_sync_custom_repo(org_id=org.id)
rake_repo = f'repo = Katello::Repository.find_by_id({custom_repo})'
rake_username = f'; repo.root.upstream_username = "{UPSTREAM_USERNAME}"'
rake_repo_save = '; repo.save!(validate: false)'
result = run(f"echo '{rake_repo}{rake_username}{rake_repo_save}'|foreman-rake console")
assert 'true' in result
global_dict = {self.__class__.__name__: {'repo_id': custom_repo}}
create_dict(global_dict)
@post_upgrade(depend_on=test_pre_repository_scenario_upstream_authorization)
def test_post_repository_scenario_upstream_authorization(self):
"""Verify upstream username for pre-upgrade created repository.
:id: postupgrade-11c5ceee-bfe0-4ce9-8f7b-67a835baf522
:steps:
1. Verify upstream username for pre-upgrade created repository using
foreman-rake.
:expectedresults:
1. upstream username should not exists on same repository.
:BZ: 1641785
"""
repo_id = get_entity_data(self.__class__.__name__)['repo_id']
rake_repo = f'repo = Katello::RootRepository.find_by_id({repo_id})'
rake_username = '; repo.root.upstream_username'
result = run(f"echo '{rake_repo}{rake_username}'|foreman-rake console")
assert UPSTREAM_USERNAME not in result
class TestScenarioCustomRepoCheck:
"""Scenario test to verify if we can create a custom repository and consume it
    via a client, then alter the created custom repository, and Satellite will be able
to sync back the repo.
Test Steps:
1. Before Satellite upgrade.
2. Create new Organization and Location.
3. Create Product, custom repo, cv.
4. Create activation key and add subscription in it.
5. Create a content host, register and install package on it.
6. Upgrade Satellite.
7. Remove Old package and add new package into custom repo.
8. Sync repo, publish new version of cv.
9. Try to install new package on client.
BZ: 1429201,1698549
"""
@pre_upgrade
def test_pre_scenario_custom_repo_check(self):
"""This is pre-upgrade scenario test to verify if we can create a
custom repository and consume it via content host.
:id: preupgrade-eb6831b1-c5b6-4941-a325-994a09467478
:steps:
1. Before Satellite upgrade.
2. Create new Organization, Location.
3. Create Product, custom repo, cv.
4. Create activation key and add subscription.
5. Create a content host, register and install package on it.
:expectedresults:
1. Custom repo is created.
2. Package is installed on Content host.
"""
org = entities.Organization().create()
loc = entities.Location(organization=[org]).create()
lce = entities.LifecycleEnvironment(organization=org).create()
product = entities.Product(organization=org).create()
create_repo(rpm1, FILE_PATH)
repo = entities.Repository(product=product.id, url=CUSTOM_REPO).create()
repo.sync()
content_view = publish_content_view(org=org, repolist=repo)
promote(content_view.version[0], lce.id)
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/'
'Packages/b/|grep {}'.format(
org.label, lce.name, content_view.label, product.label, repo.label, RPM1_NAME
)
)
assert result.return_code == 0
assert len(result.stdout) >= 1
subscription = entities.Subscription(organization=org).search(
query={'search': f'name={product.name}'}
)[0]
ak = entities.ActivationKey(
content_view=content_view, organization=org.id, environment=lce
).create()
ak.add_subscriptions(data={'subscription_id': subscription.id})
rhel7_client = dockerize(ak_name=ak.name, distro='rhel7', org_label=org.label)
client_container_id = [value for value in rhel7_client.values()][0]
client_container_name = [key for key in rhel7_client.keys()][0]
host_location_update(
client_container_name=client_container_name, logger_obj=logger, loc=loc
)
status = execute(
docker_execute_command,
client_container_id,
'subscription-manager identity',
host=DOCKER_VM,
)[DOCKER_VM]
assert org.name in status
install_or_update_package(client_hostname=client_container_id, package=RPM1_NAME)
scenario_dict = {
self.__class__.__name__: {
'content_view_name': content_view.name,
'lce_id': lce.id,
'lce_name': lce.name,
'org_label': org.label,
'prod_label': product.label,
'rhel_client': rhel7_client,
'repo_name': repo.name,
}
}
create_dict(scenario_dict)
@post_upgrade(depend_on=test_pre_scenario_custom_repo_check)
def test_post_scenario_custom_repo_check(self):
"""This is post-upgrade scenario test to verify if we can alter the
created custom repository and satellite will be able to sync back
the repo.
:id: postupgrade-5c793577-e573-46a7-abbf-b6fd1f20b06e
:steps:
1. Remove old and add new package into custom repo.
            2. Sync repo, publish the new version of cv.
3. Try to install new package on client.
:expectedresults: Content host should able to pull the new rpm.
"""
entity_data = get_entity_data(self.__class__.__name__)
client = entity_data.get('rhel_client')
client_container_id = list(client.values())[0]
content_view_name = entity_data.get('content_view_name')
lce_id = entity_data.get('lce_id')
lce_name = entity_data.get('lce_name')
org_label = entity_data.get('org_label')
prod_label = entity_data.get('prod_label')
repo_name = entity_data.get('repo_name')
create_repo(rpm2, FILE_PATH, post_upgrade=True, other_rpm=rpm1)
repo = entities.Repository(name=repo_name).search()[0]
repo.sync()
content_view = entities.ContentView(name=content_view_name).search()[0]
content_view.publish()
content_view = entities.ContentView(name=content_view_name).search()[0]
promote(content_view.version[-1], lce_id)
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/'
'Packages/c/| grep {}'.format(
org_label, lce_name, content_view.label, prod_label, repo.label, RPM2_NAME
)
)
assert result.return_code == 0
assert len(result.stdout) >= 1
install_or_update_package(client_hostname=client_container_id, package=RPM2_NAME)
|
gpl-3.0
| 6,445,310,977,398,865,000
| 35.996124
| 95
| 0.651545
| false
| 3.811901
| true
| false
| false
|
YevgeniyaK/python_training
|
fixture/group.py
|
1
|
4957
|
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def return_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
    '''
    Create a new group
    '''
def create(self, group):
wd = self.app.wd
self.open_group_page()
# init group greation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_group_page()
self.group_cache = None
    '''
    Delete the first group
    '''
def delete_first_group(self):
self.delete_group_by_index(0)
    '''
    Delete a group by index
    '''
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
#submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
    '''
    Modify a group
    '''
def change_group(self, group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("edit").click()
self.fill_group_form(group)
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
Modify the first group
'''
def modify_first_group(self, new_group_data):
self.modify_group_by_index(0, new_group_data)
'''
Modify a group by index
'''
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
'''
fill group form
'''
def fill_group_form(self, group):
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
'''
Separate method for selecting the first group
'''
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
'''
Method for selecting a group by index
'''
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
'''
Check for existing groups (returns the count)
'''
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
'''
Get the list of groups
'''
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name = text.strip(), id = id))
return list(self.group_cache)
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_group_page()
self.group_cache = None
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def modify_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.open_group_page()
self.select_group_by_id(id)
# modification
wd.find_element_by_name("edit").click()
# fill
self.fill_group_form(new_group_data)
# submit
wd.find_element_by_name("update").click()
self.return_to_group_page()
self.group_cache = None
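# Usage sketch, not part of the original module (hypothetical: assumes an
# `app` fixture exposing a selenium WebDriver as `app.wd`, and that Group
# accepts these keyword arguments):
#   helper = GroupHelper(app)
#   helper.create(Group(name="test", header="h", footer="f"))
#   assert helper.count() > 0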
|
apache-2.0
| -8,982,111,332,978,443,000
| 26.114943
| 100
| 0.569097
| false
| 3.128647
| false
| false
| false
|
patcorwin/fossil
|
pdil/tool/fossil/rigging/splineTwist.py
|
1
|
24347
|
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import math
from pymel.core import curve, cluster, delete, dt, duplicate, expression, group, hide, ikHandle, insertKnotCurve, joint, move, orientConstraint, parent, parentConstraint, pointConstraint, xform
from ....add import simpleName, shortName
from .... import core
from .... import nodeApi
from .. import controllerShape
from .. import space
from ..cardRigging import MetaControl, ParamInfo
from . import _util as util
from .. import node
class OrientMode:
CLOSEST_JOINT = 'closest_joint'
WORLD = 'world'
AS_FIRST_JOINT = 'as_first_joint'
class TwistStyle:
'''
Used by splineIk. Advanced uses advanced twist while the others determine
which rotation axis drives the twist attribute.
'''
ADVANCED = 'Advanced'
X = 'X'
NEG_X = '-X'
Y = 'Y'
NEG_Y = '-Y'
Z = 'Z'
NEG_Z = '-Z'
@classmethod
def asChoices(cls):
choices = OrderedDict()
choices[cls.ADVANCED] = cls.ADVANCED
choices[cls.X] = cls.X
choices[cls.NEG_X] = cls.NEG_X
choices[cls.Y] = cls.Y
choices[cls.NEG_Y] = cls.NEG_Y
choices[cls.Z] = cls.Z
choices[cls.NEG_Z] = cls.NEG_Z
return choices
@util.adds('twist', 'stretch')
@util.defaultspec( {'shape': 'sphere', 'size': 10, 'color': 'blue 0.22'} )
def buildSplineTwist(start, end, controlCountOrCrv=4, twistInfDist=0, simplifyCurve=True,
tipBend=True, sourceBend=True, matchOrient=True, allowOffset=True, # noqa e128
useLeadOrient=False, # This is a backwards-compatible option, mutually exclusive with matchOrient
twistStyle=TwistStyle.ADVANCED, duplicateCurve=True,
controlOrient=OrientMode.CLOSEST_JOINT,
name='', groupName='', controlSpec={}):
'''
Make a spline controller from `start` to `end`.
:param int twistInfDist: By default, twist controls fall off before hitting each other.
Otherwise it is the number of joints on either side it will influence.
:param bool simplifyCurve: Only used if # of cvs is specified. Turning it
on will likely result in the curve not matching the existing joint
positions, but the controls will be more evenly spaced.
:param bool tipBend: If True, an extra cv will be added at the second to
last joint, controlled by the last controller to ease out.
##:param bool applyDirectly: If True, rig the given joints, do not make a duplicate chain
:param bool useLeadOrient: If True, the controllers will be aligned the same
as the first joint.
**NOTE** I think this option only exists to preserve previous builds, this is pretty dumb
:param bool matchOrient: Does trueZero on the start and end. I'm not sure this makes sense.
.. todo::
* Add the same spline chain +X towards child that the neck has and test out advancedTwist()
* See if I can identify the closest joint to a control and orient to that
* The first joint has parent AND local, which are the same thing, keep this for convenience of selecting all the controls and editing attrs?
* Test specifying your own curve
* There is a float division error that can happen if there are too many control cvs.
* Verify twists work right with unsimplified curves (hint, I don't think they do).
'''
matchOrient = False
useLeadOrient = False
if isinstance( controlCountOrCrv, int ):
assert controlCountOrCrv > 3, "controlCount must be at least 4"
# The axis to twist and stretch on.
jointAxis = util.identifyAxis( start.listRelatives(type='joint')[0] )
# Make a duplicate chain for the IK that will also stretch.
stretchingChain = util.dupChain( start, end, '{0}_stretch' )
# &&& NOTE! This might affect advanced twist in some way.
# If the chain is mirrored, we need to reorient to point down x so the
# spline doesn't mess up when the main control rotates
if stretchingChain[1].tx.get() < 0:
# Despite aggressive zeroing of the source, the dup can still end up slightly
# off zero so force it.
for jnt in stretchingChain:
jnt.r.set(0, 0, 0)
joint( stretchingChain[0], e=True, oj='xyz', secondaryAxisOrient='yup', zso=True, ch=True)
joint( stretchingChain[-1], e=True, oj='none')
if isinstance( controlCountOrCrv, int ):
mainIk, _effector, crv = ikHandle( sol='ikSplineSolver',
sj=stretchingChain[0],
ee=stretchingChain[-1],
ns=controlCountOrCrv - 3,
simplifyCurve=simplifyCurve)
else:
if duplicateCurve:
crv = duplicate(controlCountOrCrv)[0]
else:
crv = controlCountOrCrv
mainIk, _effector = ikHandle( sol='ikSplineSolver',
sj=stretchingChain[0],
ee=stretchingChain[-1],
ccv=False,
pcv=False)
crv.getShape().worldSpace[0] >> mainIk.inCurve
hide(mainIk)
mainIk.rename( simpleName(start, "{0}_ikHandle") )
crv.rename( simpleName(start, "{0}_curve") )
if not name:
name = util.trimName(start)
if name.count(' '):
name, endName = name.split()
else:
endName = ''
# Only add a tipBend cv if number of cvs was specified.
if tipBend and isinstance( controlCountOrCrv, int ):
currentTrans = [ xform(cv, q=True, ws=True, t=True) for cv in crv.cv ]
insertKnotCurve( crv.u[1], nk=1, add=False, ib=False, rpo=True, cos=True, ch=True)
for pos, cv in zip(currentTrans, crv.cv[:-2]):
xform( cv, ws=True, t=pos )
xform( crv.cv[-2], ws=True, t=xform(end.getParent(), q=True, ws=True, t=True) )
xform( crv.cv[-1], ws=True, t=currentTrans[-1] )
# Only add a sourceBend cv if number of cvs was specified.
if sourceBend and isinstance( controlCountOrCrv, int ):
currentTrans = [ xform(cv, q=True, ws=True, t=True) for cv in crv.cv ]
insertKnotCurve( crv.u[1.2], nk=1, add=False, ib=False, rpo=True, cos=True, ch=True) # I honestly don't know why, but 1.2 must be different than 1.0
for pos, cv in zip(currentTrans[1:], crv.cv[2:]):
xform( cv, ws=True, t=pos )
xform( crv.cv[0], ws=True, t=currentTrans[0] )
xform( crv.cv[1], ws=True, t=xform(stretchingChain[1], q=True, ws=True, t=True) )
grp = group(em=True, p=node.mainGroup(), n=start.name() + "_splineTwist")
controls = util.addControlsToCurve(name + 'Ctrl', crv, controlSpec['main'])
for ctrl in controls:
core.dagObj.zero(ctrl).setParent( grp )
if controlOrient == OrientMode.CLOSEST_JOINT:
# Use the real chain to match orientations since the stretching chain might reorient to compensate for mirroring.
jointPos = {j: dt.Vector(xform(j, q=True, ws=True, t=True)) for j in util.getChain(start, end)}
aveSpacing = util.chainLength(stretchingChain) / (len(stretchingChain) - 1)
for ctrl in controls:
cpos = dt.Vector(xform(ctrl, q=True, ws=True, t=True))
distances = [ ( (jpos - cpos).length() / aveSpacing, j) for j, jpos in jointPos.items() ]
distances.sort()
''' Just use the closest joint if within 10% of the average spacing
Possible future improvement, look at two joints, and determine if
the control is between them and inbetween the orientation.
'''
if True: # distances[0][0] < 100:
r = xform(distances[0][1], q=True, ro=True, ws=True)
with core.dagObj.Solo(ctrl):
xform(ctrl, ro=r, ws=True)
core.dagObj.zero(ctrl)
"""
# Otherwise split the distances by the percentages
else:
#m1 = xform(distances[0][1], q=True, m=True, ws=True)
#m2 = xform(distances[1][1], q=True, m=True, ws=True)
distA, jointA = distances[0]
distB, jointB = distances[1]
x, y, z = midOrient2(jointA, jointB)
matrix = list(x) + [0] + list(y) + [0] + list(z) + [0] + xform(ctrl, q=True, ws=True, t=True) + [1.0]
print( ctrl, 'to', jointA, jointB )
with Solo(ctrl):
xform(ctrl, ws=True, m=matrix)
# Need to improve my matrix skills, for now it's easy enough to just rotate it
#rotate(ctrl, [0, 180, 0], os=1, r=1)
core.dagObj.zero(ctrl)
"""
if endName:
controls[-1].rename(endName + 'Ctrl')
if matchOrient:
util.trueZeroSetup(start, controls[0])
util.trueZeroSetup(end, controls[-1])
if tipBend:
if useLeadOrient and not matchOrient:
controls[-1].setRotation( end.getRotation(space='world'), space='world' )
parent( controls[-2].getChildren(), controls[-1] )
name = controls[-2].name()
delete( core.dagObj.zero(controls[-2]) )
if not endName:
controls[-1].rename(name)
controls[-2] = controls[-1]
controls.pop()
#core.dagObj.zero(controls[-2]).setParent(controls[-1])
#channels = [t + a for t in 'trs' for a in 'xyz']
#for channel in channels:
# controls[-2].attr( channel ).setKeyable(False)
# controls[-2].attr( channel ).lock()
if sourceBend:
names = []
for ctrl in controls[1:-1]:
names.append( ctrl.name() )
ctrl.rename( '__temp' )
endNum = -1 if endName else None
for name, cur in zip(names, controls[2:endNum] ):
cur.rename(name)
if useLeadOrient and not matchOrient:
controls[0].setRotation( start.getRotation(space='world'), space='world' )
parent( controls[1].getChildren(), controls[0] )
delete( core.dagObj.zero(controls[1]) )
del controls[1]
controls[0] = nodeApi.RigController.convert(controls[0])
controls[0].container = grp
stretchAttr, jointLenMultiplier = util.makeStretchySpline(controls[0], mainIk)
connectingCurve = addConnectingCurve(controls)
controls[0].visibility >> connectingCurve.visibility
# Make twist for everything but hide them all and drive the ones that overlap
# with spline controllers by the spline control.
if not twistInfDist:
numJoints = countJoints(start, end)
twistInfDist = int(math.ceil( (numJoints - len(controls)) / float(len(controls) - 1) ))
twistInfDist = max(1, twistInfDist)
noInherit = group(em=True, p=grp, n='NoInheritTransform')
core.dagObj.lockTrans(noInherit)
core.dagObj.lockRot(noInherit)
core.dagObj.lockScale(noInherit)
noInherit.inheritsTransform.set(False)
noInherit.inheritsTransform.lock()
# &&& If simplify curve is ON, the last joint gets constrained to the spinner?
# Otherwise it gets constrained to the offset or stretch joint, which I think is correct.
if allowOffset:
# If allowOffset, make another chain to handle the difference in joint positions.
offsetChain = util.dupChain( start, end, '{0}_offset' )
offsetChain[0].setParent(noInherit)
hide(offsetChain[0])
twists, constraints = addTwistControls( offsetChain, start, end, twistInfDist)
finalRigJoint = offsetChain[-1]
else:
twists, constraints = addTwistControls( stretchingChain, start, end, twistInfDist )
finalRigJoint = stretchingChain[-1]
# Constrain the end to the last controller so it doesn't pop off at all,
# but still respect the stretch attr.
pointConstraint(finalRigJoint, end, e=True, rm=True)
# Make a proxy that can allows respecting stretch being active or not.
endProxy = duplicate(end, po=True)[0]
endProxy.rename('endProxy')
hide(endProxy)
endProxy.setParent(grp)
stretchAttr >> core.constraints.pointConst( controls[-1], endProxy, mo=True )
core.math.opposite(stretchAttr) >> core.constraints.pointConst( finalRigJoint, endProxy )
constraints.point >> core.constraints.pointConst( endProxy, end )
hide(twists)
numControls = len(controls)
numTwists = len(twists)
for i, ctrl in enumerate(controls):
index = int(round( i * ((numTwists - 1) / (numControls - 1)) ))
util.drive( ctrl, 'twist', twists[index].attr('r' + jointAxis) )
space.add( ctrl, start.getParent(), 'local' )
parents = [start.getParent()] + controls[:-1]
stretchingChain[0].setParent(noInherit)
crv.setParent(noInherit)
hide(crv, stretchingChain[0])
connectingCurve.setParent( noInherit )
mainIk.setParent(grp)
# Do not want to scale but let rotate for "fk-like" space mode
for ctrl, _parent in zip(controls, parents):
core.dagObj.lockScale( ctrl )
if useLeadOrient:
ctrl.setRotation( start.getRotation(space='world'), space='world' )
core.dagObj.zero(ctrl)
space.addMain(ctrl)
space.add( ctrl, _parent, 'parent')
for i, ctrl in enumerate(controls[1:]):
controls[0].subControl[str(i)] = ctrl
# Must constrain AFTER controls (possibly) get orientd
orientConstraint( controls[-1], finalRigJoint, mo=True )
# Setup advanced twist
if twistStyle == TwistStyle.ADVANCED:
# &&& Test using advancedTwist() to replace the code below
util.advancedTwist(stretchingChain[0], stretchingChain[1], controls[0], controls[-1], mainIk)
'''
startAxis = duplicate( start, po=True )[0]
startAxis.rename( 'startAxis' )
startAxis.setParent( controls[0] )
endAxis = duplicate( start, po=True )[0]
endAxis.rename( 'endAxis' )
endAxis.setParent( controls[-1] )
endAxis.t.set(0, 0, 0)
mainIk.dTwistControlEnable.set(1)
mainIk.dWorldUpType.set(4)
startAxis.worldMatrix[0] >> mainIk.dWorldUpMatrix
endAxis.worldMatrix[0] >> mainIk.dWorldUpMatrixEnd
hide(startAxis, endAxis)
'''
else:
if twistStyle == TwistStyle.X:
controls[-1].rx >> mainIk.twist
elif twistStyle == TwistStyle.NEG_X:
core.math.multiply(controls[-1].rx, -1.0) >> mainIk.twist
elif twistStyle == TwistStyle.Y:
controls[-1].ry >> mainIk.twist
elif twistStyle == TwistStyle.NEG_Y:
core.math.multiply(controls[-1].ry, -1.0) >> mainIk.twist
elif twistStyle == TwistStyle.Z:
controls[-1].rz >> mainIk.twist
elif twistStyle == TwistStyle.NEG_Z:
core.math.multiply(controls[-1].rz, -1.0) >> mainIk.twist
# To make .twist work, the chain needs to follow parent joint
follow = group(em=True, p=grp)
target = start.getParent()
core.dagObj.matchTo(follow, stretchingChain[0])
parentConstraint( target, follow, mo=1 )
follow.rename(target + '_follow')
stretchingChain[0].setParent(follow)
# Constraint the offset (if exists) to the stretch last to account for any adjustments.
if allowOffset:
util.constrainAtoB(offsetChain[:-1], stretchingChain[:-1])
pointConstraint(stretchingChain[-1], offsetChain[-1], mo=True)
return controls[0], constraints
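# Usage sketch, not part of the original module (hypothetical joint names;
# assumes pymel.core.PyNode and a scene with a joint chain already built):
#   from pymel.core import PyNode
#   ctrl, constraints = buildSplineTwist(PyNode('spine01'), PyNode('spine05'),
#                                        controlCountOrCrv=5, twistInfDist=2)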
def addTwistControls(controlChain, boundChain, boundEnd, influenceDist=3):
'''
Put a rotation controller under each child of the controlChain to drive .rz
of the boundChain. They must both be the same size.
:param Joint controlChain: The first joint of the controlling rig (ideally pruned)
:param Joint boundChain: The first joint of joints being controlled by the spline.
:param Joint boundEnd: The last joint in the bound chain, used to address possible branching.
:param int influenceDist: How many adjacent joints are influenced (total #
is 2x since it influences both directions).
'''
obj = controlChain[0]
target = boundChain
#controlJoints = getChain( controlChain, findChild(controlChain, shortName(boundEnd)) )
controlJoints = controlChain
boundJoints = util.getChain( boundChain, util.findChild(boundChain, shortName(boundEnd)) )
assert len(controlJoints) == len(boundJoints), "Failure when adding twist controls, somehow the chains don't match length, controls {0} != {1}".format( len(controlJoints), len(boundJoints) )
controls = []
groups = []
pointConstraints = []
orientConstraints = []
for i, (obj, target) in enumerate(zip(controlJoints, boundJoints)):
c = controllerShape.simpleCircle()
c.setParent(obj)
c.t.set(0, 0, 0)
c.r.set(0, 0, 0)
controls.append(c)
spinner = group(em=True, name='spinner%i' % i, p=target)
spinner.r.set(0, 0, 0)
spinner.setParent(obj)
spinner.t.set(0, 0, 0)
# Aligning the spinners to the bound joint means we don't have to offset
# the orientConstraint which means nicer numbers.
# spinner.setRotation( target.getRotation(space='world'), space='world' )
groups.append(spinner)
pointConstraints.append( core.constraints.pointConst( obj, target, maintainOffset=False ) )
orientConstraints.append( core.constraints.orientConst( spinner, target, maintainOffset=False ) )
children = obj.listRelatives(type='joint')
if children:
obj = children[0]
else:
obj = None
break
for pSrc, pDest in zip( pointConstraints[:-1], pointConstraints[1:]):
pSrc >> pDest
for oSrc, oDest in zip( orientConstraints[:-1], orientConstraints[1:]):
oSrc >> oDest
# &&& This and the i+7 reflect the number of controls that influence
bigList = [None] * influenceDist + controls + [None] * influenceDist
influenceRange = (influenceDist * 2) + 1
axis = util.identifyAxis(controlChain[0].listRelatives(type='joint')[0])
exp = []
for i, spinner in enumerate(groups):
exp.append(driverExpression( spinner, bigList[i: i + influenceRange], axis ))
expression( s=';\n'.join(exp) )
return controls, util.ConstraintResults( pointConstraints[0], orientConstraints[0] )
class SplineTwist(MetaControl):
''' Spline IK that provides control to twist individual sections. '''
ik_ = 'pdil.tool.fossil.rigging.splineTwist.buildSplineTwist'
ikInput = OrderedDict( [
('controlCountOrCrv', [
ParamInfo( 'CV count', 'How many cvs to use in auto generated curve', ParamInfo.INT, default=4, min=4 ),
ParamInfo( 'Curve', 'A nurbs curve to use for spline', ParamInfo.NODE_0 ),
] ),
('simplifyCurve',
ParamInfo( 'Simplify Curve', 'If True, the curve cvs will space out evenly, possibly altering the positions', ParamInfo.BOOL, default=True) ),
('twistInfDist',
ParamInfo( 'Twist influence', 'How many joints on one side are influenced by the twisting, zero means it is done automatically.', ParamInfo.INT, default=0, min=0) ),
('tipBend',
ParamInfo( 'Tip Bend', 'The tip control should influence the ease out bend', ParamInfo.BOOL, default=True) ),
('sourceBend',
ParamInfo( 'Source Bend', 'The source control should influence the ease in bend', ParamInfo.BOOL, default=True) ),
('matchOrient',
ParamInfo( 'Match Orient', "First and last controller are set to TrueZero'd", ParamInfo.BOOL, default=True) ),
('useLeadOrient',
ParamInfo( 'Lead Orient', 'The controls have the same orientation as the first joint', ParamInfo.BOOL, default=False) ),
('allowOffset',
ParamInfo( 'Allow Offset', 'If you Simplify Curve, the joints will slightly shift unless you Allow Offset or the joints are straight', ParamInfo.BOOL, default=False) ),
('twistStyle',
ParamInfo( 'Twist Style', '0 = advanced, 1=x, 2=-x 3=y ...', ParamInfo.ENUM, enum=TwistStyle.asChoices(), default=TwistStyle.ADVANCED ) ),
('name',
ParamInfo( 'Name', 'Name', ParamInfo.STR, '')),
] )
fkArgs = {'translatable': True}
@classmethod
def readIkKwargs(cls, card, isMirroredSide, sideAlteration=lambda **kwargs: kwargs, kinematicType='ik'):
'''
Overriden to handle if a custom curve was given, which then needs to be duplicated, mirrored and
fed directly into the splineTwist.
'''
kwargs = cls.readKwargs(card, isMirroredSide, sideAlteration, kinematicType='ik')
if isMirroredSide:
if 'controlCountOrCrv' in kwargs and not isinstance( kwargs['controlCountOrCrv'], int ):
crv = kwargs['controlCountOrCrv']
crv = duplicate(crv)[0]
kwargs['controlCountOrCrv'] = crv
move( crv.sp, [0, 0, 0], a=True )
move( crv.rp, [0, 0, 0], a=True )
crv.sx.set(-1)
kwargs['duplicateCurve'] = False
return kwargs
def addConnectingCurve(objs):
'''
Given a list of objects, make a curve that links all of them.
'''
crv = curve( d=1, p=[(0, 0, 0)] * len(objs) )
grp = group(crv, n='connectingCurve')
for i, obj in enumerate(objs):
handle = cluster(crv.cv[i])[1]
pointConstraint( obj, handle )
handle.setParent( grp )
hide(handle)
crv.getShape().overrideEnabled.set( 1 )
crv.getShape().overrideDisplayType.set( 2 )
return grp
def countJoints(start, end):
count = 2
p = end.getParent()
while p and p != start:
p = p.getParent()
count += 1
if not p:
return 0
return count
def driverExpression( driven, controls, axis ):
'''
The `driven` node's rotation on the given axis will be driven by the list of `controls`.
`controls` is a list of objects, and optional empty entries.
Example, if you have joints, A B C and controls X Y Z, you would do:
driverExpression( A, [None, X, Y] )
driverExpression( B, [X, Y, Z] )
driverExpression( C, [Y, Z, None] )
This means A will be fully influenced by X, and partially by Y.
B is fully influenced by Y and partially by X and Z.
'''
powers = calcInfluence(controls)
exp = []
for power, ctrl in zip(powers, controls):
if ctrl:
exp.append( '{0}.r{axis} * {1}'.format(ctrl, power, axis=axis) )
return '{0}.r{axis} = {1};'.format( driven, ' + '.join(exp), axis=axis )
def calcInfluence( controls ):
'''
Given a list (Maybe change to a number?) returns a list of power falloffs.
controls can have None placeholders
power falls off to end of controls
low upper
v v
0 1 2 3 4
# Result: [0.5, 0.75, 1.0, 0.75, 0.5]
low upper
v v
0 1 2 3 4 5
# Result: [0.5, 0.75, 1.0, 1.0, 0.75, 0.5]
'''
max = len(controls)
if len(controls) % 2 == 0:
upper = int(len(controls) / 2 + 1)
lower = upper - 2
else:
upper = int(len(controls) / 2 + 1)
lower = upper - 1
delta = 1 / float(lower) * 0.5
powers = [1.0] * len(controls)
#for i, (lowCtrl, upCtrl) in enumerate(zip(controls[upper:], reversed(controls[:lower]) ), 1):
for i, (lowCtrl, upCtrl) in enumerate(zip(range(upper, max), range( lower - 1, -1, -1 ) ), 1):
power = 1 - delta * i
powers[lowCtrl] = power
powers[upCtrl] = power
return powers
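# Worked example matching the docstring above: the centre control keeps full
# influence and it decays by delta (here 0.25) per step outward:
#   calcInfluence([None, 'X', 'Y', 'Z', None]) -> [0.5, 0.75, 1.0, 0.75, 0.5]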
|
bsd-3-clause
| -3,334,268,302,058,666,000
| 37.341732
| 194
| 0.60759
| false
| 3.658452
| false
| false
| false
|
t00mas/datascience-python
|
classification/knearest.py
|
1
|
1554
|
import matplotlib
import matplotlib.pyplot as pyplot
import numpy
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
def get_iris_dataset():
iris = datasets.load_iris()
return iris.data[:, :2], iris.target
def get_knn_classifier(X, y, n_neighbors=None):
if not n_neighbors:
n_neighbors = 6
classifier = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
classifier.fit(X, y)
return classifier, n_neighbors
def get_meshgrid(X, y, h=None):
if not h:
h = .02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
return numpy.meshgrid(
numpy.arange(x_min, x_max, h), numpy.arange(y_min, y_max, h))
def predict(classifier, mesh_xx, mesh_yy):
Z = classifier.predict(numpy.c_[mesh_xx.ravel(), mesh_yy.ravel()])
return Z.reshape(mesh_xx.shape)
def plot_classified_regions(X, y, classifier, n_neighbors):
xx, yy = get_meshgrid(X, y)
Z = predict(classifier, xx, yy)
pyplot.figure()
pyplot.pcolormesh(xx, yy, Z)
# Plot also the training points
cmap = ListedColormap(['#FFAAAA', '#AAFFAA', '#00AAFF'])
pyplot.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, alpha=0.8)
pyplot.xlim(xx.min(), xx.max())
pyplot.ylim(yy.min(), yy.max())
pyplot.title("3-Class classification (k = %i)" % (n_neighbors))
pyplot.savefig('knearest.png')
X, y = get_iris_dataset()
knn, n_neighbors = get_knn_classifier(X, y)
plot_classified_regions(X, y, knn, n_neighbors)
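# A quick variation using the same helpers (note: this would overwrite the
# same 'knearest.png' output) to inspect a different neighbour count:
#   knn5, k5 = get_knn_classifier(X, y, n_neighbors=5)
#   plot_classified_regions(X, y, knn5, k5)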
|
mit
| 2,858,620,498,153,865,000
| 28.320755
| 80
| 0.63964
| false
| 2.971319
| false
| false
| false
|
arduino-org/s4t-iotronic
|
lib/test_pub.py
|
1
|
2229
|
wampAddress = 'ws://172.17.3.139:8181/ws'
wampRealm = 's4t'
#from threading import Thread
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.wamp import ApplicationSession
from twisted.internet.defer import inlineCallbacks
# imports for testing
from twisted.internet.defer import DeferredQueue
from twisted.internet import threads
# Autobahn class for managing communication with remote devices
class AutobahnMRS(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Sessio attached [Connect to WAMP Router] Sub")
def onMessage(*args):
print(args)
try:
yield self.subscribe(onMessage, 'test')
print ("Subscribed to topic: test")
except Exception as e:
print("Exception:" +e)
# Autobahn class for managing internal communication
class AutobahnIM(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("Sessio attached [Connect to WAMP Router] Pub")
try:
yield self.publish('test','YOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
print ("Publish to topic: test")
except Exception as e:
print("Exception:" +e)
# Class for managing communication with remote devices
class ManageRemoteSystem:
def __init__(self):
self.runner = ApplicationRunner(url= wampAddress, realm = wampRealm)
def start(self):
self.runner.run(AutobahnMRS, start_reactor=False);
# Class for managing communication internal to ManageRemoteSystem
class InternalMessages:
def __init__(self):
self.runner = ApplicationRunner(url= wampAddress, realm = wampRealm)
def start(self):
self.runner.run(AutobahnIM, start_reactor=False);
# Main class for the iotronic service
#class S4tServer:
def something():
count = 0
while True:
print('something:', count)
yield sleep(1)
count+=1
if __name__ == '__main__':
#import multiprocessing
server = ManageRemoteSystem()
#sendMessage = InternalMessages()
server.start()
#sendMessage.start()
from twisted.internet import reactor
reactor.run()
#thread1 = Thread(target = reactor.run())
#thread2 = Thread(target = something())
#thread2.start()
#thread1.start()
#thread1.daemon = True
#thread2.daemon = True
#thread2.join()
#thread1.join()
|
apache-2.0
| -9,203,456,461,136,472,000
| 21.979381
| 77
| 0.746074
| false
| 3.126227
| false
| false
| false
|
pitunti/alfaPitunti
|
plugin.video.alfa/channels/tupornotv.py
|
1
|
10715
|
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Pendientes de Votación", action="novedades",
url="http://tuporno.tv/pendientes"))
itemlist.append(
Item(channel=item.channel, title="Populares", action="masVistos", url="http://tuporno.tv/", folder=True))
itemlist.append(
Item(channel=item.channel, title="Categorias", action="categorias", url="http://tuporno.tv/categorias/",
folder=True))
itemlist.append(Item(channel=item.channel, title="Videos Recientes", action="novedades",
url="http://tuporno.tv/videosRecientes/", folder=True))
itemlist.append(Item(channel=item.channel, title="Top Videos (mas votados)", action="masVotados",
url="http://tuporno.tv/topVideos/", folder=True))
itemlist.append(Item(channel=item.channel, title="Nube de Tags", action="categorias", url="http://tuporno.tv/tags/",
folder=True))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def novedades(item):
logger.info()
url = item.url
# ------------------------------------------------------
# Download the page
# ------------------------------------------------------
data = scrapertools.cachePage(url)
# logger.info(data)
# ------------------------------------------------------
# Extract the entries
# ------------------------------------------------------
# new releases section
'''
<table border="0" cellpadding="0" cellspacing="0" ><tr><td align="center" width="100%" valign="top" height="160px">
<a href="/videos/cogiendo-en-el-bosque"><img src="imagenes/videos//c/o/cogiendo-en-el-bosque_imagen2.jpg" alt="Cogiendo en el bosque" border="0" align="top" /></a>
<h2><a href="/videos/cogiendo-en-el-bosque">Cogiendo en el bosque</a></h2>
'''
patronvideos = '<div class="relative">(.*?)</div><div class="video'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
# if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
# Titulo
try:
scrapedtitle = re.compile('title="(.+?)"').findall(match)[0]
except:
scrapedtitle = ''
try:
scrapedurl = re.compile('href="(.+?)"').findall(match)[0]
scrapedurl = urlparse.urljoin(url, scrapedurl)
except:
continue
try:
scrapedthumbnail = re.compile('src="(.+?)"').findall(match)[0]
scrapedthumbnail = urlparse.urljoin(url, scrapedthumbnail)
except:
scrapedthumbnail = ''
scrapedplot = ""
try:
duracion = re.compile('<div class="duracion">(.+?)<').findall(match)[0]
except:
try:
duracion = re.compile('\((.+?)\)<br').findall(match[3])[0]
except:
duracion = ""
# logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], duracion=["+duracion+"]")
# Add to the XBMC listing
# trozos = scrapedurl.split("/")
# id = trozos[len(trozos)-1]
# videos = "http://149.12.64.129/videoscodiH264/"+id[0:1]+"/"+id[1:2]+"/"+id+".flv"
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
# ------------------------------------------------------
# Extract the pager
# ------------------------------------------------------
# <a href="/topVideos/todas/mes/2/" class="enlace_si">Siguiente </a>
patronsiguiente = '<a href="(.+?)" class="enlace_si">Siguiente </a>'
siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
if len(siguiente) > 0:
scrapedurl = urlparse.urljoin(url, siguiente[0])
itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))
return itemlist
def masVistos(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True))
itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True))
itemlist.append(
Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True))
itemlist.append(
Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True))
return itemlist
def categorias(item):
logger.info()
url = item.url
# ------------------------------------------------------
# Download the page
# ------------------------------------------------------
data = scrapertools.cachePage(url)
# logger.info(data)
# ------------------------------------------------------
# Extract the entries
# ------------------------------------------------------
# categories section
# Pattern for the entries
if url == "http://tuporno.tv/categorias/":
patronvideos = '<li><a href="([^"]+)"' # URL
patronvideos += '>([^<]+)</a></li>' # TITULO
else:
patronvideos = '<a href="(.tags[^"]+)"' # URL
patronvideos += ' class="[^"]+">([^<]+)</a>' # TITULO
matches = re.compile(patronvideos, re.DOTALL).findall(data)
# if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
if match[1] in ["SexShop", "Videochat", "Videoclub"]:
continue
# Titulo
scrapedtitle = match[1]
scrapedurl = urlparse.urljoin(url, match[0])
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Add to the XBMC listing
itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle.capitalize(), url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def masVotados(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/topVideos/todas/hoy",
folder=True))
itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades",
url="http://tuporno.tv/topVideos/todas/recientes", folder=True))
itemlist.append(
Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/topVideos/todas/semana",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/topVideos/todas/mes",
folder=True))
itemlist.append(
Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/topVideos/todas/ano",
folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
else:
texto = item.extra.replace(" ", "+")
item.url = "http://tuporno.tv/buscador/?str=" + texto
try:
return getsearch(item)
# Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def getsearch(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
patronvideos = '<div class="relative"><a href="(.videos[^"]+)"[^>]+><img.+?src="([^"]+)" alt="(.+?)" .*?<div class="duracion">(.+?)</div></div></div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
if len(matches) > 0:
for match in matches:
# Titulo
scrapedtitle = match[2].replace("<b>", "")
scrapedtitle = scrapedtitle.replace("</b>", "")
scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
scrapedplot = ""
duracion = match[3]
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
'''<a href="/buscador/?str=busqueda&desde=HV_PAGINA_SIGUIENTE" class="enlace_si">Siguiente </a>'''
patronsiguiente = '<a href="([^"]+)" class="enlace_si">Siguiente </a>'
siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
if len(siguiente) > 0:
patronultima = '<!--HV_SIGUIENTE_ENLACE'
ultpagina = re.compile(patronultima, re.DOTALL).findall(data)
scrapertools.printMatches(siguiente)
if len(ultpagina) == 0:
scrapedurl = urlparse.urljoin(item.url, siguiente[0])
itemlist.append(
Item(channel=item.channel, action="getsearch", title="!Next page", url=scrapedurl, folder=True))
return itemlist
def play(item):
logger.info()
itemlist = []
# Read the video page
data = scrapertools.cachePage(item.url)
codVideo = scrapertools.get_match(data, 'body id="([^"]+)"')
logger.info("codVideo=" + codVideo)
# Read the page with the code
# http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146
url = "http://tuporno.tv/flvurl.php?codVideo=" + codVideo + "&v=MAC%2011,5,502,146"
data = scrapertools.cachePage(url)
logger.info("data=" + data)
kpt = scrapertools.get_match(data, "kpt\=(.+?)\&")
logger.info("kpt=" + kpt)
# Decode
import base64
url = base64.decodestring(kpt)
logger.info("url=" + url)
itemlist.append(
Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot,
server="Directo", folder=False))
return itemlist
|
gpl-3.0
| 8,343,848,909,474,973,000
| 39.549242
| 167
| 0.567865
| false
| 3.560027
| false
| false
| false
|
designcc/django-ccbasket
|
ccbasket_testproject/shop/views.py
|
1
|
1061
|
# -*- coding: utf-8 -*-
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.response import TemplateResponse
from models import SimpleProduct, ComplexProduct
logger = logging.getLogger('ccbasket')
def home(request):
return render_to_response('shop/home.html', {
}, context_instance=RequestContext(request))
def product(request, pk, model=SimpleProduct, template='shop/product.html'):
product = model.objects.get(pk=pk)
return render_to_response(template, {
'product': product
}, context_instance=RequestContext(request))
def index(request):
simple_products = SimpleProduct.objects.all()
complex_products = ComplexProduct.objects.all()
products = []
for product in simple_products:
products.append(product)
for product in complex_products:
products.append(product)
return render_to_response('shop/index.html', {
'products': products,
}, context_instance=RequestContext(request))
|
bsd-3-clause
| -8,419,915,165,657,524,000
| 26.921053
| 76
| 0.71065
| false
| 4.278226
| false
| false
| false
|
david-abel/simple_rl
|
simple_rl/tasks/maze_1d/Maze1DPOMDPClass.py
|
1
|
2420
|
# Python imports.
from collections import defaultdict
import random
# Other imports.
from simple_rl.pomdp.POMDPClass import POMDP
from simple_rl.tasks.maze_1d.Maze1DStateClass import Maze1DState
class Maze1DPOMDP(POMDP):
''' Class for a 1D Maze POMDP '''
ACTIONS = ['west', 'east']
OBSERVATIONS = ['nothing', 'goal']
def __init__(self):
self._states = [Maze1DState('left'), Maze1DState('middle'), Maze1DState('right'), Maze1DState('goal')]
# Initial belief is a uniform distribution over states
b0 = defaultdict()
for state in self._states: b0[state] = 0.25
POMDP.__init__(self, Maze1DPOMDP.ACTIONS, Maze1DPOMDP.OBSERVATIONS, self._transition_func, self._reward_func, self._observation_func, b0)
def _transition_func(self, state, action):
'''
Args:
state (Maze1DState)
action (str)
Returns:
next_state (Maze1DState)
'''
if action == 'west':
if state.name == 'left':
return Maze1DState('left')
if state.name == 'middle':
return Maze1DState('left')
if state.name == 'right':
return Maze1DState('goal')
if state.name == 'goal':
return Maze1DState(random.choice(['left', 'middle', 'right']))
if action == 'east':
if state.name == 'left':
return Maze1DState('middle')
if state.name == 'middle':
return Maze1DState('goal')
if state.name == 'right':
return Maze1DState('right')
if state.name == 'goal':
return Maze1DState(random.choice(['left', 'middle', 'right']))
raise ValueError('Invalid state: {} action: {} in 1DMaze'.format(state, action))
def _observation_func(self, state, action):
next_state = self._transition_func(state, action)
return 'goal' if next_state.name == 'goal' else 'nothing'
def _reward_func(self, state, action, next_state):
# next_state = self._transition_func(state, action)
observation = self._observation_func(state, action)
return (1. - self.step_cost) if (next_state.name == observation == 'goal') else (0. - self.step_cost)
def is_in_goal_state(self):
return self.cur_state.name == 'goal'
if __name__ == '__main__':
maze_pomdp = Maze1DPOMDP()
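# Minimal sketch exercising the dynamics defined above (only functions from
# this file are used): 'east' from 'middle' reaches the goal, which emits
# the 'goal' observation.
# next_state = maze_pomdp._transition_func(Maze1DState('middle'), 'east')
# assert maze_pomdp._observation_func(Maze1DState('middle'), 'east') == 'goal'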
|
apache-2.0
| 2,584,774,365,799,531,000
| 35.666667
| 145
| 0.579752
| false
| 3.543192
| false
| false
| false
|
tenable/Tenable.io-SDK-for-Python
|
tenable_io/api/plugins.py
|
1
|
1572
|
from tenable_io.api.base import BaseApi
from tenable_io.api.models import PluginDetails, PluginFamilyDetails, PluginFamilyList
class PluginsApi(BaseApi):
def families(self, include_all=None):
"""Return list of plugin families.
:param include_all: Whether or not to include all plugins. Defaults to the less inclusive listing.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginFamilyList`.
"""
params = {'all': include_all}
response = self._client.get('plugins/families', params={k: v for (k, v) in params.items() if v})
return PluginFamilyList.from_json(response.text)
def family_details(self, family_id):
"""Return plugin family details.
:param family_id: Plugin family ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginFamilyDetails`.
"""
response = self._client.get('plugins/families/%(id)s', path_params={'id': family_id})
return PluginFamilyDetails.from_json(response.text)
def plugin_details(self, plugin_id):
"""Return plugin details.
:param plugin_id: Plugin ID.
:raise TenableIOApiException: When API error is encountered.
:return: An instance of :class:`tenable_io.api.models.PluginDetails`.
"""
response = self._client.get('plugins/plugin/%(id)s', path_params={'id': plugin_id})
return PluginDetails.from_json(response.text)
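# Usage sketch (hedged: assumes the SDK's TenableIoClient exposes this class
# as `client.plugins_api`; that wiring is not shown in this file, and the
# plugin id below is just an example):
#   from tenable_io.client import TenableIoClient
#   client = TenableIoClient()
#   families = client.plugins_api.families(include_all=True)
#   details = client.plugins_api.plugin_details(plugin_id=19506)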
|
mit
| 4,282,903,785,587,915,000
| 42.666667
| 104
| 0.67112
| false
| 3.900744
| false
| false
| false
|
anarchivist/pyflag
|
src/plugins/Flash/AdvancedCommands.py
|
1
|
13927
|
""" These Flash commands allow more sophisticated operations, most of
which may not be needed by most users. Some operations are
specifically designed for testing and have little use in practice.
"""
import pyflag.pyflagsh as pyflagsh
import pyflag.Registry as Registry
import pyflag.DB as DB
import fnmatch
import pyflag.FileSystem as FileSystem
import pyflag.Scanner as Scanner
import time, types
import pyflag.pyflaglog as pyflaglog
import BasicCommands
import pyflag.ScannerUtils as ScannerUtils
import pyflag.conf
config=pyflag.conf.ConfObject()
class scan_path(pyflagsh.command):
""" This takes a path as an argument and runs the specified scanner on the path
this might be of more use than specifying inodes for the average user since if you load
two disk images, then you might have /disk1 and /disk2 and want to just run scans over
one of them, which is simpler to specify using /disk1. """
def help(self):
return "scan VFSPath [list of scanners]: Scans the VFS path with the scanners specified"
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['path']
def wait_for_scan(self, cookie):
""" Waits for scanners to complete """
pdbh = DB.DBO()
pdbh.check_index('jobs','cookie')
## Often this process owns a worker as well. In that case we can wake it up:
import pyflag.Farm as Farm
Farm.wake_workers()
## Wait until there are no more jobs left.
while 1:
pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r",
(cookie,
self.environment._CASE))
row = pdbh.fetch()
if row['total']==0: break
time.sleep(1)
def execute(self):
scanners=[]
if len(self.args)<2:
yield self.help()
return
elif type(self.args[1]) == types.ListType:
scanners = self.args[1]
else:
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
## Assume that people always want recursive - I think this makes sense
path = self.args[0]
if not path.endswith("*"):
path = path + "*"
## FIXME For massive images this should be broken up, as in the old GUI method
dbh=DB.DBO(self.environment._CASE)
dbh.execute("select inode.inode from inode join file on file.inode = inode.inode where file.path rlike %r", fnmatch.translate(path))
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
## This is a cookie used to identify our requests so that we
## can check they have been done later.
cookie = int(time.time())
for row in dbh:
inode = row['inode']
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = row['inode'],
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
self.wait_for_scan(cookie)
yield "Scanning complete"
import pyflag.FlagFramework as FlagFramework
class init_flag_db(pyflagsh.command):
""" Creates the main flag db if needed """
def execute(self):
try:
dbh = DB.DBO()
except:
dbh = DB.DBO('mysql')
dbh.execute("create database `%s`" % config.FLAGDB)
dbh = DB.DBO()
FlagFramework.post_event("init_default_db", None)
yield "Done"
class delete_iosource(pyflagsh.command):
""" Deletes an iosource from the current case """
def complete(self, text, state):
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(name,1,%r) as abbrev,name from iosources where name like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['name']
def execute(self):
for iosource in self.args:
dbh = DB.DBO(self.environment._CASE)
dbh2 = dbh.clone()
dbh.delete('inode', where=DB.expand("inode like 'I%s|%%'", iosource))
dbh.execute("select * from filesystems where iosource = %r", iosource)
for row in dbh:
dbh2.delete('file', where=DB.expand("path like '%s%%'", iosource))
dbh.delete("iosources", where=DB.expand("name=%r", iosource))
yield "Removed IOSource %s" % iosource
class scan(pyflagsh.command):
""" Scan a glob of inodes with a glob of scanners """
def help(self):
return "scan inode [list of scanners]: Scans the inodes with the scanners specified"
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(inode,1,%r) as abbrev,inode from inode where inode like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['inode']
def execute(self):
if len(self.args)<2:
yield self.help()
return
## Try to glob the inode list:
dbh=DB.DBO(self.environment._CASE)
dbh.execute("select inode from inode where inode rlike %r",fnmatch.translate(self.args[0]))
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
## This is a cookie used to identify our requests so that we
## can check they have been done later.
cookie = int(time.time())
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
scanners = ScannerUtils.fill_in_dependancies(scanners)
for row in dbh:
inode = row['inode']
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = row['inode'],
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
if self.environment.interactive:
self.wait_for_scan(cookie)
yield "Scanning complete"
def wait_for_scan(self, cookie):
""" Waits for scanners to complete """
pdbh = DB.DBO()
## Often this process owns a worker as well. In that case we can wake it up:
import pyflag.Farm as Farm
Farm.wake_workers()
## Wait until there are no more jobs left.
while 1:
pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r", (cookie,
self.environment._CASE))
row = pdbh.fetch()
if row and row['total']==0: break
time.sleep(1)
class scan_file(scan,BasicCommands.ls):
""" Scan a file in the VFS by name """
def help(self):
return "scan file [list of scanners]: Scan the file with the scanners specified "
def complete(self, text,state):
if len(self.args)>2 or len(self.args)==2 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
else:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['path']
def execute(self):
if len(self.args)<2:
yield self.help()
return
pdbh = DB.DBO()
pdbh.mass_insert_start('jobs')
cookie = int(time.time())
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
for path in self.glob_files(self.args[:1]):
path, inode, inode_id = self.environment._FS.lookup(path = path)
## This is a cookie used to identify our requests so that we
## can check they have been done later.
pdbh.mass_insert(
command = 'Scan',
arg1 = self.environment._CASE,
arg2 = inode,
arg3 = ','.join(scanners),
cookie=cookie,
)
pdbh.mass_insert_commit()
## Wait for the scanners to finish:
if 1 or self.environment.interactive:
self.wait_for_scan(cookie)
yield "Scanning complete"
##
## This allows people to reset based on the VFS path
##
class scanner_reset_path(scan):
""" Reset all files under a specified path """
def help(self):
return "scanner_reset_path path [list of scanners]: Resets the inodes under the path given with the scanners specified"
def execute(self):
if len(self.args)<2:
yield self.help()
return
scanners = []
if type(self.args[1]) == types.ListType:
scanners = self.args[1]
else:
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
print "GETTING FACTORIES"
factories = Scanner.get_factories(self.environment._CASE, scanners)
print "OK NOW RESETING EM"
for f in factories:
f.reset_entire_path(self.args[0])
print "HOKAY"
yield "Reset Complete"
## There is little point in distributing this because its very quick anyway.
class scanner_reset(scan):
""" Reset multiple inodes as specified by a glob """
def help(self):
return "reset inode [list of scanners]: Resets the inodes with the scanners specified"
def execute(self):
if len(self.args)<2:
yield self.help()
return
scanners = []
for i in range(1,len(self.args)):
scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i]))
factories = Scanner.get_factories(self.environment._CASE, scanners)
for f in factories:
f.multiple_inode_reset(self.args[0])
yield "Resetting complete"
class load_and_scan(scan):
""" Load a filesystem and scan it at the same time """
def help(self):
return """load_and_scan iosource mount_point fstype [list of scanners]:
Loads the iosource into the right mount point and scans all
new inodes using the scanner list. This allows scanning to
start as soon as VFS inodes are produced and before the VFS is
fully populated.
"""
def complete(self, text,state):
if len(self.args)>4 or len(self.args)==4 and not text:
scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \
[ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ]
return scanners[state]
elif len(self.args)>3 or len(self.args)==3 and not text:
fstypes = [ x for x in Registry.FILESYSTEMS.class_names if x.startswith(text) ]
return fstypes[state]
elif len(self.args)>2 or len(self.args)==2 and not text:
return
elif len(self.args)>1 or len(self.args)==1 and not text:
dbh = DB.DBO(self.environment._CASE)
dbh.execute("select substr(value,1,%r) as abbrev,value from meta where property='iosource' and value like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state))
return dbh.fetch()['value']
def execute(self):
if len(self.args)<3:
yield self.help()
return
iosource=self.args[0]
mnt_point=self.args[1]
filesystem=self.args[2]
query = {}
dbh = DB.DBO()
dbh.mass_insert_start('jobs')
## This works out all the scanners that were specified:
tmp = []
for i in range(3,len(self.args)):
## Is it a parameter?
if "=" in self.args[i]:
prop,value = self.args[i].split("=",1)
query[prop] = value
else:
tmp.extend([x for x in fnmatch.filter(
Registry.SCANNERS.scanners, self.args[i]) ])
scanners = [ ]
for item in tmp:
if item not in scanners:
scanners.append(item)
## Load the filesystem:
try:
fs = Registry.FILESYSTEMS.dispatch(filesystem)
except KeyError:
yield "Unable to find a filesystem of %s" % filesystem
return
fs=fs(self.environment._CASE, query)
fs.cookie = int(time.time())
fs.load(mnt_point, iosource, scanners)
## Wait for all the scanners to finish
self.wait_for_scan(fs.cookie)
yield "Loading complete"
|
gpl-2.0
| -4,366,271,114,927,967,700
| 36.438172
| 178
| 0.575644
| false
| 3.877227
| false
| false
| false
|
mobarski/sandbox
|
parallel/p7cat.py
|
1
|
1316
|
## p7cat.py - parallel concatenation
## (c) 2017 by mobarski (at) gmail (dot) com
## licence: MIT
## version: x1
from __future__ import print_function
import sys
import os
from multiprocessing import Process
from time import time
def write_part(path_in, path_out, offset, blocksize=4096):
fi = open(path_in,'rb')
fo = open(path_out,'r+b')
fo.seek(offset)
while True:
block = fi.read(blocksize)
fo.write(block)
if len(block)<blocksize: break
fi.close()
fo.close()
if __name__ == "__main__":
t0 = time()
print("\n\tP7 CONCAT START\n")
outpath = sys.argv[1]
filenames = sys.argv[2:]
#print('\tOUT',outpath)
#print('\tIN\n',filenames)
meta = {} # filename -> size, offset
offset = 0
for path in filenames:
size = os.path.getsize(path)
meta[path] = (size,offset)
offset += size
# allocate disk space
out = open(outpath,'wb')
out.seek(offset-1)
out.write(b'\x00')
out.close()
proc = {}
for path in filenames:
size,offset = meta[path]
p = Process(target=write_part, args=(path, outpath, offset))
p.start()
print("\tBEGIN pid:{0} size:{2} offset:{1}".format(p.pid,offset,size))
proc[path] = p
sys.stdout.flush()
for path in filenames:
p = proc[path]
p.join()
print("\tEND pid:{0}".format(p.pid))
print("\n\tRUN_TIME_TOTAL:{0:.1f}s\n".format(time()-t0))
|
mit
| -6,190,227,077,822,655,000
| 20.933333
| 75
| 0.647416
| false
| 2.626747
| false
| false
| false
|
stoq/stoqdrivers
|
docs/fiscal-driver-template.py
|
1
|
5263
|
#
# Stoqdrivers template driver
#
# Copyright (C) 2007 Async Open Source <http://www.async.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
import datetime
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.enum import TaxType
from stoqdrivers.interfaces import ICouponPrinter
from stoqdrivers.printers.capabilities import Capability
from stoqdrivers.printers.fiscal import SintegraData
from stoqdrivers.serialbase import SerialBase
from stoqdrivers.translation import stoqdrivers_gettext
_ = stoqdrivers_gettext
@implementer(ICouponPrinter)
class TemplateDriver(SerialBase):
supported = True
model_name = "Template Driver"
coupon_printer_charset = "ascii"
def __init__(self, port, consts=None):
SerialBase.__init__(self, port)
#
# This implements the ICouponPrinter Interface
#
# Coupon methods
def coupon_identify_customer(self, customer, address, document):
pass
def coupon_open(self):
pass
def coupon_cancel(self):
pass
def coupon_close(self, message):
coupon_id = 123
return coupon_id
def coupon_add_item(self, code, description, price, taxcode,
quantity, unit, discount, markup, unit_desc):
item_id = 123
return item_id
def coupon_cancel_item(self, item_id):
pass
def coupon_add_payment(self, payment_method, value, description):
return Decimal("123")
def coupon_totalize(self, discount, markup, taxcode):
return Decimal("123")
# Till / Daily flow
def summarize(self):
# Leitura X (X reading / summary report)
pass
def close_till(self, previous_day):
# Redução Z (Z reduction / daily close)
pass
def till_add_cash(self, value):
# Suprimento (cash supply)
pass
def till_remove_cash(self, value):
# Sangria (cash removal)
pass
def till_read_memory(self, start, end):
# Fiscal memory reading (data)
pass
def till_read_memory_by_reductions(self, start, end):
# Fiscal memory reading (by reductions)
pass
# Introspection
def get_capabilities(self):
return dict(
item_code=Capability(max_len=13),
item_id=Capability(digits=4),
items_quantity=Capability(min_size=1, digits=4, decimals=3),
item_price=Capability(digits=6, decimals=2),
item_description=Capability(max_len=29),
payment_value=Capability(digits=12, decimals=2),
promotional_message=Capability(max_len=320),
payment_description=Capability(max_len=48),
customer_name=Capability(max_len=30),
customer_id=Capability(max_len=28),
customer_address=Capability(max_len=80),
add_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
remove_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
)
def get_constants(self):
return self._consts
def get_tax_constants(self):
constants = []
constants.append((TaxType.CUSTOM,
'01',
Decimal('18.00')))
constants.append((TaxType.CUSTOM,
'02',
Decimal('25.00')))
constants.extend([
(TaxType.SUBSTITUTION, 'FF', None),
(TaxType.EXEMPTION, 'II', None),
(TaxType.NONE, 'NN', None),
])
return constants
def get_payment_constants(self):
methods = []
methods.append(('01', 'DINHEIRO'))
methods.append(('02', 'CHEQUE'))
return methods
def get_sintegra(self):
taxes = []
taxes.append(('2500', Decimal("0")))
taxes.append(('1800', Decimal("0")))
taxes.append(('CANC', Decimal("0")))
taxes.append(('DESC', Decimal("0")))
taxes.append(('I', Decimal("0")))
taxes.append(('N', Decimal("0")))
taxes.append(('F', Decimal("0")))
return SintegraData(
opening_date=datetime.date(2000, 1, 1),
            serial=self.get_serial(),
serial_id='001',
coupon_start=0,
coupon_end=100,
cro=230,
crz=1232,
coo=320,
period_total=Decimal("1123"),
total=Decimal("2311123"),
taxes=taxes)
# Device detection, asynchronous
def query_status(self):
return 'XXX'
def status_reply_complete(self, reply):
return len(reply) == 23
def get_serial(self):
return 'ABC12345678'
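# A minimal usage sketch (hypothetical; the port object, device path and the
# exact argument values depend on the local stoqdrivers setup):
#
#   from stoqdrivers.serialbase import SerialPort
#   printer = TemplateDriver(SerialPort('/dev/ttyS0'))
#   printer.coupon_open()
#   printer.coupon_add_item('001', 'Soda', Decimal('3.50'), '01',
#                           Decimal('1'), 'un', Decimal('0'), Decimal('0'), '')
#   printer.coupon_totalize(Decimal('0'), Decimal('0'), '01')
#   printer.coupon_add_payment('01', Decimal('3.50'), 'Cash')
#   printer.coupon_close('Thank you!')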
|
lgpl-2.1
| -2,792,795,015,464,589,300
| 27.737705
| 78
| 0.610953
| false
| 3.969057
| false
| false
| false
|
waheedahmed/edx-platform
|
openedx/core/djangoapps/api_admin/views.py
|
1
|
9646
|
"""Views for API management."""
import logging
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse_lazy, reverse
from django.http.response import JsonResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView
from oauth2_provider.generators import generate_client_secret, generate_client_id
from oauth2_provider.models import get_application_model
from oauth2_provider.views import ApplicationRegistration
from slumber.exceptions import HttpNotFoundError
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.api_admin.decorators import require_api_access
from openedx.core.djangoapps.api_admin.forms import ApiAccessRequestForm, CatalogForm
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest, Catalog
from openedx.core.djangoapps.api_admin.utils import course_discovery_api_client
log = logging.getLogger(__name__)
Application = get_application_model() # pylint: disable=invalid-name
class ApiRequestView(CreateView):
"""Form view for requesting API access."""
form_class = ApiAccessRequestForm
template_name = 'api_admin/api_access_request_form.html'
success_url = reverse_lazy('api_admin:api-status')
def get(self, request):
"""
If the requesting user has already requested API access, redirect
them to the client creation page.
"""
if ApiAccessRequest.api_access_status(request.user) is not None:
return redirect(reverse('api_admin:api-status'))
return super(ApiRequestView, self).get(request)
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.site = get_current_site(self.request)
return super(ApiRequestView, self).form_valid(form)
class ApiRequestStatusView(ApplicationRegistration):
"""View for confirming our receipt of an API request."""
success_url = reverse_lazy('api_admin:api-status')
def get(self, request, form=None): # pylint: disable=arguments-differ
"""
If the user has not created an API request, redirect them to the
request form. Otherwise, display the status of their API
request. We take `form` as an optional argument so that we can
display validation errors correctly on the page.
"""
if form is None:
form = self.get_form_class()()
user = request.user
try:
api_request = ApiAccessRequest.objects.get(user=user)
except ApiAccessRequest.DoesNotExist:
return redirect(reverse('api_admin:api-request'))
try:
application = Application.objects.get(user=user)
except Application.DoesNotExist:
application = None
# We want to fill in a few fields ourselves, so remove them
# from the form so that the user doesn't see them.
for field in ('client_type', 'client_secret', 'client_id', 'authorization_grant_type'):
form.fields.pop(field)
return render_to_response('api_admin/status.html', {
'status': api_request.status,
'api_support_link': settings.API_DOCUMENTATION_URL,
'api_support_email': settings.API_ACCESS_MANAGER_EMAIL,
'form': form,
'application': application,
})
def get_form(self, form_class=None):
form = super(ApiRequestStatusView, self).get_form(form_class)
# Copy the data, since it's an immutable QueryDict.
copied_data = form.data.copy()
# Now set the fields that were removed earlier. We give them
# confidential client credentials, and generate their client
# ID and secret.
copied_data.update({
'authorization_grant_type': Application.GRANT_CLIENT_CREDENTIALS,
'client_type': Application.CLIENT_CONFIDENTIAL,
'client_secret': generate_client_secret(),
'client_id': generate_client_id(),
})
form.data = copied_data
return form
def form_valid(self, form):
# Delete any existing applications if the user has decided to regenerate their credentials
Application.objects.filter(user=self.request.user).delete()
return super(ApiRequestStatusView, self).form_valid(form)
def form_invalid(self, form):
return self.get(self.request, form)
@require_api_access
def post(self, request):
return super(ApiRequestStatusView, self).post(request)
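# Note on the flow above: posting the status form is how a user (re)generates
# API credentials. form_valid() first deletes any existing Application rows
# for the user, then the parent ApplicationRegistration view saves a new one
# using the confidential client-credentials grant injected by get_form().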
class ApiTosView(TemplateView):
"""View to show the API Terms of Service."""
template_name = 'api_admin/terms_of_service.html'
class CatalogSearchView(View):
"""View to search for catalogs belonging to a user."""
def get(self, request):
"""Display a form to search for catalogs belonging to a user."""
return render_to_response('api_admin/catalogs/search.html')
def post(self, request):
"""Redirect to the list view for the given user."""
username = request.POST.get('username')
# If no username is provided, bounce back to this page.
if not username:
return redirect(reverse('api_admin:catalog-search'))
return redirect(reverse('api_admin:catalog-list', kwargs={'username': username}))
class CatalogListView(View):
"""View to list existing catalogs and create new ones."""
template = 'api_admin/catalogs/list.html'
def _get_catalogs(self, client, username):
"""Retrieve catalogs for a user. Returns the empty list if none are found."""
try:
response = client.api.v1.catalogs.get(username=username)
return [Catalog(attributes=catalog) for catalog in response['results']]
except HttpNotFoundError:
return []
def get(self, request, username):
"""Display a list of a user's catalogs."""
client = course_discovery_api_client(request.user)
catalogs = self._get_catalogs(client, username)
return render_to_response(self.template, {
'username': username,
'catalogs': catalogs,
'form': CatalogForm(initial={'viewers': [username]}),
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
})
def post(self, request, username):
"""Create a new catalog for a user."""
form = CatalogForm(request.POST)
client = course_discovery_api_client(request.user)
if not form.is_valid():
catalogs = self._get_catalogs(client, username)
return render_to_response(self.template, {
'form': form,
'catalogs': catalogs,
'username': username,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
}, status=400)
attrs = form.instance.attributes
catalog = client.api.v1.catalogs.post(attrs)
return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']}))
class CatalogEditView(View):
"""View to edit an individual catalog."""
def get(self, request, catalog_id):
"""Display a form to edit this catalog."""
client = course_discovery_api_client(request.user)
response = client.api.v1.catalogs(catalog_id).get()
catalog = Catalog(attributes=response)
form = CatalogForm(instance=catalog)
return render_to_response('api_admin/catalogs/edit.html', {
'catalog': catalog,
'form': form,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
})
def post(self, request, catalog_id):
"""Update or delete this catalog."""
client = course_discovery_api_client(request.user)
if request.POST.get('delete-catalog') == 'on':
client.api.v1.catalogs(catalog_id).delete()
return redirect(reverse('api_admin:catalog-search'))
form = CatalogForm(request.POST)
if not form.is_valid():
response = client.api.v1.catalogs(catalog_id).get()
catalog = Catalog(attributes=response)
return render_to_response('api_admin/catalogs/edit.html', {
'catalog': catalog,
'form': form,
'preview_url': reverse('api_admin:catalog-preview'),
'catalog_api_url': client.api.v1.courses.url(),
}, status=400)
catalog = client.api.v1.catalogs(catalog_id).patch(form.instance.attributes)
return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']}))
class CatalogPreviewView(View):
"""Endpoint to preview courses for a query."""
def get(self, request):
"""
Return the results of a query against the course catalog API. If no
query parameter is given, returns an empty result set.
"""
client = course_discovery_api_client(request.user)
# Just pass along the request params including limit/offset pagination
if 'q' in request.GET:
results = client.api.v1.courses.get(**request.GET)
# Ensure that we don't just return all the courses if no query is given
else:
results = {'count': 0, 'results': [], 'next': None, 'prev': None}
return JsonResponse(results)
|
agpl-3.0
| -1,986,569,222,691,623,200
| 40.222222
| 98
| 0.651669
| false
| 4.16494
| false
| false
| false
|
chfoo/fogchamp
|
util/csv2json.py
|
1
|
4170
|
'''Convert CSV files into JSON files needed for the visualizer page.'''
import argparse
import json
import os
import functools
from util.readers.addarash1 import AddarashReader
from util.readers.bulbapedia import BulbapediaReader
from util.readers.chfoo import ChfooReader
from util.readers.editornotes import EditorNotesReader
from util.readers.nkekev import NkekevReader
from util.readers.pokedex import PokedexReader
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--output-dir', default='./')
arg_parser.add_argument('--metadata-dir', default='metadata/')
args = arg_parser.parse_args()
nkekev_dir = os.path.join(args.metadata_dir, 'nkekev')
chfoo_dir = os.path.join(args.metadata_dir, 'chfoo')
addarash1_dir = os.path.join(args.metadata_dir, 'addarash1')
pokedex_dir = os.path.join(args.metadata_dir, 'pokedex', 'pokedex', 'data', 'csv')
bulbapedia_dir = os.path.join(args.metadata_dir, 'bulbapedia')
editor_notes_dir = os.path.join(args.metadata_dir, 'editor_notes')
output_dir = args.output_dir
pokedex_reader = PokedexReader(pokedex_dir)
nkekev_reader = NkekevReader(nkekev_dir)
chfoo_reader = ChfooReader(chfoo_dir)
addarash1_reader = AddarashReader(addarash1_dir)
bulbapedia_reader = BulbapediaReader(bulbapedia_dir)
editor_notes_reader = EditorNotesReader(editor_notes_dir)
# Build each Pokemon's stats
movesets_funcs = [
('pbr-2.0', functools.partial(
addarash1_reader.read_pbr_2_0, nkekev_reader, chfoo_reader)),
('pbr-gold-1.2-2015-11-07', functools.partial(
addarash1_reader.read_pbr_gold_1_2_2015_11_07,
nkekev_reader, chfoo_reader)),
('pbr-gold-1.2', functools.partial(addarash1_reader.read_pbr_gold_1_2,
nkekev_reader, chfoo_reader)),
('pbr-seel', functools.partial(chfoo_reader.read_pbr_seel, nkekev_reader)),
('pbr-platinum', nkekev_reader.read_pbr_platinum),
('pbr-gold', nkekev_reader.read_pbr_gold),
]
for move_slug, func in movesets_funcs:
pokemon_stats = {}
pokemon_slugs = []
pokemon_types = pokedex_reader.read_pokemon_types()
pokemon_weights = pokedex_reader.read_pokemon_weights()
for pokemon_stat in func():
slug = pokemon_stat.pop('slug')
pokemon_slugs.append(slug)
pokemon_stats[slug] = pokemon_stat
pokemon_stats[slug]['types'] = pokemon_types[pokemon_stat['number']]
pokemon_stats[slug]['weight'] = pokemon_weights[pokemon_stat['number']]
json_path = os.path.join(output_dir, '{}.json'.format(move_slug))
with open(json_path, 'w') as file:
file.write(json.dumps({
'stats': pokemon_stats,
'pokemon_slugs': pokemon_slugs
}, indent=2, sort_keys=True))
# Build all the moves
move_stats = {}
for move in pokedex_reader.read_moves():
slug = move.pop('slug')
move_stats[slug] = move
bulbapedia_reader.downgrade_move_changes(move_stats)
editor_notes_reader.add_move_notes(move_stats)
json_path = os.path.join(output_dir, 'moves.json')
with open(json_path, 'w') as file:
file.write(json.dumps(move_stats, indent=2, sort_keys=True))
# Build descriptions and misc
abilities = {}
for ability in pokedex_reader.read_abilities():
slug = ability.pop('slug')
abilities[slug] = ability
editor_notes_reader.add_ability_notes(abilities)
types_efficacy = pokedex_reader.read_type_efficacy()
items = {}
for item in pokedex_reader.read_items():
slug = item.pop('slug')
items[slug] = item
item_renames = bulbapedia_reader.get_item_renames_map()
json_path = os.path.join(output_dir, 'descriptions.json')
with open(json_path, 'w') as file:
file.write(json.dumps({
'abilities': abilities,
'types_efficacy': types_efficacy,
'items': items,
'item_renames': item_renames,
}, indent=2, sort_keys=True))
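# Output summary (derived from the writes above): one "<moveset>.json" file
# per entry in movesets_funcs, plus "moves.json" and "descriptions.json", all
# written to --output-dir. A hypothetical invocation from the repo root:
#
#   python -m util.csv2json --metadata-dir metadata/ --output-dir build/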
if __name__ == '__main__':
main()
|
mit
| -2,807,500,552,308,781,000
| 35.26087
| 86
| 0.642206
| false
| 3.178354
| false
| false
| false
|
vpelletier/neoppod
|
neo/lib/event.py
|
1
|
9556
|
#
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, thread
from time import time
from select import epoll, EPOLLIN, EPOLLOUT, EPOLLERR, EPOLLHUP
from errno import EAGAIN, EEXIST, EINTR, ENOENT
from . import logging
from .locking import Lock
class EpollEventManager(object):
"""This class manages connections and events based on epoll(5)."""
_timeout = None
_trigger_exit = False
def __init__(self):
self.connection_dict = {}
        # Initialize a dummy 'unregistered' for the very rare case a registered
        # connection is closed before the first call to poll. We don't care
        # about leaking a few integers for connections closed between 2 polls.
self.unregistered = []
self.reader_set = set()
self.writer_set = set()
self.epoll = epoll()
self._pending_processing = []
self._trigger_fd, w = os.pipe()
os.close(w)
self._trigger_lock = Lock()
def close(self):
os.close(self._trigger_fd)
for c in self.connection_dict.values():
c.close()
del self.__dict__
def getConnectionList(self):
# XXX: use index
return [x for x in self.connection_dict.itervalues()
if not x.isAborted()]
def getClientList(self):
# XXX: use index
return [c for c in self.getConnectionList() if c.isClient()]
def getServerList(self):
# XXX: use index
return [c for c in self.getConnectionList() if c.isServer()]
def getConnectionListByUUID(self, uuid):
""" Return the connection associated to the UUID, None if the UUID is
None, invalid or not found"""
# XXX: use index
# XXX: consider remove UUID from connection and thus this method
if uuid is None:
return None
result = []
append = result.append
for conn in self.getConnectionList():
if conn.getUUID() == uuid:
append(conn)
return result
# epoll_wait always waits for EPOLLERR & EPOLLHUP so we're forced
# to unregister when we want to ignore all events for a connection.
def register(self, conn, timeout_only=False):
fd = conn.getConnector().getDescriptor()
self.connection_dict[fd] = conn
if timeout_only:
self.wakeup()
else:
self.epoll.register(fd)
self.addReader(conn)
def unregister(self, conn):
new_pending_processing = [x for x in self._pending_processing
if x is not conn]
        # Check that we removed at most one entry from
        # self._pending_processing.
assert len(new_pending_processing) > len(self._pending_processing) - 2
self._pending_processing = new_pending_processing
fd = conn.getConnector().getDescriptor()
try:
del self.connection_dict[fd]
self.unregistered.append(fd)
self.epoll.unregister(fd)
except KeyError:
pass
except IOError, e:
if e.errno != ENOENT:
raise
else:
self.reader_set.discard(fd)
self.writer_set.discard(fd)
def isIdle(self):
return not (self._pending_processing or self.writer_set)
def _addPendingConnection(self, conn):
pending_processing = self._pending_processing
if conn not in pending_processing:
pending_processing.append(conn)
def poll(self, blocking=1):
if not self._pending_processing:
# Fetch messages from polled file descriptors
self._poll(blocking)
if not self._pending_processing:
return
to_process = self._pending_processing.pop(0)
try:
to_process.process()
finally:
# ...and requeue if there are pending messages
if to_process.hasPendingMessages():
self._addPendingConnection(to_process)
# Non-blocking call: as we handled a packet, we should just offer
# poll a chance to fetch & send already-available data, but it must
# not delay us.
self._poll(0)
def _poll(self, blocking):
if blocking:
timeout = self._timeout
timeout_object = self
for conn in self.connection_dict.itervalues():
t = conn.getTimeout()
if t and (timeout is None or t < timeout):
timeout = t
timeout_object = conn
# Make sure epoll_wait does not return too early, because it has a
# granularity of 1ms and Python 2.7 rounds the timeout towards zero.
# See also https://bugs.python.org/issue20452 (fixed in Python 3).
blocking = .001 + max(0, timeout - time()) if timeout else -1
try:
event_list = self.epoll.poll(blocking)
except IOError, exc:
if exc.errno in (0, EAGAIN):
logging.info('epoll.poll triggered undocumented error %r',
exc.errno)
elif exc.errno != EINTR:
raise
return
if event_list:
self.unregistered = unregistered = []
wlist = []
elist = []
for fd, event in event_list:
if event & EPOLLIN:
conn = self.connection_dict[fd]
if conn.readable():
self._addPendingConnection(conn)
if event & EPOLLOUT:
wlist.append(fd)
if event & (EPOLLERR | EPOLLHUP):
elist.append(fd)
for fd in wlist:
if fd not in unregistered:
self.connection_dict[fd].writable()
for fd in elist:
if fd in unregistered:
continue
try:
conn = self.connection_dict[fd]
except KeyError:
assert fd == self._trigger_fd, fd
with self._trigger_lock:
self.epoll.unregister(fd)
if self._trigger_exit:
del self._trigger_exit
thread.exit()
continue
if conn.readable():
self._addPendingConnection(conn)
elif blocking > 0:
logging.debug('timeout triggered for %r', timeout_object)
timeout_object.onTimeout()
def onTimeout(self):
on_timeout = self._on_timeout
del self._on_timeout
self._timeout = None
on_timeout()
def setTimeout(self, *args):
self._timeout, self._on_timeout = args
def wakeup(self, exit=False):
with self._trigger_lock:
self._trigger_exit |= exit
try:
self.epoll.register(self._trigger_fd)
except IOError, e:
# Ignore if 'wakeup' is called several times in a row.
if e.errno != EEXIST:
raise
def addReader(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd not in self.reader_set:
self.reader_set.add(fd)
self.epoll.modify(fd, EPOLLIN | (
fd in self.writer_set and EPOLLOUT))
def removeReader(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd in self.reader_set:
self.reader_set.remove(fd)
self.epoll.modify(fd, fd in self.writer_set and EPOLLOUT)
def addWriter(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd not in self.writer_set:
self.writer_set.add(fd)
self.epoll.modify(fd, EPOLLOUT | (
fd in self.reader_set and EPOLLIN))
def removeWriter(self, conn):
connector = conn.getConnector()
assert connector is not None, conn.whoSetConnector()
fd = connector.getDescriptor()
if fd in self.writer_set:
self.writer_set.remove(fd)
self.epoll.modify(fd, fd in self.reader_set and EPOLLIN)
def log(self):
logging.info('Event Manager:')
logging.info(' Readers: %r', list(self.reader_set))
logging.info(' Writers: %r', list(self.writer_set))
logging.info(' Connections:')
pending_set = set(self._pending_processing)
for fd, conn in self.connection_dict.items():
logging.info(' %r: %r (pending=%r)', fd, conn,
conn in pending_set)
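# A minimal driving-loop sketch (hypothetical; connection objects come from
# neo.lib.connection and wrap a real socket connector):
#
#   em = EpollEventManager()
#   em.register(conn)           # watch conn's descriptor for reads
#   while running:
#       em.poll(1)              # block, then process one pending connection
#   em.wakeup(exit=True)        # unblock a thread stuck in epoll_wait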
# Default to EpollEventManager.
EventManager = EpollEventManager
|
gpl-2.0
| 4,390,888,818,905,951,000
| 36.03876
| 80
| 0.57409
| false
| 4.287124
| false
| false
| false
|
AndKyr/GETELEC
|
python/JFplot.py
|
1
|
1648
|
#! /usr/bin/python
import numpy as np
import getelec_mod as gt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mb
font = 30
# mb.rcParams["font.family"] = "Serif"
mb.rcParams["font.size"] = font
mb.rcParams["axes.labelsize"] = font
mb.rcParams["xtick.labelsize"] = font
mb.rcParams["ytick.labelsize"] = font
mb.rcParams["legend.fontsize"] = font
mb.rcParams["lines.linewidth"] = 2.5
fsize = (18,10)
Npoints = 256
Temps = [1.e-2, 300, 800, 1500]
Xfn = np.linspace(0.12, 0.35, Npoints)
F = 1./Xfn
Jem = np.copy(F)
this = gt.emission_create(W = 4.5, R = 5000., approx = 2)
fig1 = plt.figure(figsize=fsize)
ax1 = fig1.gca()
ax1.set_xlabel(r"$1/F$ [m GV$^{-1}$]")
ax1.set_ylabel(r"$J$ [A nm$^{-2}$]")
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
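# Sweep the field strength for each temperature and plot J against 1/F on a
# semilog axis (a Fowler-Nordheim-style plot); approx is switched to -1 for
# the near-zero-temperature case, per the branch below.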
for i in range(len(Temps)):
this.Temp = Temps[i]
if (this.Temp < 10.):
this.approx = -1
else:
this.approx = 2
for j in range(len(F)):
this.F = F[j]
this.cur_dens()
Jem[j] = this.Jem
ax1.semilogy(Xfn,Jem, label = r'T = %d K'%this.Temp)
# for i in range(len(Temps)):
# this.Temp = Temps[i]
# if (this.Temp < 10.):
# this.approx = -1
# else:
# this.approx = -1
# for j in range(len(F)):
# this.F = F[j]
# this.cur_dens()
# Jem[j] = this.Jem
# ax1.semilogy(Xfn,Jem, '--', color = colors[i], label = r'T = %d K'%this.Temp)
# np.savetxt("J-F.dat", np.transpose(np.array([F,Jem])), delimiter = " ")
ax1.grid()
ax1.legend()
plt.savefig("JFplot_Tparam.svg")
plt.savefig("JFplot_Tparam.png")
plt.show()
|
gpl-3.0
| -1,551,624,963,708,100,600
| 20.402597
| 83
| 0.586772
| false
| 2.478195
| false
| false
| false
|
goyal-sidd/BLT
|
website/models.py
|
1
|
10857
|
import os
from urlparse import urlparse
import requests
import tweepy
from PIL import Image
from annoying.fields import AutoOneToOneField
from colorthief import ColorThief
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import Count
from django.db.models import signals
from django.db.models.signals import post_save
from unidecode import unidecode
class Domain(models.Model):
name = models.CharField(max_length=255, unique=True)
url = models.URLField()
logo = models.ImageField(upload_to="logos", null=True, blank=True)
webshot = models.ImageField(upload_to="webshots", null=True, blank=True)
clicks = models.IntegerField(null=True, blank=True)
email_event = models.CharField(max_length=255, default="", null=True, blank=True)
color = models.CharField(max_length=10, null=True, blank=True)
github = models.CharField(max_length=255, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
twitter = models.CharField(max_length=30, null=True, blank=True)
facebook = models.URLField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
@property
def open_issues(self):
return Issue.objects.filter(domain=self).exclude(status="closed")
@property
def closed_issues(self):
return Issue.objects.filter(domain=self).filter(status="closed")
@property
def top_tester(self):
return User.objects.filter(issue__domain=self).annotate(total=Count('issue')).order_by('-total').first()
@property
def get_name(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
    def get_logo(self):
        if self.logo:
            return self.logo.url
        image_request = requests.get("https://logo.clearbit.com/" + self.name)
        try:
            if image_request.status_code == 200:
                image_content = ContentFile(image_request.content)
                self.logo.save(self.name + ".jpg", image_content)
                return self.logo.url
        except:
            pass
        # Fall back to the site's favicon whenever no logo could be fetched.
        favicon_url = self.url + '/favicon.ico'
        return favicon_url
@property
def get_color(self):
if self.color:
return self.color
else:
if not self.logo:
self.get_logo()
try:
color_thief = ColorThief(self.logo)
self.color = '#%02x%02x%02x' % color_thief.get_color(quality=1)
except:
self.color = "#0000ff"
self.save()
return self.color
@property
def hostname_domain(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@property
def domain_name(self):
parsed_url = urlparse(self.url)
domain = parsed_url.hostname
temp = domain.rsplit('.')
if (len(temp) == 3):
domain = temp[1] + '.' + temp[2]
return domain
def get_absolute_url(self):
return "/domain/" + self.name
def validate_image(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = 3.0
if filesize > megabyte_limit * 1024 * 1024:
raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Issue(models.Model):
labels = (
(0, 'General'),
(1, 'Number Error'),
(2, 'Functional'),
(3, 'Performance'),
(4, 'Security'),
(5, 'Typo'),
(6, 'Design')
)
user = models.ForeignKey(User, null=True, blank=True)
domain = models.ForeignKey(Domain, null=True, blank=True)
url = models.URLField()
description = models.TextField()
label = models.PositiveSmallIntegerField(choices=labels, default=0)
views = models.IntegerField(null=True, blank=True)
status = models.CharField(max_length=10, default="open", null=True, blank=True)
user_agent = models.CharField(max_length=255, default="", null=True, blank=True)
ocr = models.TextField(default="", null=True, blank=True)
screenshot = models.ImageField(upload_to="screenshots", validators=[validate_image])
closed_by = models.ForeignKey(User, null=True, blank=True, related_name="closed_by")
closed_date = models.DateTimeField(default=None, null=True, blank=True)
github_url = models.URLField(default="", null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.description
@property
def domain_title(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
@property
def hostname_domain(self):
parsed_url = urlparse(self.url)
return parsed_url.hostname
@property
def domain_name(self):
parsed_url = urlparse(self.url)
domain = parsed_url.hostname
temp = domain.rsplit('.')
if (len(temp) == 3):
domain = temp[1] + '.' + temp[2]
return domain
def get_twitter_message(self):
issue_link = " bugheist.com/issue/" + str(self.id)
prefix = "Bug found on @"
spacer = " | "
msg = prefix + self.domain_title + spacer + self.description[:140 - (
len(prefix) + len(self.domain_title) + len(spacer) + len(issue_link))] + issue_link
return msg
def get_ocr(self):
if self.ocr:
return self.ocr
else:
try:
import pytesseract
self.ocr = pytesseract.image_to_string(Image.open(self.screenshot))
self.save()
return self.ocr
except:
return "OCR not installed"
@property
def get_absolute_url(self):
return "/issue/" + str(self.id)
class Meta:
ordering = ['-created']
TWITTER_MAXLENGTH = getattr(settings, 'TWITTER_MAXLENGTH', 140)
def post_to_twitter(sender, instance, *args, **kwargs):
if not kwargs.get('created'):
return False
try:
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_key = os.environ['TWITTER_ACCESS_KEY']
access_secret = os.environ['TWITTER_ACCESS_SECRET']
except KeyError:
print 'WARNING: Twitter account not configured.'
return False
try:
text = instance.get_twitter_message()
except AttributeError:
text = unicode(instance)
mesg = u'%s' % (text)
if len(mesg) > TWITTER_MAXLENGTH:
size = len(mesg + '...') - TWITTER_MAXLENGTH
mesg = u'%s...' % (text[:-size])
import logging
logger = logging.getLogger('testlogger')
if not settings.DEBUG:
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
file = default_storage.open(instance.screenshot.file.name, 'rb')
media_ids = api.media_upload(filename=unidecode(instance.screenshot.file.name), file=file)
params = dict(status=mesg, media_ids=[media_ids.media_id_string])
api.update_status(**params)
except Exception, ex:
print 'ERROR:', str(ex)
logger.debug('rem %s' % str(ex))
return False
signals.post_save.connect(post_to_twitter, sender=Issue)
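# The Twitter hook above reads its credentials from the environment; the
# variable names used are TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET,
# TWITTER_ACCESS_KEY and TWITTER_ACCESS_SECRET. Tweets are only sent when
# settings.DEBUG is False.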
class Hunt(models.Model):
user = models.ForeignKey(User)
url = models.URLField()
prize = models.IntegerField()
logo = models.ImageField(upload_to="logos", null=True, blank=True)
plan = models.CharField(max_length=10)
txn_id = models.CharField(max_length=50, null=True, blank=True)
color = models.CharField(max_length=10, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@property
def domain_title(self):
parsed_url = urlparse(self.url)
return parsed_url.netloc.split(".")[-2:][0].title()
class Meta:
ordering = ['-id']
class Points(models.Model):
user = models.ForeignKey(User)
issue = models.ForeignKey(Issue, null=True, blank=True)
domain = models.ForeignKey(Domain, null=True, blank=True)
score = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# @receiver(user_logged_in, dispatch_uid="some.unique.string.id.for.allauth.user_logged_in")
# def user_logged_in_(request, user, **kwargs):
# if not settings.TESTING:
# action.send(user, verb='logged in')
class InviteFriend(models.Model):
sender = models.ForeignKey(User)
recipient = models.EmailField()
sent = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
ordering = ('-sent',)
verbose_name = 'invitation'
verbose_name_plural = 'invitations'
def user_images_path(instance, filename):
from django.template.defaultfilters import slugify
filename, ext = os.path.splitext(filename)
return 'avatars/user_{0}/{1}{2}'.format(instance.user.id, slugify(filename), ext)
class UserProfile(models.Model):
title = (
(0, 'Unrated'),
(1, 'Bronze'),
(2, 'Silver'),
(3, 'Gold'),
(4, 'Platinum'),
)
follows = models.ManyToManyField('self', related_name='follower', symmetrical=False, blank=True)
user = AutoOneToOneField('auth.user', related_name="userprofile")
user_avatar = models.ImageField(upload_to=user_images_path, blank=True, null=True)
title = models.IntegerField(choices=title, default=0)
winnings = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
issue_upvoted = models.ManyToManyField(Issue, blank=True, related_name="upvoted")
issue_saved = models.ManyToManyField(Issue, blank=True, related_name="saved")
def avatar(self, size=36):
if self.user_avatar:
return self.user_avatar.url
for account in self.user.socialaccount_set.all():
if 'avatar_url' in account.extra_data:
return account.extra_data['avatar_url']
elif 'picture' in account.extra_data:
return account.extra_data['picture']
def __unicode__(self):
return self.user.email
def create_profile(sender, **kwargs):
user = kwargs["instance"]
if kwargs["created"]:
profile = UserProfile(user=user)
profile.save()
post_save.connect(create_profile, sender=User)
|
agpl-3.0
| 2,311,262,947,357,958,700
| 32.717391
| 112
| 0.634153
| false
| 3.692857
| false
| false
| false
|
mark-r-g/hydrus
|
tests/test_rapidclus.py
|
1
|
1819
|
# Mark Gatheman <markrg@protonmail.com>
#
# This file is part of Hydrus.
#
# Hydrus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hydrus. If not, see <http://www.gnu.org/licenses/>.
import random
from collections import Counter
import numpy as np
from hydrus.rapidclus import close_outer, close_inner, choose_initial_seeds
from hydrus.rapidclus import rapidclus
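# Inferred from the assertions below (not documented in rapidclus itself):
# close_inner(seq) returns (i, j, seq[i], seq[j], gap) for the closest pair of
# adjacent elements, and close_outer(seq, x) returns (index, value, distance)
# for the element of seq nearest to x.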
def test_close_inner():
assert close_inner([1, 3, 6, 10, 11]) == (3, 4, 10, 11, 1)
assert close_inner(range(1, 100, 5)) == (0, 1, 1, 6, 5)
def test_close_outer():
assert close_outer([1, 3, 6, 10, 11], 7) == (2, 6, 1)
assert close_outer([1, 3, 6, 10, 11], 0) == (0, 1, 1)
assert close_outer([1, 3, 6, 10, 11], 111) == (4, 11, 100)
def test_choose_initial_seeds():
assert choose_initial_seeds([1, 3, 6, 10, 11, 100], 3) == [1, 11, 100]
assert choose_initial_seeds([1, 3, 6, 10, 11, 100], 5) == [1, 3, 6, 11, 100]
random.seed(36261837)
data = [int(random.gauss(0,1000)) for _ in range(100)]
assert choose_initial_seeds(data, 5) == [-2376, -862, 521, 1948, 3239]
def test_rapidclus():
random.seed(12521184)
data = [random.gauss(0,1) for _ in range(1000)]
assert sorted(Counter(rapidclus(data)).values()) == [34, 41, 233, 251, 441]
assert rapidclus(data) == rapidclus(np.array(data))
|
gpl-3.0
| -8,838,705,027,626,463,000
| 35.38
| 80
| 0.671798
| false
| 2.943366
| false
| false
| false
|
labase/surdonews
|
src/surdonews/leao/main.py
|
1
|
5477
|
from jqueryui import jq
from browser import document, html
from superpython.virgem.main import Sala, Labirinto, Cena, INVENTARIO  # imported from the "virgem" game
STYLE = dict(position="absolute", width=300, left=0, top=0, background="blue")  # background color is switched at the bottom of the file
STYLE["min-height"] = "300px"
IMAGEM = "http://s16.postimg.org/k81hwi2n9/Desert.jpg"
class Leao:
SETOR = None
def __init__(self):
pass
def monta(self):
NONE = [None] * 4
imn = "https://upload.wikimedia.org/wikipedia/commons/1/1e/Est%C3%BAdio_-_TV_Cultura_Montenegro.jpg"
iml = "http://mochilaotrips.com/wp-content/uploads/2013/03/IMG_1447.jpg"
ims = "https://upload.wikimedia.org/wikipedia/commons/0/01/Morro_de_Castelo_Branco,_aspectos_1,_Castelo_Branco,_concelho_da_Horta,_ilha_do_Faial,_A%C3%A7ores,_Portugal.JPG"
imo = "http://www.unicos.cc/wp-content/uploads/2014/12/jornalismo-1-951x476.jpg"
irl = "http://www.vipcomm.com.br/site/upload/sbHulk_GN_150614026.jpg"
iro = "https://blogpontodeonibus.files.wordpress.com/2013/02/photodownload-php.jpg"
iro = "http://imagens.canaltech.com.br/38560.54878-Tirar-fotos.jpg"
irn = "http://7diasverdes.com.br/wp-content/uploads/2013/07/Bicicleta-de-passeio.jpg"
irs = "http://www.boulevardshopping.com.br/novo/wp-content/uploads/2012/02/Mcdonalds.jpg"
isn = "http://www.comercialvidoto.com.br/site/wgc_media/photos/Banco-pe-de-Ferro-Tamandua.png"
isl = "http://andif.com.br/imagens/noticias/Banco_Santander_mjg.jpg"
iso = "http://imguol.com/2013/01/08/fiat-mille-economy-1357657820399_956x500.jpg"
iss = "http://images.forwallpaper.com/files/images/a/a809/a809de18/32241/notepad.jpg"
desk = "https://blogpontodeonibus.files.wordpress.com/2012/07/expresso_brasileirold_chassiscania_1.jpg"
drawer = "http://s.glbimg.com/og/rg/f/original/2010/07/09/tiago606.jpg"
imageM = ""
        sala_norte = Sala([isn, desk, iss, iso], NONE)  # sea
        sala_leste = Sala([isn, isl, iss, iso], NONE)  # sea
        sala_sul = Sala([irn, irl, irs, iro], NONE)  # desert
        sala_oeste = Sala([isn, isl, iss, iso], NONE)  # sea
salas = [sala_norte.norte, sala_leste.leste, sala_sul.sul, sala_oeste.oeste]
sala_centro = Sala([imn, iml, ims, imo], salas)
labirinto = Leao.SETOR = Labirinto([
sala_centro, sala_norte, sala_leste, sala_sul, sala_oeste])
        labirinto.norte.leste.meio = Cena(img=imageM)
        labirinto.sul.sul.meio = Cena(vai=self.help)  # changed
        labirinto.leste.sul.meio = Cena(vai=self.pega_invent)  # changed
        # Attach the objective scene to the starting scene so the maze object
        # itself is preserved instead of being rebound to a bare Cena
        # (placement on centro.norte is an assumption).
        labirinto.centro.norte.meio = Cena(vai=self.objetivo)
        return labirinto
def nao_monta(self):
pass
def vai(self):
labirinto = self.monta()
self.monta = self.nao_monta
labirinto.centro.norte.vai()
return labirinto
"""def pega_card(self):
riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" #link da imagem
flag = None
def clicou(_):
#hipótese de flag
input("Você não está num meio de transporte.")
if not "card" in INVENTARIO.inventario: #Se o Rio Card não estiver no inventário significa que ele pegou
input("Você pegou o RioCard.")
INVENTARIO.bota("card", riocard, clicou)
else:
input("Atenção: o inventário está vazio!")"""
def pega_invent(self):
riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" # link da imagem
flag = None
def clicou(_):
            # flag case (placeholder)
input("Você não está num meio de transporte.")
if not "card" in INVENTARIO.inventario: # Se o Rio Card não estiver no inventário significa que ele pegou
input("Você pegou o RioCard.")
INVENTARIO.bota("card", riocard, clicou)
else:
input("Atenção: o inventário está vazio!")
def help(self):
ajuda = "http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/256/Actions-help-hint-icon.png"
flag = None
def clicou(_):
            # in case the flag is set
input("Você precisa ir na sala à leste do atendimento.")
if not "ajuda" in INVENTARIO.inventario:
input("Você quer saber sobre o meu relátorio sobre a gripe? Ele na escrivaninha na sala lesta à recepção.")
INVENTARIO.bota("ajuda", ajuda, clicou)
else:
input("Achou o relatorio? Procurou na sala certa?")
"""
def objetivo(self):
ajuda = "http://www.iconsdownload.net/icons/256/11335-target-icon.png"
flag = None
def clicou(_):
input("Objetivo do programa: Você é um repórter e precisa achar o relatório com o resumo de todas as matérias que você vai conquistar nos diversos lugares do labirinto.")
"""
INSTANCIA = None
def leao():
def cria_leao():
global INSTANCIA
INSTANCIA = Leao()
if not INSTANCIA:
cria_leao()
return INSTANCIA
if __name__ == "__main__":
change_bg = "Para qual cor você quer mudar o plano de fundo? azul/branco"
escolha = input(change_bg)
if escolha == "azul":
background = "blue"
lab = leao()
print(INSTANCIA)
INVENTARIO.inicia()
lab.vai()
# lab.centro.norte.vai()
# lab.sul.oeste.meio = metro.centro.norte
|
gpl-3.0
| -6,861,895,350,810,168,000
| 39.288889
| 182
| 0.633388
| false
| 2.627536
| false
| false
| false
|
levilucio/SyVOLT
|
GM2AUTOSAR_MM/transformation/HMapPartition.py
|
1
|
3685
|
from core.himesis import Himesis
import uuid
class HMapPartition(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule MapPartition.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMapPartition, self).__init__(name='HMapPartition', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """MapPartition"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MapPartition')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Partition() node
self.add_node()
self.vs[3]["mm__"] = """Partition"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Partition()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class PhysicalNode() node
self.add_node()
self.vs[5]["mm__"] = """PhysicalNode"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class PhysicalNode()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class Module() node
self.add_node()
self.vs[7]["mm__"] = """Module"""
self.vs[7]["attr1"] = """1"""
# match_contains node for class Module()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# apply class SwcToEcuMapping() node
self.add_node()
self.vs[9]["mm__"] = """SwcToEcuMapping"""
self.vs[9]["attr1"] = """1"""
# apply_contains node for class SwcToEcuMapping()
self.add_node()
self.vs[10]["mm__"] = """apply_contains"""
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[11]["attr1"] = """partition"""
self.vs[11]["mm__"] = """directLink_S"""
# match association Partition--module-->Module node
self.add_node()
self.vs[12]["attr1"] = """module"""
self.vs[12]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Partition()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class PhysicalNode()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class Module()
(1,10), # applymodel -> apply_contains
(10,9), # apply_contains -> apply_class SwcToEcuMapping()
(5,11), # match_class PhysicalNode() -> association partition
(11,3), # association partition -> match_class Partition()
(3,12), # match_class Partition() -> association module
(12,7), # association module -> match_class Module()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((9,'shortName'),('concat',(('constant','Swc2EcuMapping_'),(3,'name')))), ]
|
mit
| 4,636,470,346,832,838,000
| 32.5
| 104
| 0.493623
| false
| 3.962366
| false
| false
| false
|
benpetty/Code-Katas
|
katas/sort_cards/sort_cards.py
|
1
|
1347
|
"""Sort Cards.
https://www.codewars.com/kata/56f399b59821793533000683
Write a function sort_cards() that sorts a shuffled list of cards,
so that any given list of cards is sorted by rank,
no matter the starting collection.
All cards in the list are represented as strings,
so that sorted list of cards looks like this:
['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
Example:
    >>> sort_cards(
    ...     ['3', '9', 'A', '5', 'T', '8', '2', '4', 'Q', '7', 'J', '6', 'K'])
    ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
Hint: Tests will have many occurrences of same rank cards,
as well as vary in length. You can assume though,
that input list is always going to have at least 1 element.
"""
def sort_cards(cards):
"""Input a list of strings representing cards and return them sorted."""
rank = {
"A": 0,
"2": 1,
"3": 2,
"4": 3,
"5": 4,
"6": 5,
"7": 6,
"8": 7,
"9": 8,
"T": 9,
"J": 10,
"Q": 11,
"K": 12,
}
    ranked = []
    for card in cards:
        card = str(card).upper()
        if card in rank:
            # Pair each card with its numeric rank so tuples sort by rank
            # first; cards outside the rank table are skipped rather than
            # mixed in as bare strings (which broke the final card[1] lookup).
            ranked.append((rank[card], card))
    ranked.sort()
    return [card for _, card in ranked]
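# Convenience runner (not part of the original kata): executes the docstring
# example above as a doctest.
if __name__ == "__main__":
    import doctest
    doctest.testmod()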
|
mit
| 3,330,309,443,274,831,400
| 23.944444
| 76
| 0.513734
| false
| 2.986696
| false
| false
| false
|
karlnapf/kameleon-mcmc
|
kameleon_mcmc/tools/Visualise.py
|
1
|
5656
|
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from kameleon_mcmc.distribution.Gaussian import Gaussian
from matplotlib.patches import Ellipse
from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca
from numpy import linspace
from numpy.linalg.linalg import eigh
from numpy import zeros, array, exp, arctan2, sqrt
import numpy
class Visualise(object):
def __init__(self):
pass
@staticmethod
def get_plotting_arrays(distribution):
bounds = distribution.get_plotting_bounds()
assert(len(bounds) == 2)
Xs = linspace(bounds[0][0], bounds[0][1])
Ys = linspace(bounds[1][0], bounds[1][1])
return Xs, Ys
@staticmethod
def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None):
"""
Plots the density of a given Distribution instance and plots some
samples on top.
"""
if Xs is None or Ys is None:
Xs, Ys = Visualise.get_plotting_arrays(distribution)
        Visualise.plot_density(distribution, Xs, Ys, log_domain=log_density)
if Z is not None:
hold(True)
Visualise.plot_data(Z)
hold(False)
@staticmethod
def plot_density(distribution, Xs, Ys, log_domain=False):
"""
Plots a 2D density
        distribution - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
assert(distribution.dimension == 2)
D = zeros((len(Xs), len(Ys)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False):
"""
Contour-plots a 2D density. If Gaussian, plots 1.96 interval contour only
        distribution - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
if isinstance(distribution, Gaussian) and log_domain == False:
gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution))
gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \
markersize=3.0, markeredgewidth=.1)
return
assert(distribution.dimension == 2)
if Xs is None:
(xmin, xmax), _ = distribution.get_plotting_bounds()
Xs = linspace(xmin, xmax)
if Ys is None:
_, (ymin, ymax) = distribution.get_plotting_bounds()
Ys = linspace(ymin, ymax)
D = zeros((len(Ys), len(Xs)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
contour(Xs, Ys, D, origin='lower')
@staticmethod
def plot_array(Xs, Ys, D):
"""
Plots a 2D array
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
D - array to plot
"""
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def plot_data(Z, y=None):
"""
Plots collection of 2D points and optionally adds a marker to one of them
Z - set of row-vectors points to plot
y - one point that is marked in red, might be None
"""
plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1)
if y is not None:
plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1)
@staticmethod
def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1):
"""
        Returns an ellipse artist for nstd times the standard deviation of this
Gaussian
"""
assert(isinstance(gaussian, Gaussian))
assert(gaussian.dimension == 2)
# compute eigenvalues (ordered)
vals, vecs = eigh(gaussian.L.dot(gaussian.L.T))
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
# width and height are "full" widths, not radius
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \
edgecolor="red", fill=False, linewidth=linewidth)
return e
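# A minimal usage sketch (hypothetical; the Gaussian constructor arguments
# and the .samples attribute of its sample() result are assumptions about
# the kameleon_mcmc API):
#
#   from numpy import eye, zeros
#   from matplotlib.pyplot import show
#   gaussian = Gaussian(mu=zeros(2), Sigma=eye(2))
#   Z = gaussian.sample(200).samples
#   Visualise.visualise_distribution(gaussian, Z)
#   show()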
|
bsd-2-clause
| 31,964,555,859,623,480
| 33.072289
| 90
| 0.563296
| false
| 3.793427
| false
| false
| false
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/add_newdocs.py
|
1
|
44163
|
from lib import add_newdoc
add_newdoc('numpy.core','dtype',
[('fields', "Fields of the data-type or None if no fields"),
('names', "Names of fields or None if no fields"),
('alignment', "Needed alignment for this data-type"),
('byteorder',
"Little-endian (<), big-endian (>), native (=), or "\
"not-applicable (|)"),
('char', "Letter typecode for this data-type"),
('type', "Type object associated with this data-type"),
('kind', "Character giving type-family of this data-type"),
('itemsize', "Size of each item"),
('hasobject', "Non-zero if Python objects are in "\
"this data-type"),
('num', "Internally-used number for builtin base"),
('newbyteorder',
"""self.newbyteorder(<endian>)
returns a copy of the dtype object with altered byteorders.
If <endian> is not given all byteorders are swapped.
Otherwise endian can be '>', '<', or '=' to force a particular
byteorder. Data-types in all fields are also updated in the
new dtype object.
"""),
("__reduce__", "self.__reduce__() for pickling"),
("__setstate__", "self.__setstate__() for pickling"),
("subdtype", "A tuple of (descr, shape) or None"),
("descr", "The array_interface data-type descriptor."),
("str", "The array interface typestring."),
("name", "The name of the true data-type"),
("base", "The base data-type or self if no subdtype"),
("shape", "The shape of the subdtype or (1,)"),
("isbuiltin", "Is this a built-in data-type?"),
("isnative", "Is the byte-order of this data-type native?")
]
)
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
# attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""documentation needed
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""An N-d tuple of current coordinates.
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""documentation needed
"""))
# functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""copy() Get a copy of the iterator as a 1-d array
"""))
###############################################################################
#
# broadcast
#
###############################################################################
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""current index in broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""tuple of individual iterators
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""number of dimensions of broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""number of iterators
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""shape of broadcasted result
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""total size of broadcasted result
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray','array',
"""array(object, dtype=None, copy=1,order=None, subok=0,ndmin=0)
Return an array from object with the specified date-type.
Inputs:
object - an array, any object exposing the array interface, any
object whose __array__ method returns an array, or any
(nested) sequence.
dtype - The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy - If true, then force a copy. Otherwise a copy will only occur
if __array__ returns a copy, obj is a nested sequence, or
a copy is needed to satisfy any of the other requirements
order - Specify the order of the array. If order is 'C', then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'FORTRAN', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is None, then the returned array may
be in either C-, or Fortran-contiguous order or even
discontiguous.
subok - If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array
ndmin - Specifies the minimum number of dimensions that the resulting
array should have. 1's will be pre-pended to the shape as
needed to meet this requirement.
""")
add_newdoc('numpy.core.multiarray','empty',
"""empty((d1,...,dn),dtype=float,order='C')
Return a new array of shape (d1,...,dn) and given type with all its
entries uninitialized. This can be faster than zeros.
""")
add_newdoc('numpy.core.multiarray','scalar',
"""scalar(dtype,obj)
Return a new scalar array of the given type initialized with
obj. Mainly for pickle support. The dtype must be a valid data-type
descriptor. If dtype corresponds to an OBJECT descriptor, then obj
can be any object, otherwise obj must be a string. If obj is not given
it will be interpreted as None for object type and zeros for all other
types.
""")
add_newdoc('numpy.core.multiarray','zeros',
"""zeros((d1,...,dn),dtype=float,order='C')
Return a new array of shape (d1,...,dn) and type typecode with all
    its entries initialized to zero.
""")
add_newdoc('numpy.core.multiarray','set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray','fromstring',
"""fromstring(string, dtype=float, count=-1, sep='')
Return a new 1d array initialized from the raw binary data in string.
If count is positive, the new array will have count elements, otherwise its
size is determined by the size of string. If sep is not empty then the
string is interpreted in ASCII mode and converted to the desired number type
using sep as the separator between elements (extra whitespace is ignored).
""")
add_newdoc('numpy.core.multiarray','fromiter',
"""fromiter(iterable, dtype, count=-1)
Return a new 1d array initialized from iterable. If count is
    nonnegative, the new array will have count elements, otherwise its
size is determined by the generator.
""")
add_newdoc('numpy.core.multiarray','fromfile',
"""fromfile(file=, dtype=float, count=-1, sep='') -> array.
Required arguments:
file -- open file object or string containing file name.
Keyword arguments:
dtype -- type and order of the returned array (default float)
count -- number of items to input (default all)
      sep -- separator between items if file is a text file (default "")
Return an array of the given data type from a text or binary file. The
'file' argument can be an open file or a string with the name of a file to
read from. If 'count' == -1 the entire file is read, otherwise count is the
number of items of the given type to read in. If 'sep' is "" it means to
read binary data from the file using the specified dtype, otherwise it gives
the separator between elements in a text file. The 'dtype' value is also
used to determine the size and order of the items in binary files.
Data written using the tofile() method can be conveniently recovered using
this function.
WARNING: This function should be used sparingly as the binary files are not
    platform independent. In particular, they contain no endianness or datatype
information. Nevertheless it can be useful for reading in simply formatted
or binary data quickly.
""")
add_newdoc('numpy.core.multiarray','frombuffer',
"""frombuffer(buffer=, dtype=float, count=-1, offset=0)
Returns a 1-d array of data type dtype from buffer. The buffer
argument must be an object that exposes the buffer interface. If
count is -1 then the entire buffer is used, otherwise, count is the
size of the output. If offset is given then jump that far into the
buffer. If the buffer has data that is out not in machine byte-order,
than use a propert data type descriptor. The data will not be
byteswapped, but the array will manage it in future operations.
""")
add_newdoc('numpy.core.multiarray','concatenate',
"""concatenate((a1, a2, ...), axis=0)
Join arrays together.
The tuple of sequences (a1, a2, ...) are joined along the given axis
(default is the first one) into a single numpy array.
Example:
>>> concatenate( ([0,1,2], [5,6,7]) )
array([0, 1, 2, 5, 6, 7])
""")
add_newdoc('numpy.core.multiarray','inner',
"""inner(a,b)
Returns the dot product of two arrays, which has shape a.shape[:-1] +
b.shape[:-1] with elements computed by the product of the elements
from the last dimensions of a and b.
""")
add_newdoc('numpy.core','fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray','correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray','arange',
"""arange([start,] stop[, step,], dtype=None)
For integer arguments, just like range() except it returns an array
whose type can be specified by the keyword argument dtype. If dtype
is not specified, the type of the result is deduced from the type of
the arguments.
For floating point arguments, the length of the result is ceil((stop -
start)/step). This rule may result in the last element of the result
being greater than stop.
""")
add_newdoc('numpy.core.multiarray','_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray','_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray','set_string_function',
"""set_string_function(f, repr=1)
Set the python function f to be the function used to obtain a pretty
printable string version of an array whenever an array is printed.
f(M) should expect an array argument M, and should return a string
consisting of the desired representation of M for printing.
""")
add_newdoc('numpy.core.multiarray','set_numeric_ops',
"""set_numeric_ops(op=func, ...)
Set some or all of the number methods for all array objects. Do not
forget **dict can be used as the argument list. Return the functions
that were replaced, which can be stored and set later.
""")
add_newdoc('numpy.core.multiarray','where',
"""where(condition, x, y) or where(condition)
Return elements from `x` or `y`, depending on `condition`.
*Parameters*:
condition : array of bool
When True, yield x, otherwise yield y.
x,y : 1-dimensional arrays
Values from which to choose.
*Notes*
This is equivalent to
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
The result is shaped like `condition` and has elements of `x`
or `y` where `condition` is respectively True or False.
In the special case where only `condition` is given, the
tuple condition.nonzero() is returned instead.
*Examples*
>>> where([True,False,True],[1,2,3],[4,5,6])
array([1, 5, 3])
""")
add_newdoc('numpy.core.multiarray','lexsort',
"""lexsort(keys=, axis=-1) -> array of indices. Argsort with list of keys.
Perform an indirect sort using a list of keys. The first key is sorted,
then the second, and so on through the list of keys. At each step the
previous order is preserved when equal keys are encountered. The result is
a sort on multiple keys. If the keys represent columns of a spreadsheet,
for example, this would sort using multiple columns (the last key being
used for the primary sort order, the second-to-last key for the secondary
sort order, and so on). The keys argument must be a sequence of things
that can be converted to arrays of the same shape.
Parameters:
keys : sequence of array-likes
    Sequence of keys to be sorted, with the last key being the primary
    sort key.
axis : integer
Axis to be indirectly sorted. None indicates that the flattened
array should be used. Default is -1.
Returns:
indices : integer array
Array of indices that sort the keys along the specified axis. The
array has the same shape as the keys.
SeeAlso:
argsort : indirect sort
sort : inplace sort
""")
add_newdoc('numpy.core.multiarray','can_cast',
"""can_cast(from=d1, to=d2)
Returns True if data type d1 can be cast to data type d2 without
losing precision.
""")
add_newdoc('numpy.core.multiarray','newbuffer',
"""newbuffer(size)
Return a new uninitialized buffer object of size bytes
""")
add_newdoc('numpy.core.multiarray','getbuffer',
"""getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset. Default is the entire buffer. A
read-write buffer is attempted followed by a read-only buffer.
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type-descriptor object
details the data-type in an array (including byteorder and any
fields). An array can be constructed using the numpy.array
command. Arrays are sequence, mapping and numeric objects.
More information is available in the numpy module and by looking
at the methods and attributes of an array.
ndarray.__new__(subtype, shape=, dtype=float, buffer=None,
offset=0, strides=None, order=None)
There are two modes of creating an array using __new__:
1) If buffer is None, then only shape, dtype, and order
are used
2) If buffer is an object exporting the buffer interface, then
all keywords are interpreted.
The dtype parameter can be any object that can be interpreted
as a numpy.dtype object.
No __init__ method is needed because the array is fully
initialized after the __new__ method.
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""Base object if memory is from some other object.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""A ctypes interface object.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Buffer object pointing to the start of the data.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""Data-type for the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""Imaginary part of the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""Length of one element in bytes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""Special object providing array flags.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""A 1-d flat iterator.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""Number of bytes in the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""Number of array dimensions.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""Real part of the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""Tuple of array dimensions.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""Number of elements in the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""Tuple of bytes to step in each dimension.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""Same as self.transpose() except self is returned for self.ndim < 2.
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as a from ndarray obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__(|order) -> copy, possibly with different order.
Return a copy of the array.
Argument:
order -- Order of returned copy (default 'C')
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if a is already in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, typecode, isfortran, rawdata)
For unpickling.
Arguments:
version -- optional pickle version. If omitted defaults to 0.
shape -- a tuple giving the shape
typecode -- a typecode
isFortran -- a bool stating whether the data is in Fortran order
rawdata -- a binary string with the data (or a list if Object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
""" a.all(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
""" a.any(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
""" a.argmax(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
""" a.argmin(axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""a.argsort(axis=-1, kind='quicksort', order=None) -> indices
Perform an indirect sort along the given axis using the algorithm specified
by the kind keyword. It returns an array of indices of the same shape as
'a' that index data along the given axis in sorted order.
:Parameters:
axis : integer
Axis to be indirectly sorted. None indicates that the flattened
array should be used. Default is -1.
kind : string
Sorting algorithm to use. Possible values are 'quicksort',
'mergesort', or 'heapsort'. Default is 'quicksort'.
order : list type or None
When a is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
:Returns:
indices : integer array
Array of indices that sort 'a' along the specified axis.
:SeeAlso:
- lexsort : indirect stable sort with multiple keys
- sort : inplace sort
:Notes:
------
The various sorts are characterized by average speed, worst case
performance, need for work space, and whether they are stable. A stable
sort keeps items with the same key in the same relative order. The three
available algorithms have the following properties:
|------------------------------------------------------|
| kind | speed | worst case | work space | stable|
|------------------------------------------------------|
|'quicksort'| 1 | O(n^2) | 0 | no |
|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes |
|'heapsort' | 3 | O(n*log(n)) | 0 | no |
|------------------------------------------------------|
All the sort algorithms make temporary copies of the data when the sort is not
along the last axis. Consequently, sorts along the last axis are faster and use
less space than sorts along other axes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""a.astype(t) -> Copy of array cast to type t.
Cast array m to type t. t can be either a string representing a typecode,
or a python type object of type int, float, or complex.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""a.byteswap(False) -> View or copy. Swap the bytes in the array.
Swap the bytes in the array. Return the byteswapped array. If the first
argument is True, byteswap in-place and return a reference to self.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
""" a.choose(b0, b1, ..., bn, out=None, mode='raise')
Return an array that merges the b_i arrays together using 'a' as
the index. The b_i arrays and 'a' must all be broadcastable to the
same shape. The output at a particular position is the input
array b_i at that position depending on the value of 'a' at that
position. Therefore, 'a' must be an integer array with entries
from 0 to n.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""a.clip(min=, max=, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""a.compress(condition=, axis=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""a.conj()
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""a.conjugate()
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""a.copy(|order) -> copy, possibly with different order.
Return a copy of the array.
Argument:
order -- Order of returned copy (default 'C')
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if a is already in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""a.cumprod(axis=None, dtype=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""a.cumsum(axis=None, dtype=None, out=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals
If a is 2-d, return the diagonal of self with the given offset, i.e., the
collection of elements of the form a[i,i+offset]. If a is n-d with n > 2,
then the axes specified by axis1 and axis2 are used to determine the 2-d
subarray whose diagonal is returned. The shape of the resulting array can
be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
:Parameters:
offset : integer
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to main diagonal.
axis1 : integer
Axis to be used as the first axis of the 2-d subarrays from which
the diagonals should be taken. Defaults to first index.
axis2 : integer
Axis to be used as the second axis of the 2-d subarrays from which
the diagonals should be taken. Defaults to second index.
:Returns:
array_of_diagonals : same type as original array
If a is 2-d, then a 1-d array containing the diagonal is returned.
If a is n-d, n > 2, then an array of diagonals is returned.
:SeeAlso:
- diag : matlab workalike for 1-d and 2-d arrays.
- diagflat : creates diagonal arrays
- trace : sum along diagonals
Examples
--------
>>> a = arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
>>> a = arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0,-2,-1)
array([[0, 3],
[4, 7]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file) Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load
Arguments:
file -- string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""a.dumps() returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""a.fill(value) -> None. Fill the array with the scalar value.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""a.flatten([fortran]) return a 1-d array (always copy)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""a.getfield(dtype, offset) -> field of array as given type.
Returns a field of the given array as a certain type. A field is a view of
the array data with each itemsize determined by the given type and the
offset into the current array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""a.item() -> copy of first array item as Python scalar.
Copy the first element of array to a standard Python scalar and return
it. The array must be of size one.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""a.max(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""a.mean(axis=None, dtype=None, out=None) -> mean
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the means are computed. The default is
to compute the mean of the flattened array.
dtype : type
Type to use in computing the means. For arrays of
integer type the default is float32, for arrays of float types it
is the same as the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
mean : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- var : variance
- std : standard deviation
Notes
-----
The mean is the sum of the elements along the axis divided by the
number of elements.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""a.min(axis=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""a.newbyteorder(<byteorder>) is equivalent to
a.view(a.dtype.newbyteorder(<byteorder>))
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""a.nonzero() returns a tuple of arrays
Returns a tuple of arrays, one for each dimension of a,
containing the indices of the non-zero elements in that
dimension. The corresponding non-zero values can be obtained
with
a[a.nonzero()].
To group the indices by element, rather than dimension, use
transpose(a.nonzero())
instead. The result of this is always a 2d array, with a row for
each non-zero element.;
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""a.prod(axis=None, dtype=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""a.ptp(axis=None) a.max(axis)-a.min(axis)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""a.put(indices, values, mode) sets a.flat[n] = values[n] for
each n in indices. If values is shorter than indices then it
will repeat.
"""))
add_newdoc('numpy.core.multiarray', 'putmask',
"""putmask(a, mask, values) sets a.flat[n] = values[n] for each n where
mask.flat[n] is true. If values is not the same size as a and mask then
it will repeat. This gives behavior different from a[mask] = values.
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""a.ravel([fortran]) return a 1-d array (copy only if needed)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""a.repeat(repeats=, axis=none)
copy elements of a, repeats times. the repeats argument must be a sequence
of length a.shape[axis] or a scalar.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""a.reshape(d1, d2, ..., dn, order='c')
Return a new array from this one. The new array must have the same number
of elements as self. Returns a view if possible, otherwise a copy is made;
a ValueError is raised if the sizes are incompatible.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""a.resize(new_shape, refcheck=True, order=False) -> None. Change array shape.
Change size and shape of self inplace. Array must own its own memory and
not be referenced by other arrays. Returns None.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""a.round(decimals=0, out=None) -> out (a). Rounds to 'decimals' places.
Keyword arguments:
decimals -- number of decimals to round to (default 0). May be negative.
out -- existing array to use for output (default a).
Return:
Reference to out, where None specifies the original array a.
Round to the specified number of decimals. When 'decimals' is negative it
specifies the number of positions to the left of the decimal point. The
real and imaginary parts of complex numbers are rounded separately. Nothing
is done if the array is not of float type and 'decimals' is >= 0.
The keyword 'out' may be used to specify a different array to hold the
result rather than the default 'a'. If the type of the array specified by
'out' differs from that of 'a', the result is cast to the new type,
otherwise the original type is kept. Floats round to floats by default.
Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to
0.0, etc. Results may also be surprising due to the inexact representation
of decimal fractions in IEEE floating point and the errors introduced in
scaling the numbers when 'decimals' is something other than 0.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""a.searchsorted(v, side='left') -> index array.
Find the indices into a sorted array such that if the corresponding keys in
v were inserted before the indices the order of a would be preserved. If
side='left', then the first such index is returned. If side='right', then
the last such index is returned. If there is no such index because the key
is out of bounds, then the length of a is returned, i.e., the key would
need to be appended. The returned index array has the same shape as v.
:Parameters:
v : array or list type
Array of keys to be searched for in a.
side : string
Possible values are : 'left', 'right'. Default is 'left'. Return
the first or last index where the key could be inserted.
:Returns:
indices : integer array
The returned array has the same shape as v.
:SeeAlso:
- sort
- histogram
:Notes:
-------
The array a must be 1-d and is assumed to be sorted in ascending order.
Searchsorted uses binary search to find the required insertion points.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""m.setfield(value, dtype, offset) -> None.
places val into field of the given array defined by the data type and offset.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""a.setflags(write=None, align=None, uic=None)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""a.sort(axis=-1, kind='quicksort', order=None) -> None.
Perform an inplace sort along the given axis using the algorithm specified
by the kind keyword.
:Parameters:
axis : integer
Axis to be sorted along. None indicates that the flattened array
should be used. Default is -1.
kind : string
Sorting algorithm to use. Possible values are 'quicksort',
'mergesort', or 'heapsort'. Default is 'quicksort'.
order : list type or None
When a is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
:Returns:
None
:SeeAlso:
- argsort : indirect sort
- lexsort : indirect stable sort on multiple keys
- searchsorted : find keys in sorted array
:Notes:
------
The various sorts are characterized by average speed, worst case
performance, need for work space, and whether they are stable. A stable
sort keeps items with the same key in the same relative order. The three
available algorithms have the following properties:
|------------------------------------------------------|
| kind | speed | worst case | work space | stable|
|------------------------------------------------------|
|'quicksort'| 1 | O(n^2) | 0 | no |
|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes |
|'heapsort' | 3 | O(n*log(n)) | 0 | no |
|------------------------------------------------------|
All the sort algorithms make temporary copies of the data when the sort is not
along the last axis. Consequently, sorts along the last axis are faster and use
less space than sorts along other axes.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""m.squeeze() eliminate all length-1 dimensions
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""a.std(axis=None, dtype=None, out=None) -> standard deviation.
Returns the standard deviation of the array elements, a measure of the
spread of a distribution. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : type
Type to use in computing the standard deviation. For arrays of
integer type the default is float32, for arrays of float types it
is the same as the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
standard deviation : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- var : variance
- mean : average
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)). The
computed standard deviation is biased, i.e., the mean is computed by
dividing by the number of elements, N, rather than by N-1.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""a.sum(axis=None, dtype=None) -> Sum of array over given axis.
Sum the array over the given axis. If the axis is None, sum over
all dimensions of the array.
The optional dtype argument is the data type for the returned
value and intermediate calculations. The default is to upcast
(promote) smaller integer types to the platform-dependent int.
For example, on 32-bit platforms:
a.dtype                          default sum dtype
---------------------------------------------------
bool, int8, int16, int32         int32
Warning: The arithmetic is modular and no error is raised on overflow.
Examples:
>>> array([0.5, 1.5]).sum()
2.0
>>> array([0.5, 1.5]).sum(dtype=int32)
1
>>> array([[0, 1], [0, 5]]).sum(axis=0)
array([0, 6])
>>> array([[0, 1], [0, 5]]).sum(axis=1)
array([1, 5])
>>> ones(128, dtype=int8).sum(dtype=int8) # overflow!
-128
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""a.swapaxes(axis1, axis2) -> new view with axes swapped.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""a.take(indices, axis=None, out=None, mode='raise') -> new array.
The new array is formed from the elements of a indexed by indices along the
given axis.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""a.tofile(fid, sep="", format="%s") -> None. Write the data to a file.
Required arguments:
file -- an open file object or a string containing a filename
Keyword arguments:
sep -- separator for text output. Write binary if empty (default "")
format -- format string for text file output (default "%s")
A convenience function for quick storage of array data. Information on
endianness and precision is lost, so this method is not a good choice for
files intended to archive data or transport data between machines with
different endianness. Some of these problems can be overcome by outputting
the data as text files at the expense of speed and file size.
If 'sep' is empty this method is equivalent to file.write(a.tostring()). If
'sep' is not empty each data item is converted to the nearest Python type
and formatted using "format"%item. The resulting strings are written to the
file separated by the contents of 'sep'. The data is always written in "C"
(row major) order independent of the order of 'a'.
The data produced by this method can be recovered by using the function
fromfile().
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""a.tolist() -> Array as hierarchical list.
Copy the data portion of the array to a hierarchical python list and return
that list. Data items are converted to the nearest compatible Python type.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring',
"""a.tostring(order='C') -> raw copy of array data as a Python string.
Keyword arguments:
order -- order of the data item in the copy {"C","F","A"} (default "C")
Construct a Python string containing the raw bytes in the array. The order
of the data in arrays with ndim > 1 is specified by the 'order' keyword and
this keyword overrides the order of the array. The
choices are:
"C" -- C order (row major)
"Fortran" -- Fortran order (column major)
"Any" -- Current order of array.
None -- Same as "Any"
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along the offset diagonal of the array's indicated
axis1 and axis2.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""a.transpose(*axes)
Returns a view of 'a' with axes transposed. If no axes are given,
or None is passed, the order of the axes is reversed. For a 2-d
array, this is the usual matrix transpose. If axes are given,
they describe how the axes are permuted.
Example:
>>> a = array([[1,2],[3,4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1,0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1,0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""a.var(axis=None, dtype=None, out=None) -> variance
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
:Parameters:
axis : integer
Axis along which the variance is computed. The default is to
compute the variance of the flattened array.
dtype : type
Type to use in computing the variance. For arrays of integer type
the default is float32, for arrays of float types it is the same as
the array type.
out : ndarray
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
:Returns:
variance : The return type varies, see above.
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
:SeeAlso:
- std : standard deviation
- mean: average
Notes
-----
The variance is the average of the squared deviations from the mean, i.e.
var = mean((x - x.mean())**2). The computed variance is biased, i.e.,
the mean is computed by dividing by the number of elements, N, rather
than by N-1.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""a.view(<type>) -> new view of array with same data.
Type can be either a new sub-type object or a data-descriptor object
"""))
|
bsd-3-clause
| -6,906,141,212,455,126,000
| 29.797071
| 83
| 0.613206
| false
| 4.158475
| false
| false
| false
|
italomaia/turtle-linux
|
games/BubbleKing/lib/menu.py
|
1
|
13774
|
import os
import pygame
from pygame.locals import *
from pgu import engine
import data
from cnst import *
import levels
class Menu(engine.State):
def __init__(self,game):
self.game = game
def init(self):
self.font = self.game.font
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.cur = 0
self.game.lcur = 0
self.levels = []
#for fname in os.listdir(data.filepath('levels')):
#if fname[0]=='.': continue
#self.levels.append((fname,fname.replace('.tga','')))
#self.levels.sort()
for fname,title in levels.LEVELS:
self.levels.append((fname,title))
self.items = [
('play the game!','start'),
('select <L>','play'),
('help','help'),
('credits','credits'),
('quit','quit'),
]
self.rects = []
self.frame = 0
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
x,y = 0,4
fnt = self.game.fonts['title']
c =(0,0,0)
text = TITLE
img = fnt.render(text,1,c)
screen.blit(img,((SW-img.get_width())/2,y))
y += 48
fnt = self.font
text = 'high: %05d'%self.game.high
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
x = 90
for n in xrange(0,len(self.items)):
text,value = self.items[n]
text = text.replace('L',self.levels[self.game.lcur][1])
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
if n == self.cur: c = (0xaa,0xaa,0xaa)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 24
text = 'www.imitationpickles.org'
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
y = SH-(img.get_height()+4)
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.game.music_play('title')
self.frame += 1
def event(self,e):
if e.type is USEREVENT and e.action == 'down':
self.cur = (self.cur+1)%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'up':
self.cur = (self.cur-1+len(self.items))%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'left':
self.game.lcur = (self.game.lcur-1+len(self.levels))%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'right':
self.game.lcur = (self.game.lcur+1)%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'exit':
return engine.Quit(self.game)
elif e.type is USEREVENT and (e.action == 'menu' or e.action == 'jump'):
text,value = self.items[self.cur]
if value == 'start':
self.game.init_play()
self.game.lcur = 0
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'play':
self.game.init_play()
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'quit':
return engine.Quit(self.game)
elif value == 'credits':
return Transition(self.game,Credits(self.game,self))
elif value == 'help':
return Transition(self.game,Help(self.game,self))
class Transition(engine.State):
def __init__(self,game,next):
self.game,self.next = game,next
def init(self):
self.s1 = self.game.screen.convert()
self.init2()
self.frame = 0
self.total = FPS
self.inc = 0
def init2(self):
if hasattr(self.next,'init') and not hasattr(self.next,'_init'):
self.next._init = 0
self.next.init()
self.s2 = self.game.screen.convert()
self.next.paint(self.s2)
def loop(self):
#self.frame += 1
self.inc += 1
#if (self.inc%2) == 0: self.frame += 1
self.frame += 1
if self.frame == self.total:
self.game.screen.blit(self.s2,(0,0))
self.game.flip()
return self.next
def update(self,screen):
return self.paint(screen)
def paint(self,screen):
f = self.frame
t = self.total
t2 = t/2
if f < t2:
i = self.s1
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
else:
f = t2-(f-t2)
i = self.s2
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
i = pygame.transform.scale(i,(SW,SH))
screen.blit(i,(0,0))
self.game.flip()
class Intro(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = FPS
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
self.black = self.moon.convert()
self.black.fill((0,0,0))
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
if self.frame == FPS*7:
return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
screen.fill((0,0,0))
f = self.frame
inc = FPS
if 0 < f < inc:
pass
f -= inc
inc = FPS*7
if 0 < f < inc:
a = 255
if f > FPS*2:
screen.blit(self.moon,(0,0))
a = 255- ((f-FPS*2)*255/(FPS*2))
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['... July 20, 1969','man first','walked on','the moon.']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
if f < FPS:
a = 255-(f*255/FPS)
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
self.game.flip()
class Intro2(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
img = pygame.image.load(data.filepath(os.path.join('images','player','right.png')))
w = 160
self.player = pygame.transform.scale(img,(w,img.get_height()*w/img.get_width()))
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.frame = 0
def loop(self):
self.frame += 1
if self.frame == FPS*2:
return Transition(self.game,self.next)
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
#screen.fill((0,0,0))
screen.blit(self.bkgr,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['This is','the year','of the','seahorse!']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
screen.blit(self.player,(130,0))
self.game.flip()
class Prompt(engine.State):
def __init__(self,game,text,yes,no):
self.game = game
self.text = text
self.yes = yes
self.no = no
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN and e.key == K_y:
return self.yes
if e.type is KEYDOWN and e.key == K_n:
return self.no
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Pause(engine.State):
def __init__(self,game,text,next):
self.game = game
self.text = text
self.next = next
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return self.next
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Credits(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Core Team',
'',
'philhassey - director, code, levels',
'trick - tiles, sprites',
'pekuja - code, levels',
'tim - music, levels',
'DrPetter - backgrounds, sfx',
'',
'Also thanks to:',
'fydo (level), Lerc (gfx), Tee (level)',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
class Help(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Help',
'',
'Use your arrow keys to',
'move the seahorse.',
'Button 1 - Jump',
'Button 2 - Shoot',
'',
'Enemies take 3 shots unless',
'you are powered up! You can',
'ride enemy bubbles.',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
|
gpl-3.0
| 2,693,359,161,638,019,000
| 29.074236
| 102
| 0.483665
| false
| 3.370198
| false
| false
| false
|
mathiasertl/fabric
|
fabric/context_managers.py
|
1
|
20926
|
"""
Context managers for use with the ``with`` statement.
.. note:: If you are using multiple directly nested ``with`` statements, it can
be convenient to use multiple context expressions in one single with
statement. Instead of writing::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
you can write::
with cd('/path/to/app'), prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
"""
from contextlib import contextmanager
import six
import socket
import select
from fabric.thread_handling import ThreadHandler
from fabric.state import output, win32, connections, env
from fabric import state
from fabric.utils import isatty
if six.PY2:
from contextlib import nested
else:
from contextlib import ExitStack
class nested(ExitStack):
def __init__(self, *managers):
super(nested, self).__init__()
for manager in managers:
self.enter_context(manager)
if not win32:
import termios
import tty
def _set_output(groups, which):
"""
Refactored subroutine used by ``hide`` and ``show``.
"""
previous = {}
try:
# Preserve original values, pull in new given value to use
for group in output.expand_aliases(groups):
previous[group] = output[group]
output[group] = which
# Yield control
yield
finally:
# Restore original values
output.update(previous)
def documented_contextmanager(func):
wrapper = contextmanager(func)
wrapper.undecorated = func
return wrapper
@documented_contextmanager
def show(*groups):
"""
Context manager for setting the given output ``groups`` to True.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to True for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to turn on debug output (which is typically off by default)::
def my_task():
with show('debug'):
run('ls /var/www')
As almost all output groups are displayed by default, `show` is most useful
for turning on the normally-hidden ``debug`` group, or when you know or
suspect that code calling your own code is trying to hide output with
`hide`.
"""
return _set_output(groups, True)
@documented_contextmanager
def hide(*groups):
"""
Context manager for setting the given output ``groups`` to False.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to False for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to hide the "[hostname] run:" status lines, as well as
preventing printout of stdout and stderr, one might use `hide` as follows::
def my_task():
with hide('running', 'stdout', 'stderr'):
run('ls /var/www')
"""
return _set_output(groups, False)
@documented_contextmanager
def _setenv(variables):
"""
Context manager temporarily overriding ``env`` with given key/value pairs.
A callable that returns a dict can also be passed. This is necessary when
new values are being calculated from current values, in order to ensure that
the "current" value is current at the time that the context is entered, not
when the context manager is initialized. (See Issue #736.)
This context manager is used internally by `settings` and is not intended
to be used directly.
"""
if callable(variables):
variables = variables()
clean_revert = variables.pop('clean_revert', False)
previous = {}
new = []
for key, value in six.iteritems(variables):
if key in state.env:
previous[key] = state.env[key]
else:
new.append(key)
state.env[key] = value
try:
yield
finally:
if clean_revert:
for key, value in six.iteritems(variables):
# If the current env value for this key still matches the
# value we set it to beforehand, we are OK to revert it to the
# pre-block value.
if key in state.env and value == state.env[key]:
if key in previous:
state.env[key] = previous[key]
else:
del state.env[key]
else:
state.env.update(previous)
for key in new:
del state.env[key]
def settings(*args, **kwargs):
"""
Nest context managers and/or override ``env`` variables.
`settings` serves two purposes:
* Most usefully, it allows temporary overriding/updating of ``env`` with
any provided keyword arguments, e.g. ``with settings(user='foo'):``.
Original values, if any, will be restored once the ``with`` block closes.
* The keyword argument ``clean_revert`` has special meaning for
``settings`` itself (see below) and will be stripped out before
execution.
* In addition, it will use `contextlib.nested`_ to nest any given
non-keyword arguments, which should be other context managers, e.g.
``with settings(hide('stderr'), show('stdout')):``.
.. _contextlib.nested: http://docs.python.org/library/contextlib.html#contextlib.nested
These behaviors may be specified at the same time if desired. An example
will hopefully illustrate why this is considered useful::
def my_task():
with settings(
hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True
):
if run('ls /etc/lsb-release'):
return 'Ubuntu'
elif run('ls /etc/redhat-release'):
return 'RedHat'
The above task executes a `run` statement, but will warn instead of
aborting if the ``ls`` fails, and all output -- including the warning
itself -- is prevented from printing to the user. The end result, in this
scenario, is a completely silent task that allows the caller to figure out
what type of system the remote host is, without incurring the handful of
output that would normally occur.
Thus, `settings` may be used to set any combination of environment
variables in tandem with hiding (or showing) specific levels of output, or
in tandem with any other piece of Fabric functionality implemented as a
context manager.
If ``clean_revert`` is set to ``True``, ``settings`` will **not** revert
keys which are altered within the nested block, instead only reverting keys
whose values remain the same as those given. More examples will make this
clear; below is how ``settings`` operates normally::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost'):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string is None again
The internal modification of ``env.host_string`` is nullified -- not always
desirable. That's where ``clean_revert`` comes in::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost', clean_revert=True):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string remains 'otherhost'
Brand new keys which did not exist in ``env`` prior to using ``settings``
are also preserved if ``clean_revert`` is active. When ``False``, such keys
are removed when the block exits.
.. versionadded:: 1.4.1
The ``clean_revert`` kwarg.
"""
managers = list(args)
if kwargs:
managers.append(_setenv(kwargs))
return nested(*managers)
def cd(path):
"""
Context manager that keeps directory state when calling remote operations.
Any calls to `run`, `sudo`, `get`, or `put` within the wrapped block will
implicitly have a string similar to ``"cd <path> && "`` prefixed in order
to give the sense that there is actually statefulness involved.
.. note::
`cd` only affects *remote* paths -- to modify *local* paths, use
`~fabric.context_managers.lcd`.
Because use of `cd` affects all such invocations, any code making use of
those operations, such as much of the ``contrib`` section, will also be
affected by use of `cd`.
Like the actual 'cd' shell builtin, `cd` may be called with relative paths
(keep in mind that your default starting directory is your remote user's
``$HOME``) and may be nested as well.
Below is a "normal" attempt at using the shell 'cd', which doesn't work due
to how shell-less SSH connections are implemented -- state is **not** kept
between invocations of `run` or `sudo`::
run('cd /var/www')
run('ls')
The above snippet will list the contents of the remote user's ``$HOME``
instead of ``/var/www``. With `cd`, however, it will work as expected::
with cd('/var/www'):
run('ls') # Turns into "cd /var/www && ls"
Finally, a demonstration (see inline comments) of nesting::
with cd('/var/www'):
run('ls') # cd /var/www && ls
with cd('website1'):
run('ls') # cd /var/www/website1 && ls
.. note::
This context manager is currently implemented by appending to (and, as
always, restoring afterwards) the current value of an environment
variable, ``env.cwd``. However, this implementation may change in the
future, so we do not recommend manually altering ``env.cwd`` -- only
the *behavior* of `cd` will have any guarantee of backwards
compatibility.
.. note::
Space characters will be escaped automatically to make dealing with
such directory names easier.
.. versionchanged:: 1.0
Applies to `get` and `put` in addition to the command-running
operations.
.. seealso:: `~fabric.context_managers.lcd`
"""
return _change_cwd('cwd', path)
def lcd(path):
"""
Context manager for updating local current working directory.
This context manager is identical to `~fabric.context_managers.cd`, except
that it changes a different env var (`lcwd`, instead of `cwd`) and thus
only affects the invocation of `~fabric.operations.local` and the local
arguments to `~fabric.operations.get`/`~fabric.operations.put`.
Relative path arguments are relative to the local user's current working
directory, which will vary depending on where Fabric (or Fabric-using code)
was invoked. You can check what this is with `os.getcwd
<http://docs.python.org/release/2.6/library/os.html#os.getcwd>`_. It may be
useful to pin things relative to the location of the fabfile in use, which
may be found in :ref:`env.real_fabfile <real-fabfile>`
.. versionadded:: 1.0
"""
return _change_cwd('lcwd', path)
def _change_cwd(which, path):
path = path.replace(' ', r'\ ')
if state.env.get(which) and not path.startswith('/') and not path.startswith('~'):
new_cwd = state.env.get(which) + '/' + path
else:
new_cwd = path
return _setenv({which: new_cwd})
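# Added illustration (not part of the original module): because _change_cwd
# appends to any existing env value, nested cd()/lcd() calls compose paths:
#
#   with cd('/var/www'):          # env.cwd == '/var/www'
#       with cd('website1'):      # env.cwd == '/var/www/website1'
#           run('ls')             # cd /var/www/website1 && ls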
def path(path, behavior='append'):
"""
Append the given ``path`` to the PATH used to execute any wrapped commands.
Any calls to `run` or `sudo` within the wrapped block will implicitly have
a string similar to ``"PATH=$PATH:<path> "`` prepended before the given
command.
You may customize the behavior of `path` by specifying the optional
``behavior`` keyword argument, as follows:
* ``'append'``: append given path to the current ``$PATH``, e.g.
``PATH=$PATH:<path>``. This is the default behavior.
* ``'prepend'``: prepend given path to the current ``$PATH``, e.g.
``PATH=<path>:$PATH``.
* ``'replace'``: ignore previous value of ``$PATH`` altogether, e.g.
``PATH=<path>``.
.. note::
This context manager is currently implemented by modifying (and, as
always, restoring afterwards) the current value of environment
variables, ``env.path`` and ``env.path_behavior``. However, this
implementation may change in the future, so we do not recommend
manually altering them directly.
.. versionadded:: 1.0
"""
return _setenv({'path': path, 'path_behavior': behavior})
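# Added usage sketch (not part of the original module; the directory shown is
# illustrative): prepending a directory so wrapped commands resolve a locally
# built binary first.
#
#   with path('/opt/custom/bin', behavior='prepend'):
#       run('mytool --version')   # runs with PATH=/opt/custom/bin:$PATH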
def prefix(command):
"""
Prefix all wrapped `run`/`sudo` commands with given command plus ``&&``.
This is nearly identical to `~fabric.operations.cd`, except that nested
invocations append to a list of command strings instead of modifying a
single string.
Most of the time, you'll want to be using this alongside a shell script
which alters shell state, such as ones which export or alter shell
environment variables.
For example, one of the most common uses of this tool is with the
``workon`` command from `virtualenvwrapper
<http://www.doughellmann.com/projects/virtualenvwrapper/>`_::
with prefix('workon myvenv'):
run('./manage.py syncdb')
In the above snippet, the actual shell command run would be this::
$ workon myvenv && ./manage.py syncdb
This context manager is compatible with `~fabric.context_managers.cd`, so
if your virtualenv doesn't ``cd`` in its ``postactivate`` script, you could
do the following::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
Which would result in executions like so::
$ cd /path/to/app && workon myvenv && ./manage.py syncdb
$ cd /path/to/app && workon myvenv && ./manage.py loaddata myfixture
Finally, as alluded to near the beginning,
`~fabric.context_managers.prefix` may be nested if desired, e.g.::
with prefix('workon myenv'):
run('ls')
with prefix('source /some/script'):
run('touch a_file')
The result::
$ workon myenv && ls
$ workon myenv && source /some/script && touch a_file
Contrived, but hopefully illustrative.
"""
return _setenv(lambda: {'command_prefixes': state.env.command_prefixes + [command]})
@documented_contextmanager
def char_buffered(pipe):
"""
Force local terminal ``pipe`` to be character, not line, buffered.
Only applies on Unix-based systems; on Windows this is a no-op.
"""
if win32 or not isatty(pipe):
yield
else:
old_settings = termios.tcgetattr(pipe)
tty.setcbreak(pipe)
try:
yield
finally:
termios.tcsetattr(pipe, termios.TCSADRAIN, old_settings)
def shell_env(**kw):
"""
Set shell environment variables for wrapped commands.
For example, the below shows how you might set a ZeroMQ related environment
variable when installing a Python ZMQ library::
with shell_env(ZMQ_DIR='/home/user/local'):
run('pip install pyzmq')
As with `~fabric.context_managers.prefix`, this effectively turns the
``run`` command into::
$ export ZMQ_DIR='/home/user/local' && pip install pyzmq
Multiple key-value pairs may be given simultaneously.
.. note::
If used to affect the behavior of `~fabric.operations.local` when
running from a Windows localhost, ``SET`` commands will be used to
implement this feature.
"""
return _setenv({'shell_env': kw})
def _forwarder(chan, sock):
# Bidirectionally forward data between a socket and a Paramiko channel.
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
@documented_contextmanager
def remote_tunnel(remote_port, local_port=None, local_host="localhost",
remote_bind_address="127.0.0.1"):
"""
Create a tunnel forwarding a locally-visible port to the remote target.
For example, you can let the remote host access a database that is
installed on the client host::
# Map localhost:6379 on the server to localhost:6379 on the client,
# so that the remote 'redis-cli' program ends up speaking to the local
# redis-server.
with remote_tunnel(6379):
run("redis-cli -i")
The database might be installed on a client only reachable from the client
host (as opposed to *on* the client itself)::
# Map localhost:6379 on the server to redis.internal:6379 on the client
with remote_tunnel(6379, local_host="redis.internal"):
run("redis-cli -i")
``remote_tunnel`` accepts up to four arguments:
* ``remote_port`` (mandatory) is the remote port to listen to.
* ``local_port`` (optional) is the local port to connect to; the default is
the same port as the remote one.
* ``local_host`` (optional) is the locally-reachable computer (DNS name or
IP address) to connect to; the default is ``localhost`` (that is, the
same computer Fabric is running on).
* ``remote_bind_address`` (optional) is the remote IP address to bind to
for listening, on the current target. It should be an IP address assigned
to an interface on the target (or a DNS name that resolves to such IP).
You can use "0.0.0.0" to bind to all interfaces.
.. note::
By default, most SSH servers only allow remote tunnels to listen to the
localhost interface (127.0.0.1). In these cases, `remote_bind_address`
is ignored by the server, and the tunnel will listen only to 127.0.0.1.
.. versionadded:: 1.6
"""
if local_port is None:
local_port = remote_port
sockets = []
channels = []
threads = []
def accept(channel, src, dest):
src_addr, src_port = src
dest_addr, dest_port = dest
channels.append(channel)
sock = socket.socket()
sockets.append(sock)
try:
sock.connect((local_host, local_port))
except Exception:
print("[%s] rtunnel: cannot connect to %s:%d (from local)" %
(env.host_string, local_host, local_port))
channel.close()
return
print("[%s] rtunnel: opened reverse tunnel: %r -> %r -> %r"
% (env.host_string, channel.origin_addr,
channel.getpeername(), (local_host, local_port)))
th = ThreadHandler('fwd', _forwarder, channel, sock)
threads.append(th)
transport = connections[env.host_string].get_transport()
transport.request_port_forward(remote_bind_address, remote_port, handler=accept)
try:
yield
finally:
for sock, chan, th in zip(sockets, channels, threads):
sock.close()
chan.close()
th.thread.join()
th.raise_if_needed()
transport.cancel_port_forward(remote_bind_address, remote_port)
quiet = lambda: settings(hide('everything'), warn_only=True)
quiet.__doc__ = """
Alias to ``settings(hide('everything'), warn_only=True)``.
Useful for wrapping remote interrogative commands which you expect to fail
occasionally, and/or which you want to silence.
Example::
with quiet():
have_build_dir = run("test -e /tmp/build").succeeded
When used in a task, the above snippet will not produce any ``run: test -e
/tmp/build`` line, nor will any stdout/stderr display, and command failure
is ignored.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.hide`
.. versionadded:: 1.5
"""
warn_only = lambda: settings(warn_only=True)
warn_only.__doc__ = """
Alias to ``settings(warn_only=True)``.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.quiet`
"""
|
bsd-2-clause
| -7,542,093,378,251,034,000
| 33.760797
| 91
| 0.632132
| false
| 4.195269
| false
| false
| false
|
LokiW/extendable-cards
|
extendable_cards/view/game_view.py
|
1
|
6424
|
from extendable_cards.view.graphics import Rectangle, Point, Text
from tkinter import Button
class GameOutline(object):
def __init__(self, window, dx, dy, w, h):
self.top_y = dy
self.bottom_y = dy+h
self.right_x = dx+w
self.left_x = dx
self.discard_end_x = dx + (w/6.0)
self.discard_top_y = self.bottom_y - (h/3.0)
discard_p_b = Point(dx+1, self.bottom_y-1)
discard_p_t = Point(self.discard_end_x, self.discard_top_y)
discard_text_p = Point((2*dx + (w/6.0))/2, (self.bottom_y - (h / 6.0)))
self.discard = Rectangle(discard_p_b, discard_p_t)
self.discard.setFill("grey")
self.discard_text = Text(discard_text_p, "DISCARD PILE")
self.deck_begin_x = self.right_x - (w/6.0)
deck_p_b = Point(self.right_x-1, self.bottom_y-1)
deck_p_t = Point(self.deck_begin_x, self.bottom_y - (h / 3.0))
deck_text_p = Point(self.right_x - (w / 12.0), self.bottom_y - (h / 6.0))
self.deck = Rectangle(deck_p_b, deck_p_t)
self.deck.setFill("grey")
self.deck_text = Text(deck_text_p, "DECK")
self.hand = []
self.in_play = []
self.selected = []
self.win = window
def display_outline(self):
self.discard.draw(self.win)
self.deck.draw(self.win)
def display_outline_with_labels(self):
self.display_outline()
self.deck_text.draw(self.win)
self.discard_text.draw(self.win)
def undisplay_labels(self):
self.deck_text.undraw()
self.discard_text.undraw()
def display_hand_area(self):
self._display_card_list(self.hand, PlayArea.HAND)
def undisplay_hand_area(self):
for card in self.hand:
card.undisplay()
def display_play_area(self):
self._display_card_list(self.in_play, PlayArea.IN_PLAY)
def _display_card_list(self, cards, play_area):
card_num = len(cards)
if card_num == 0:
return False
cur_card = 0
lx, by, rx, ty = self.get_area_points(play_area)
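        # Layout: keep a 1/50th-of-area-height margin above and below the
        # cards, give each card a 5:7 width-to-height aspect ratio, and
        # space the cards evenly so the whole row fits between lx and rx.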
y_unit = (by - ty) / 50.0
card_height = by - ty - 2*y_unit
card_width = card_height * (5.0/7.0)
x_unit = ((rx - card_width) - lx)/card_num
for card in cards:
card.display_card(lx + (cur_card*x_unit), ty + y_unit, w=card_width, h=card_height)
cur_card += 1
def undisplay_play_area(self):
for card in self.in_play:
card.undisplay()
    def select_cards(self, cards, play_area):
        for card in cards:
            self.select_card(card, play_area)
    def select_card(self, card, play_area):
        if play_area == PlayArea.HAND:
            for hc in self.hand[:]:
                if hc.card.name == card.card.name:
                    self.selected.append({'card': hc, 'origin': play_area})
                    self.hand.remove(hc)
                    return
        elif play_area == PlayArea.IN_PLAY:
            for ipc in self.in_play[:]:
                if ipc.card.name == card.card.name:
                    self.selected.append({'card': ipc, 'origin': play_area})
                    self.in_play.remove(ipc)
                    return
        elif play_area == PlayArea.DECK or play_area == PlayArea.DISCARD:
            self.selected.append({'card': card, 'origin': play_area})
        elif play_area == PlayArea.SELECTION:
            for entry in self.selected:
                sc, origin = entry['card'], entry['origin']
                if sc.card.name == card.card.name:
                    self.return_selections()
                    self.selected.append({'card': sc, 'origin': origin})
                    return
def return_selections(self):
self.undisplay_selection()
for card in self.selected[:]:
if card['origin'] == PlayArea.HAND:
                self.hand.append(card['card'])
self.selected.remove(card)
elif card['origin'] == PlayArea.IN_PLAY:
                self.in_play.append(card['card'])
self.selected.remove(card)
else:
self.selected.remove(card)
def display_selection(self):
self._display_card_list([item['card'] for item in self.selected], PlayArea.SELECTION)
def undisplay_selection(self):
for card in self.selected:
            card['card'].undisplay()
def add_to_hand_area(self, card_view):
self.hand.append(card_view)
def add_to_play_area(self, card_view):
self.in_play.append(card_view)
def get_card_at_point(self, point, area):
x = point.getX()
y = point.getY()
if area == PlayArea.HAND:
last_seen = None
for card in self.hand:
lx = min(card.card.getP1().getX(), card.card.getP2().getX())
if lx < x:
last_seen = card
else:
return last_seen
return last_seen
def get_area(self, point):
x = point.getX()
y = point.getY()
if y < self.discard_top_y:
return PlayArea.IN_PLAY
elif x < self.discard_end_x:
return PlayArea.DISCARD
elif x > self.deck_begin_x:
return PlayArea.DECK
        else:
            return PlayArea.HAND
def get_area_points(self, area):
if area == PlayArea.IN_PLAY:
return (self.left_x, self.discard_top_y, self.right_x, self.top_y)
elif area == PlayArea.DISCARD:
return (self.left_x, self.bottom_y, self.discard_end_x, self.discard_top_y)
elif area == PlayArea.HAND:
return (self.discard_end_x, self.bottom_y, self.deck_begin_x, self.discard_top_y)
elif area == PlayArea.DECK:
return (self.deck_begin_x, self.bottom_y, self.right_x, self.discard_top_y)
elif area == PlayArea.SELECTION:
return (self.discard_end_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(2.0/3.0),
self.deck_begin_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(5.0/3.0))
class PlayArea(object):
IN_PLAY = "play"
DISCARD = "discard"
DECK = "deck"
HAND = "hand"
SELECTION = "selection"
|
bsd-2-clause
| 7,205,527,144,953,349,000
| 30.960199
| 103
| 0.538294
| false
| 3.411577
| false
| false
| false
|
google/tf-quant-finance
|
tf_quant_finance/experimental/pricing_platform/framework/market_data/rate_curve.py
|
1
|
13760
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RateCurve object."""
from typing import Optional, Tuple
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance import math
from tf_quant_finance import rates as rates_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types
from tf_quant_finance.experimental.pricing_platform.framework.core import daycount_conventions
from tf_quant_finance.experimental.pricing_platform.framework.core import interpolation_method
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils
_DayCountConventions = daycount_conventions.DayCountConventions
_InterpolationMethod = interpolation_method.InterpolationMethod
_DayCountConventionsProtoType = types.DayCountConventionsProtoType
class RateCurve(pmd.RateCurve):
"""Represents an interest rate curve."""
def __init__(
self,
maturity_dates: types.DateTensor,
discount_factors: tf.Tensor,
valuation_date: types.DateTensor,
interpolator: Optional[_InterpolationMethod] = None,
interpolate_rates: Optional[bool] = True,
daycount_convention: Optional[_DayCountConventionsProtoType] = None,
curve_type: Optional[curve_types.CurveType] = None,
dtype: Optional[tf.DType] = None,
name: Optional[str] = None):
"""Initializes the interest rate curve.
Args:
maturity_dates: A `DateTensor` containing the maturity dates on which the
curve is specified.
discount_factors: A `Tensor` of real dtype specifying the discount factors
corresponding to the input maturities. The shape of this input should
match the shape of `maturity_dates`.
valuation_date: A scalar `DateTensor` specifying the valuation (or
settlement) date for the curve.
interpolator: An instance of `InterpolationMethod`.
Default value: `None` in which case cubic interpolation is used.
interpolate_rates: A boolean specifying whether the interpolation should
be done in discount rates or discount factors space.
Default value: `True`, i.e., interpolation is done in the discount
factors space.
daycount_convention: `DayCountConventions` to use for the interpolation
purpose.
Default value: `None` which maps to actual/365 day count convention.
curve_type: An instance of `CurveTypes` to mark the rate curve.
Default value: `None` which means that the curve does not have the
marker.
dtype: `tf.Dtype`. Optional input specifying the dtype of the `rates`
input.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'rate_curve'.
"""
self._name = name or "rate_curve"
with tf.compat.v1.name_scope(self._name):
self._discount_factor_nodes = tf.convert_to_tensor(
discount_factors, dtype=dtype,
name="curve_discount_factors")
self._dtype = dtype or self._discount_factor_nodes.dtype
if interpolator is None or interpolator == _InterpolationMethod.CUBIC:
def cubic_interpolator(xi, x, y):
spline_coeffs = math.interpolation.cubic.build_spline(x, y)
return math.interpolation.cubic.interpolate(xi, spline_coeffs,
dtype=dtype)
interpolator = cubic_interpolator
self._interpolation_method = _InterpolationMethod.CUBIC
elif interpolator == _InterpolationMethod.LINEAR:
def linear_interpolator(xi, x, y):
return math.interpolation.linear.interpolate(xi, x, y,
dtype=dtype)
interpolator = linear_interpolator
self._interpolation_method = _InterpolationMethod.LINEAR
elif interpolator == _InterpolationMethod.CONSTANT_FORWARD:
def constant_fwd(xi, x, y):
return rates_lib.constant_fwd.interpolate(xi, x, y, dtype=dtype)
interpolator = constant_fwd
self._interpolation_method = _InterpolationMethod.CONSTANT_FORWARD
else:
raise ValueError(f"Unknown interpolation method {interpolator}.")
self._dates = dateslib.convert_to_date_tensor(maturity_dates)
self._valuation_date = dateslib.convert_to_date_tensor(
valuation_date)
self._daycount_convention = (
daycount_convention or _DayCountConventions.ACTUAL_365)
self._day_count_fn = utils.get_daycount_fn(self._daycount_convention)
self._times = self._get_time(self._dates)
self._interpolator = interpolator
self._interpolate_rates = interpolate_rates
      # Discount rates are derived on demand from the discount factors
      # (see the `discount_rate_nodes` property).
      self._curve_type = curve_type
@property
def daycount_convention(self) -> types.DayCountConventionsProtoType:
"""Daycount convention."""
return self._daycount_convention
def daycount_fn(self):
"""Daycount function."""
return self._day_count_fn
@property
def discount_factor_nodes(self) -> types.FloatTensor:
"""Discount factors at the interpolation nodes."""
return self._discount_factor_nodes
@property
def node_dates(self) -> types.DateTensor:
"""Dates at which the discount factors and rates are specified."""
return self._dates
@property
def discount_rate_nodes(self) -> types.FloatTensor:
"""Discount rates at the interpolation nodes."""
discount_rates = tf.math.divide_no_nan(
-tf.math.log(self.discount_factor_nodes), self._times,
name="discount_rate_nodes")
return discount_rates
def set_discount_factor_nodes(self, values: types.FloatTensor):
"""Update discount factors at the interpolation nodes with new values."""
values = tf.convert_to_tensor(values, dtype=self._dtype)
values_shape = values.shape.as_list()
nodes_shape = self.discount_factor_nodes.shape.as_list()
if values_shape != nodes_shape:
raise ValueError("New values should have shape {0} but are of "
"shape {1}".format(nodes_shape, values_shape))
self._discount_factor_nodes = values
def discount_rate(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns interpolated rates at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
    if self._interpolate_rates:
      rates = self._interpolator(times, self._times,
                                 self.discount_rate_nodes)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
rates = -tf.math.divide_no_nan(
tf.math.log(discount_factor), times)
return tf.identity(rates, name=name or "discount_rate")
def discount_factor(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns discount factors at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
discount_factor = tf.math.exp(-rates * times)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
return tf.identity(discount_factor, name=name or "discount_factor")
def forward_rate(
self,
start_date: Optional[types.DateTensor] = None,
maturity_date: Optional[types.DateTensor] = None,
start_time: Optional[types.FloatTensor] = None,
maturity_time: Optional[types.FloatTensor] = None,
day_count_fraction: Optional[tf.Tensor] = None):
"""Returns the simply accrued forward rate between [start_dt, maturity_dt].
Args:
start_date: A `DateTensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
maturity_date: A `DateTensor` specifying the end of the accrual period
for the forward rate. The shape of `end_date` must be broadcastable
with the shape of `start_date`. The function expects either `end_date`
or `end_time` to be specified.
start_time: A real `Tensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
maturity_time: A real `Tensor` specifying the end of the accrual period
for the forward rate. The shape of `end_date` must be broadcastable
with the shape of `start_date`. The function expects either `end_date`
or `end_time` to be specified.
day_count_fraction: An optional `Tensor` of real dtype specifying the
time between `start_date` and `maturity_date` in years computed using
the forward rate's day count basis. The shape of the input should be
the same as that of `start_date` and `maturity_date`.
Default value: `None`, in which case the daycount fraction is computed
using `daycount_convention`.
Returns:
A real `Tensor` of same shape as the inputs containing the simply
compounded forward rate.
"""
if start_date is None and start_time is None:
raise ValueError("Either start_date or start_times "
"must be supplied.")
if maturity_date is None and maturity_time is None:
raise ValueError("Either maturity_date or maturity_time must be "
"supplied.")
if start_date is not None and maturity_date is not None:
start_date = dateslib.convert_to_date_tensor(start_date)
maturity_date = dateslib.convert_to_date_tensor(maturity_date)
if day_count_fraction is None:
day_count_fn = self._day_count_fn
day_count_fraction = day_count_fn(
start_date=start_date, end_date=maturity_date, dtype=self._dtype)
else:
day_count_fraction = tf.convert_to_tensor(day_count_fraction,
self._dtype,
name="day_count_fraction")
start_time = self._get_time(start_date)
maturity_time = self._get_time(maturity_date)
else:
start_time = tf.convert_to_tensor(start_time, dtype=self._dtype)
maturity_time = tf.convert_to_tensor(maturity_time, dtype=self._dtype)
day_count_fraction = maturity_time - start_time
dfstart = self.discount_factor(interpolation_times=start_time)
dfmaturity = self.discount_factor(interpolation_times=maturity_time)
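    # Simply compounded forward rate over the accrual period:
    # F = (DF(start) / DF(maturity) - 1) / day_count_fraction.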
return tf.math.divide_no_nan(
tf.math.divide_no_nan(dfstart, dfmaturity) - 1., day_count_fraction)
@property
def valuation_date(self) -> types.DateTensor:
return self._valuation_date
@property
def interpolation_method(self) -> _InterpolationMethod:
return self._interpolation_method
def _get_time(self,
dates: types.DateTensor) -> types.FloatTensor:
"""Computes the year fraction from the curve's valuation date."""
return self._day_count_fn(start_date=self._valuation_date,
end_date=dates,
dtype=self._dtype)
@property
def curve_type(self) -> curve_types.CurveType:
return self._curve_type
def discount_factors_and_dates(self) -> Tuple[types.FloatTensor,
types.DateTensor]:
"""Returns discount factors and dates at which the discount curve is fitted.
"""
return (self._discount_factor_nodes, self._dates)
@property
def dtype(self) -> types.Dtype:
return self._dtype
@property
def interpolate_rates(self) -> bool:
"""Returns `True` if the interpolation is on rates and not on discounts."""
return self._interpolate_rates
__all__ = ["RateCurve"]
|
apache-2.0
| -6,346,764,848,289,983,000
| 44.114754
| 102
| 0.672456
| false
| 4.101341
| false
| false
| false
|
Davasny/CCAS
|
ccas/models/exchanges/__init__.py
|
1
|
1783
|
from . import poloniex, btc_e, bittrex, bitfinex
from ccas.models import database, coinmarketcap
def get_balances(exchange, public_key, secret_key):
if exchange == "poloniex":
return poloniex.get_balances(public_key, secret_key)
if exchange == "btc-e":
return btc_e.get_balances(public_key, secret_key)
if exchange == "bittrex":
return bittrex.get_balances(public_key, secret_key)
if exchange == "bitfinex":
return bitfinex.get_balances(public_key, secret_key)
def get_exchanges():
response = database.new_query("SELECT id, exchange FROM exchanges_api_keys;")
return list(response)
def get_btc_price():
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='btc';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_btc_price()
if exchange == "btc-e":
return btc_e.get_btc_price()
if exchange == "bittrex":
return bittrex.get_btc_price()
if exchange == "bitfinex":
return bitfinex.get_btc_price()
else:
return -1
def get_price(currency):
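    # Note: the currency name is interpolated directly into the SQL text.
    # If database.new_query supports bound parameters (not verified here),
    # a parameterized query would be safer against SQL injection.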
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='"+ currency.lower() +"';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_price(currency)
if exchange == "btc-e":
return btc_e.get_price(currency)
if exchange == "bittrex":
return bittrex.get_price(currency)
if exchange == "bitfinex":
return bitfinex.get_price(currency)
if exchange == "coinmarketcap":
return coinmarketcap.get_price(currency)
else:
return -1
|
mit
| 4,158,025,492,351,887,000
| 33.960784
| 113
| 0.615816
| false
| 3.402672
| false
| false
| false
|
veryhappythings/discord-gather
|
gather/discord_gather.py
|
1
|
2123
|
import asyncio
import logging
import discord
from .gatherbot import GatherBot
from .organiser import Organiser
from . import commands
logger = logging.getLogger(__name__)
class DiscordGather:
def __init__(self, token):
self.token = token
self.bot = None
self.client = discord.Client()
self.client.on_ready = self.on_ready
asyncio.get_event_loop().call_soon(self._report_loop)
def run(self):
self.client.run(self.token)
async def on_ready(self):
self.bot = GatherBot(self.client.user.name)
self.bot.register_message_handler(self.client.send_message)
self.bot.register_action('^!help$', commands.bot_help)
self.bot.register_action('^!(?:add|join|s)$', commands.add)
self.bot.register_action('^!(?:remove|rem|so)$', commands.remove)
self.bot.register_action('^!(?:game|status)$', commands.game_status)
self.bot.register_action('^!(?:reset)$', commands.reset)
self.client.on_member_update = self.on_member_update
self.client.on_message = self.bot.on_message
logger.info('Logged in as')
logger.info(self.bot.username)
logger.info('------')
async def on_member_update(self, before, after):
# Handle players going offline
if (before.status == discord.Status.online and
after.status == discord.Status.offline):
await self.bot.member_went_offline(before)
# Handle players going AFK
elif (before.status == discord.Status.online and
after.status == discord.Status.idle):
await self.bot.member_went_afk(before)
def _report_loop(self):
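        # Log a queue report (once the bot is ready) and reschedule this
        # method on the event loop every 10 minutes.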
if self.bot:
logger.info(report(self.bot.organiser))
asyncio.get_event_loop().call_later(60 * 10, self._report_loop)
def report(organiser: Organiser) -> str:
report = ["Report:"]
for key, queue in organiser.queues.items():
report.append("{}-{}: {} current players - {} games to date".format(
key.server, key, len(queue), organiser.games_count[key]))
return "\n".join(report)
|
mit
| -7,811,874,497,295,730,000
| 33.803279
| 76
| 0.628356
| false
| 3.647766
| false
| false
| false
|
GoogleCloudPlatform/cloudml-samples
|
chainer/containers/quickstart/mnist/trainer/mnist.py
|
1
|
6554
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import six
import subprocess
import hypertune
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.training.extensions import log_report as log_report_module
from chainer import serializers
MODEL_FILE_NAME = 'chainer.model'
class Net(chainer.Chain):
def __init__(self):
super(Net, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(1, 10, ksize=5)
self.conv2 = L.Convolution2D(10, 20, ksize=5)
self.fc1 = L.Linear(None, 50)
self.fc2 = L.Linear(None, 10)
def forward(self, x):
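        # Two conv + max-pool stages reduce a 28x28 MNIST input to 20
        # channels of 4x4 (= 320 features), which are flattened before
        # the fully connected layers.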
x = F.relu(F.max_pooling_2d(self.conv1(x), 2))
x = F.relu(F.max_pooling_2d(F.dropout(self.conv2(x)), 2))
x = F.reshape(F.flatten(x), (-1, 320))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return x
class HpReport(chainer.training.Extension):
"""Trainer extension for hyper parameter tuning with CMLE.
Args:
log_report (str or LogReport): Log report to accumulate the
observations. This is either the name of a LogReport extensions
registered to the trainer, or a LogReport instance to use
internally.
global_step: key to epoch
hyperparameter_metric_tag: user-defined
metric_value: key to metric
"""
def __init__(self,
log_report='LogReport',
hp_global_step='epoch',
hp_metric_val='validation/main/loss',
hp_metric_tag='loss'):
self._log_report = log_report
self._log_len = 0 # number of observations already done
self._hp_global_step = hp_global_step
self._hp_metric_val = hp_metric_val
self._hp_metric_tag = hp_metric_tag
def __call__(self, trainer):
log_report = self._log_report
if isinstance(log_report, str):
log_report = trainer.get_extension(log_report)
elif isinstance(log_report, log_report_module.LogReport):
log_report(trainer) # update the log report
else:
raise TypeError('log report has a wrong type %s' %
type(log_report))
log = log_report.log
log_len = self._log_len
hpt = hypertune.HyperTune()
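        # Report every log entry observed since the previous call to the
        # CMLE hyperparameter tuning service, using the epoch as the
        # global step.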
while len(log) > log_len:
target_log = log[log_len]
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag=self._hp_metric_tag,
metric_value=target_log[self._hp_metric_val],
global_step=target_log[self._hp_global_step])
log_len += 1
        self._log_len = log_len
def get_args():
"""Argument parser.
Returns:
Dictionary of arguments.
"""
parser = argparse.ArgumentParser(description='Chainer MNIST Example')
parser.add_argument(
'--batch-size',
type=int,
default=100,
metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument(
'--test-batch-size',
type=int,
default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument(
'--epochs',
type=int,
default=10,
metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument(
'--lr',
type=float,
default=0.01,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument(
'--momentum',
type=float,
default=0.5,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument(
'--model-dir',
default=None,
help='The directory to store the model')
parser.add_argument(
'--gpu',
type=int,
default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument(
'--resume',
action='store_true',
help='Resume training')
args = parser.parse_args()
return args
def main():
# Training settings
args = get_args()
# Set up a neural network to train
model = L.Classifier(Net())
if args.gpu >= 0:
# Make a specified GPU current
chainer.backends.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=args.momentum)
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(ndim=3)
train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
test_iter = chainer.iterators.SerialIterator(test, args.test_batch_size,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epochs, 'epoch'))
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Send selected entries of the log to CMLE HP tuning system
trainer.extend(
HpReport(hp_metric_val='validation/main/loss', hp_metric_tag='my_loss'))
if args.resume:
# Resume from a snapshot
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
if not os.path.exists(tmp_model_file):
subprocess.check_call([
'gsutil', 'cp', os.path.join(args.model_dir, MODEL_FILE_NAME),
tmp_model_file])
if os.path.exists(tmp_model_file):
chainer.serializers.load_npz(tmp_model_file, trainer)
trainer.run()
if args.model_dir:
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
serializers.save_npz(tmp_model_file, model)
subprocess.check_call([
'gsutil', 'cp', tmp_model_file,
os.path.join(args.model_dir, MODEL_FILE_NAME)])
if __name__ == '__main__':
main()
|
apache-2.0
| 508,161,105,582,383,800
| 30.209524
| 80
| 0.652121
| false
| 3.529348
| true
| false
| false
|
yasserglez/pytiger2c
|
packages/pytiger2c/ast/integerliteralexpressionnode.py
|
1
|
2204
|
# -*- coding: utf-8 -*-
"""
Clase C{IntegerLiteralExpressionNode} del árbol de sintáxis abstracta.
"""
from pytiger2c.ast.valuedexpressionnode import ValuedExpressionNode
from pytiger2c.types.integertype import IntegerType
class IntegerLiteralExpressionNode(ValuedExpressionNode):
"""
Clase C{IntegerLiteralExpressionNode} del árbol de sintáxis abstracta.
Representa un literal de un número entero en el lenguaje Tiger. El valor
de retorno de esta expresión siempre será C{IntegerType}.
"""
def _get_integer(self):
"""
Método para obtener el valor de la propiedad C{integer}.
"""
return self._integer
integer = property(_get_integer)
def __init__(self, integer):
"""
Inicializa la clase C{IntegerLiteralExpressionNode}.
@type integer: C{int}
@param integer: Valor del número entero literal.
"""
super(IntegerLiteralExpressionNode, self).__init__()
self._integer = integer
def check_semantics(self, scope, errors):
"""
Para obtener información acerca de los parámetros recibidos por
el método consulte la documentación del método C{check_semantics}
en la clase C{LanguageNode}.
Este nodo del árbol de sintáxis abstracta no requiere comprobación
semántica, solamente se da valor al tipo de retorno del nodo que
siempre será C{IntegerType}.
"""
self._scope = scope
self._return_type = IntegerType()
def generate_code(self, generator):
"""
Genera el código correspondiente a la estructura del lenguaje Tiger
representada por el nodo.
Para obtener información acerca de los parámetros recibidos por
este método consulte la documentación del método C{generate_code}
de la clase C{LanguageNode}.
"""
self.scope.generate_code(generator)
int_code_type = IntegerType().code_type
local_var = generator.define_local(int_code_type)
generator.add_statement('{0} = {1};'.format(local_var, self.integer))
self._code_name = local_var
|
mit
| 7,899,778,051,832,931,000
| 33.587302
| 77
| 0.652134
| false
| 3.54886
| false
| false
| false
|
jgmanzanas/CMNT_004_15
|
project-addons/sale_display_stock/report/sale_order_line_report.py
|
1
|
4447
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, tools
class sale_order_line_report(models.Model):
_name = 'sale.order.line.report'
_auto = False
name = fields.Char('Name', readonly=True)
partner_id = fields.Many2one('res.partner', 'Partner', readonly=True)
product_qty = fields.Float('Quantity', readonly=True)
uom = fields.Many2one('product.uom', 'UoM', readonly=True)
price_unit = fields.Float('Price unit', readonly=True)
discount = fields.Float('Discount', readonly=True)
salesman_id = fields.Many2one('res.users', 'Salesperson', readonly=True)
state = fields.Char('State', readonly=True)
product_id = fields.Many2one('product.product', 'Product', readonly=True)
order_id = fields.Many2one('sale.order', 'Order', readonly=True)
qty_kitchen = fields.Float('Qty in kitchen', group_operator="avg",
readonly=True)
qty_stock = fields.Float('Stock qty', group_operator="avg", readonly=True)
company_id = fields.Many2one("res.company", "Company", readonly=True)
def init(self, cr):
tools.drop_view_if_exists(cr, self._table)
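        # The view joins each sale order line with (a) the summed quant
        # quantities in the kitchen/pantry locations and (b) the total
        # stock inside warehouse view locations (found via the
        # parent_left/parent_right tree bounds), keeping only lines that
        # still have pending procurements.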
cr.execute("""
CREATE or REPLACE VIEW sale_order_line_report as (SELECT sol.id as id,
sol.name as name,
sol.order_partner_id as partner_id,
sol.product_uom_qty as product_qty,
sol.product_uom as uom,
sol.price_unit as price_unit,
sol.discount as discount,
sol.salesman_id as salesman_id,
sol.state as state,
sol.order_id as order_id,
sol.company_id as company_id,
q_kt.product_id,
q_kt.qty AS qty_kitchen,
stck.qty AS qty_stock
FROM sale_order_line sol
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT res_id
FROM ir_model_data
WHERE module = 'location_moves' AND name IN ('stock_location_kitchen','stock_location_pantry')
)
GROUP BY product_id) q_kt
ON sol.product_id = q_kt.product_id
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT loc.id
FROM stock_location loc
INNER JOIN (SELECT parent_left,
parent_right
FROM stock_location
WHERE
id IN (select view_location_id from stock_warehouse))
stock
ON loc.parent_left >=
stock.parent_left
AND loc.parent_right <=
stock.parent_right)
GROUP BY product_id) stck
ON sol.product_id = stck.product_id
WHERE q_kt.qty > 0 and sol.id in (select sale_line_id from procurement_order po where po.state not in ('done', 'cancel'))
GROUP BY sol.id, sol.name, sol.order_partner_id, sol.product_uom_qty,
sol.product_uom, sol.price_unit, sol.discount, sol.company_id,
sol.salesman_id, sol.state, sol.order_id, q_kt.product_id, q_kt.qty, stck.qty)
""")
|
agpl-3.0
| 516,351,656,280,464,100
| 47.326087
| 137
| 0.550157
| false
| 4.143523
| false
| false
| false
|
gevannmullins/linux_server
|
add_items.py
|
1
|
4498
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Item, User
engine = create_engine('postgresql://catalog:password@localhost/catalog')
# engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(name="Caron Mullins", email="caronmullins2016@gmail.com", picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png')
session.add(User1)
session.commit()
# Category 1 with 3 items
category1 = Category(user_id=1, name="Soccer", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category1)
session.commit()
item1 = Item(user_id=1, name="Soccer Ball", description="Soccer balls for practicing and match games.", category=category1)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Soccer Boots", description="Soccer boots to maxumise gameplay", category=category1)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Whistles", description="Whistles for training sessions.", category=category1)
session.add(item3)
session.commit()
# Next Categories and its items
category2 = Category(user_id=1, name="Basketball", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category2)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category2.id)
session.add(item1)
session.commit()
# Categories 3
category3 = Category(user_id=1, name="Baseball", image="http://totalsportscomplex.com/wp-content/uploads/2014/09/baseball-pic.jpg")
session.add(category3)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category3.id)
session.add(item1)
session.commit()
# Categories 4
category4 = Category(user_id=1, name="Frisbee", image="http://uvmbored.com/wp-content/uploads/2015/10/how_the_frisbee_took_flight.jpg")
session.add(category4)
session.commit()
item1 = Item(user_id=1, name="Flying Disc", description="A Flying disc or a Flying Saucer", category_id = category4.id)
session.add(item1)
session.commit()
# Categories 5
category5 = Category(user_id=1, name="Snowboarding", image="https://pantherfile.uwm.edu/collins9/www/finalproject5/Project_5/snowboarding3.jpg")
session.add(category5)
session.commit()
item1 = Item(user_id=1, name="Snowboard", description="Wooden board suitable to glide on snow", category_id = category5.id)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Goggles", description="Anit-glare protective safety glasses",category_id = category5.id)
session.add(item2)
session.commit()
# Categories 6
category6 = Category(user_id=1, name="Rock Climbing", image="http://asme.berkeley.edu/wordpress/wp-content/uploads/2013/11/Rock-Climbing-Wallpaper-HD.jpg")
session.add(category6)
session.commit()
item1 = Item(user_id=1, name="Shoes", description="Superior performance shoew wtih excellent grip", category_id = category6.id)
session.add(item1)
session.commit()
# Categories 7
category7 = Category(user_id=1, name="Skating", image="http://www.ocasia.org/Images-OCA/During-the-Roller-Skating-XXX-contest-between-XXX-_53834132011574.jpg")
session.add(category7)
session.commit()
item1 = Item(user_id=1, name="Skates", description="Roller skates with bearing suitable for beginner and advanced skater", category_id = category7.id)
session.add(item1)
session.commit()
# Categories 8
category8 = Category(user_id=1, name="Hockey", image="http://www.picture-newsletter.com/street-hockey/street-hockey-39.jpg")
session.add(category8)
session.commit()
item1 = Item(user_id=1, name="Stick", description="Composite Stick favorable for both ice and street hockey", category_id = category8.id)
session.add(item1)
session.commit()
print "added menu items!"
|
mit
| 530,226,847,194,362,200
| 32.318519
| 174
| 0.765229
| false
| 3.026918
| false
| false
| false
|
alphagov/backdrop
|
tests/read/test_parse_request_args.py
|
1
|
5035
|
from datetime import datetime
import re
import unittest
from hamcrest import assert_that, is_, has_item
import pytz
from werkzeug.datastructures import MultiDict
from backdrop.read.query import parse_request_args
class Test_parse_request_args(unittest.TestCase):
def test_start_at_is_parsed(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_start_at_is_used(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00"),
("start_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_end_at_is_parsed(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_end_at_is_used(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00"),
("end_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_one_filter_by_is_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar")])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
def test_many_filter_by_are_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar"),
("filter_by", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
assert_that(args['filter_by'], has_item(["bar", "foo"]))
def test_build_query_with_boolean_value(self):
request_args = MultiDict([
("filter_by", "planet:true"),
("filter_by", "star:false"),
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item([ "planet", True ]))
assert_that(args['filter_by'], has_item([ "star", False ]))
def test_one_filter_by_prefix_is_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:/hello/world")])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'],
has_item(["foo", '/hello/world']))
    def test_many_filter_by_prefix_are_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:bar"),
("filter_by_prefix", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'], has_item(["foo", 'bar']))
assert_that(args['filter_by_prefix'], has_item(["bar", 'foo']))
def test_group_by_is_passed_through_untouched(self):
request_args = MultiDict([("group_by", "foobar")])
args = parse_request_args(request_args)
assert_that(args['group_by'], is_(['foobar']))
def test_sort_is_parsed(self):
request_args = MultiDict([
("sort_by", "foo:ascending")])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "ascending"]))
def test_sort_will_use_first_argument_only(self):
request_args = MultiDict([
("sort_by", "foo:descending"),
("sort_by", "foo:ascending"),
])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "descending"]))
def test_limit_is_parsed(self):
request_args = MultiDict([
("limit", "123")
])
args = parse_request_args(request_args)
assert_that(args['limit'], is_(123))
def test_one_collect_is_parsed_with_default_method(self):
request_args = MultiDict([
("collect", "some_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default")]))
def test_two_collects_are_parsed_with_default_methods(self):
request_args = MultiDict([
("collect", "some_key"),
("collect", "some_other_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default"),
("some_other_key", "default")]))
def test_one_collect_is_parsed_with_custom_method(self):
request_args = MultiDict([
("collect", "some_key:mean")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "mean")]))
|
mit
| -5,928,915,904,435,119,000
| 29.70122
| 74
| 0.555511
| false
| 3.498958
| true
| false
| false
|
imgrant/fit2tcx
|
fit2tcx.py
|
1
|
40690
|
#!/usr/bin/env python
#
# fit2tcx - convert a FIT file to a TCX file
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name> [https://github.com/Tigge/FIT-to-TCX/]
# Copyright (c) 2014-2016, Ian Grant <ian@iangrant.me> [https://github.com/imgrant/fit2tcx]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__version__ = "1.6"
import sys
import copy
import contextlib
import argparse
import lxml.etree
from datetime import datetime, timedelta
from pytz import timezone, utc
from tzwhere import tzwhere
from geopy.distance import GreatCircleDistance
from fitparse import FitFile, FitParseError
"""
Limit values for error checking on speed & distance calculations
"""
# Speed and distance calculated from GPS will be ignored
# for trackpoints where the acceleration from the last
# point is above this threshold (in m/s^2)
MAX_ACCELERATION = 3.0
"""
FIT to TCX values mapping
"""
LAP_TRIGGER_MAP = {
"manual": "Manual",
"time": "Time",
"distance": "Distance",
"position_start": "Location",
"position_lap": "Location",
"position_waypoint": "Location",
"position_marked": "Location",
"session_end": "Manual",
"fitness_equipment": "Manual"}
INTENSITY_MAP = {
"active": "Active",
"warmup": "Active",
"cooldown": "Active",
"rest": "Resting",
None: "Active"}
PRODUCT_MAP = {
0: "Unknown",
255: "Run Trainer 2.0", # Timex
# Garmin products:
1: "Garmin Connect API", # Also HRM1
2: "AXH01",
2: "AXH01",
4: "AXB02",
5: "HRM2SS",
6: "DSI_ALF02",
473: "Forerunner 301",
474: "Forerunner 301",
475: "Forerunner 301",
494: "Forerunner 301",
717: "Forerunner 405",
987: "Forerunner 405",
782: "Forerunner 50",
988: "Forerunner 60",
1011: "DSI_ALF01",
1018: "Forerunner 310XT",
1446: "Forerunner 310XT",
1036: "Edge 500",
1199: "Edge 500",
1213: "Edge 500",
1387: "Edge 500",
1422: "Edge 500",
1124: "Forerunner 110",
1274: "Forerunner 110",
1169: "Edge 800",
1333: "Edge 800",
1334: "Edge 800",
1497: "Edge 800",
1386: "Edge 800",
1253: "Chirp",
1325: "Edge 200",
1555: "Edge 200",
1328: "Forerunner 910XT",
1537: "Forerunner 910XT",
1600: "Forerunner 910XT",
1664: "Forerunner 910XT",
1765: "Forerunner 920XT",
1341: "ALF04",
1345: "Forerunner 610",
1410: "Forerunner 610",
1360: "Forerunner 210",
1436: "Forerunner 70",
1461: "AMX",
1482: "Forerunner 10",
1688: "Forerunner 10",
1499: "Swim",
1551: "Fenix",
1967: "Fenix 2",
1561: "Edge 510",
1742: "Edge 510",
1821: "Edge 510",
1567: "Edge 810",
1721: "Edge 810",
1822: "Edge 810",
1823: "Edge 810",
1836: "Edge 1000",
1570: "Tempe",
1735: "VIRB Elite",
1736: "Edge Touring",
1752: "HRM Run",
10007: "SDM4",
20119: "Training Center",
1623: "Forerunner 620",
2431: "Forerunner 235"}
"""
TCX schema and namespace values
"""
TCD_NAMESPACE = "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"
TCD = "{%s}" % TCD_NAMESPACE
XML_SCHEMA_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
XML_SCHEMA = "{%s}" % XML_SCHEMA_NAMESPACE
SCHEMA_LOCATION = \
"http://www.garmin.com/xmlschemas/ActivityExtension/v2 " + \
"http://www.garmin.com/xmlschemas/ActivityExtensionv2.xsd " + \
"http://www.garmin.com/xmlschemas/FatCalories/v1 " + \
"http://www.garmin.com/xmlschemas/fatcalorieextensionv1.xsd " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd"
NSMAP = {
None: TCD_NAMESPACE,
"xsi": XML_SCHEMA_NAMESPACE}
# Class and context manager to suppress stdout for use with tzwhere.
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile()
yield
sys.stdout = save_stdout
class MyDataProcessor(object):
"""
Custom units data processor for FIT object
"""
def process_type_bool(self, field_data):
if field_data.value is not None:
field_data.value = bool(field_data.value)
def process_type_date_time(self, field_data):
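        # FIT timestamps count seconds from the FIT epoch (1989-12-31 UTC),
        # which is 631065600 s after the Unix epoch; per the FIT spec,
        # values below 0x10000000 are relative timestamps and are left
        # untouched here.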
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None
def process_units_semicircles(self, field_data):
if field_data.value is not None:
field_data.value *= 180.0 / (2**31)
field_data.units = 'deg'
class TZDataProcessor(MyDataProcessor):
"""
Extra data processor layer for working with timezones.
For the Timex Run Trainer 2.0, date-times claim to be UTC (as per the FIT
format spec), but are actually an (unknown) local timezone.
If the data processor is called with a lat,lon point, we look up the true
timezone and re-normalize date-times to UTC.
Otherwise, if the data processor is called with a timezone name (defaults
to UTC, i.e. no difference), we use that and re-normalize.
"""
def __init__(self, lat=None, lon=None, tzname="UTC"):
if lat is not None and lon is not None:
with nostdout():
w = tzwhere.tzwhere()
self.tz = timezone(w.tzNameAt(lat, lon))
else:
self.tz = timezone(tzname)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
def iso_Z_format(dt):
iso = dt.isoformat()
z_iso = iso.replace("+00:00", "Z")
return z_iso
def sum_distance(activity,
start_time=datetime(1899, 1, 1, 0, 0, 1, tzinfo=utc),
end_time=datetime(2189, 12, 31, 23, 59, 59, tzinfo=utc)):
"""
Calculate distance from GPS data for an activity
"""
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
fit_epoch = datetime(1989, 12, 31, 0, 0, 0, tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'distance': None,
'position_lat': None,
'position_long': None}
for var in ['distance',
'position_lat',
'position_long']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# For mid-activity laps, iterate through trackpoints to
# grab the first point before the start of the lap, also
# delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then loop over tps array to calculate cumulative point-to-point
# distance from GPS data. Existing distance data (e.g. from footpod)
# is used when there is no GPS position available or it is bad.
distance = 0.0
for timestamp in sorted(tps):
tp = tps[timestamp]
if prev is not None:
if prev['distance'] is None:
prev_dist = 0
else:
prev_dist = prev['distance']
if not None in (tp['position_lat'],
tp['position_long'],
prev['position_lat'],
prev['position_long']):
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_dist = tp['distance'] - prev_dist
except:
# Fallback to existing distance stream data on error
gps_dist = tp['distance'] - prev_dist
else:
# Fallback to existing distance stream data if no GPS coords
gps_dist = tp['distance'] - prev_dist
distance += gps_dist
prev = tp
return distance
def create_element(tag, text=None, namespace=None):
"""Create a free element"""
namespace = NSMAP[namespace]
tag = "{%s}%s" % (namespace, tag)
element = lxml.etree.Element(tag, nsmap=NSMAP)
if text is not None:
element.text = text
return element
def create_sub_element(parent, tag, text=None, namespace=None):
"""Create an element as a child of an existing given element"""
element = create_element(tag, text, namespace)
parent.append(element)
return element
def create_document():
"""Create a TCX XML document"""
document = create_element("TrainingCenterDatabase")
document.set(XML_SCHEMA + "schemaLocation", SCHEMA_LOCATION)
document = lxml.etree.ElementTree(document)
return document
def add_author(document):
"""Add author element (TCX writer) to TCX"""
author = create_sub_element(document.getroot(), "Author")
author.set(XML_SCHEMA + "type", "Application_t")
create_sub_element(author, "Name", "fit2tcx Converter")
build = create_sub_element(author, "Build")
version = create_sub_element(build, "Version")
vMajor, vMinor = tuple(map(int, (__version__.split("."))))
create_sub_element(version, "VersionMajor", str(vMajor))
create_sub_element(version, "VersionMinor", str(vMinor))
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
create_sub_element(author, "LangID", "en")
create_sub_element(author, "PartNumber", "000-00000-00")
def add_creator(element, manufacturer, product_name, product_id, serial):
"""Add creator element (recording device) to TCX activity"""
creator = create_sub_element(element, "Creator")
creator.set(XML_SCHEMA + "type", "Device_t")
create_sub_element(creator, "Name", manufacturer + " " + product_name)
unitID = int(serial or 0)
create_sub_element(creator, "UnitId", str(unitID))
# Set ProductID to 0 for non-Garmin devices
if manufacturer != "Garmin":
product_id = 0
create_sub_element(creator, "ProductID", str(product_id))
version = create_sub_element(creator, "Version")
create_sub_element(version, "VersionMajor", "0")
create_sub_element(version, "VersionMinor", "0")
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
def add_notes(element, text):
"""Add notes element to TCX activity"""
create_sub_element(element, "Notes", text)
def add_trackpoint(element, trackpoint, sport):
"""Create a trackpoint element"""
timestamp = trackpoint['timestamp']
pos_lat = trackpoint['position_lat']
pos_long = trackpoint['position_long']
distance = trackpoint['distance']
altitude = trackpoint['altitude']
speed = trackpoint['speed']
heart_rate = trackpoint['heart_rate']
cadence = trackpoint['cadence']
create_sub_element(element, "Time", iso_Z_format(timestamp))
if pos_lat is not None and pos_long is not None:
pos = create_sub_element(element, "Position")
create_sub_element(pos, "LatitudeDegrees", "{:.6f}".format(pos_lat))
create_sub_element(pos, "LongitudeDegrees", "{:.6f}".format(pos_long))
if altitude is not None:
create_sub_element(element, "AltitudeMeters", str(altitude))
if distance is not None:
create_sub_element(element, "DistanceMeters", str(distance))
if heart_rate is not None:
heartrateelem = create_sub_element(element, "HeartRateBpm")
heartrateelem.set(XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str(heart_rate))
if speed is not None or cadence is not None:
if cadence is not None and sport == "Biking":
# Bike cadence is stored in main trackpoint element,
# not an extension, unlike running cadence (below)
create_sub_element(element, "Cadence", str(cadence))
exelem = create_sub_element(element, "Extensions")
tpx = create_sub_element(exelem, "TPX")
tpx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if speed is not None:
create_sub_element(tpx, "Speed", str(speed))
if cadence is not None:
if sport == "Running":
tpx.set("CadenceSensor", "Footpod")
create_sub_element(tpx, "RunCadence", str(cadence))
elif sport == "Biking":
tpx.set("CadenceSensor", "Bike")
def add_lap(element,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_distance,
activity_scaling_factor,
total_cumulative_distance):
"""Add a lap element to a TCX document"""
# Only process laps with timestamps - this serves as a workaround for
# extra fake/empty laps in FIT files from the Timex Run Trainer 2.0
if lap.get_value('timestamp') is not None:
lap_num = lap.get_value("message_index") + 1
start_time = lap.get_value("start_time")
end_time = lap.get_value("timestamp")
totaltime = lap.get_value("total_elapsed_time")
stored_distance = lap.get_value("total_distance")
calculated_distance = sum_distance(activity, start_time, end_time)
if fixed_distance is not None:
reference_distance = fixed_distance
else:
reference_distance = calculated_distance
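        # Per-lap calibration factor: ratio of the trusted reference
        # distance (user-supplied fixed distance or the GPS-derived sum)
        # to the distance the device recorded for this lap.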
try:
lap_scaling_factor = reference_distance / stored_distance
except ZeroDivisionError:
lap_scaling_factor = 1.00
if calibrate and per_lap_cal:
scaling_factor = lap_scaling_factor
else:
scaling_factor = activity_scaling_factor
max_speed = lap.get_value("max_speed")
avg_speed = lap.get_value("avg_speed")
calories = lap.get_value("total_calories")
avg_heart = lap.get_value("avg_heart_rate")
max_heart = lap.get_value("max_heart_rate")
intensity = INTENSITY_MAP[lap.get_value("intensity")]
avg_cadence = lap.get_value("avg_cadence")
max_cadence = lap.get_value("max_cadence")
if lap.get_value("lap_trigger"):
triggermet = LAP_TRIGGER_MAP[lap.get_value("lap_trigger")]
else:
triggermet = LAP_TRIGGER_MAP["manual"]
lapelem = create_sub_element(element, "Lap")
lapelem.set("StartTime", iso_Z_format(start_time))
#
# TotalTimeSeconds
#
create_sub_element(lapelem, "TotalTimeSeconds", str("%d" % totaltime))
#
# DistanceMeters
#
lap_dist_elem = create_sub_element(lapelem,
"DistanceMeters",
str("%d" % stored_distance)
)
#
# MaximumSpeed
#
lap_max_spd_elem = create_sub_element(lapelem,
"MaximumSpeed",
str("%.3f" % max_speed))
#
# Calories
#
create_sub_element(lapelem, "Calories", str("%d" % calories))
#
# AverageHeartRateBpm
#
if avg_heart is not None:
heartrateelem = create_sub_element(lapelem, "AverageHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % avg_heart))
#
# MaximumHeartRateBpm
#
if max_heart is not None:
heartrateelem = create_sub_element(lapelem, "MaximumHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % max_heart))
#
# Intensity
#
create_sub_element(lapelem, "Intensity", intensity)
#
# Cadence (bike)
#
if avg_speed or avg_cadence or max_cadence:
if sport == "Biking" and avg_cadence is not None:
# Average bike cadence is stored in main lap element,
# not as an extension, unlike average running cadence (below)
create_sub_element(lapelem, "Cadence", str("%d" % avg_cadence))
#
# TriggerMethod
#
create_sub_element(lapelem, "TriggerMethod", triggermet)
if dist_recalc:
distance_used = calculated_distance
elif calibrate:
if fixed_distance is not None:
distance_used = fixed_distance
else:
distance_used = stored_distance * scaling_factor
else:
distance_used = stored_distance
#
# Track
#
trackelem = create_sub_element(lapelem, "Track")
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
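        # FIT timestamps count seconds from the FIT epoch, 1989-12-31 00:00 UTC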
fit_epoch = datetime(1989, 12, 31).replace(tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'cadence': None,
'distance': None,
'position_lat': None,
'position_long': None,
'heart_rate': None,
'altitude': None,
'speed': None}
for var in ['cadence',
'distance',
'position_lat',
'position_long',
'heart_rate',
'altitude',
'speed']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# Iterate through all trackpoints to grab the first point before the
# start of the lap, then delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then process all trackpoints for this lap, recalculating speed &
# distance from GPS and adjusting if requested, before adding element
stored_avg_speed = copy.copy(avg_speed)
stored_max_speed = copy.copy(max_speed)
distance = 0.0
max_speed = 0.0
tp_speed = None
for timestamp in sorted(tps):
tp = tps[timestamp]
trackpointelem = create_sub_element(trackelem, "Trackpoint")
if prev is not None:
if prev['distance'] is None:
prev['distance'] = 0
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
                except Exception:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
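                # Per-trackpoint distance: recalculated from GPS, scaled by the
                # calibration factor, or taken straight from the FIT stream.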
if dist_recalc:
tp_dist = gps_dist
elif calibrate:
tp_dist = (
tp['distance'] - prev['distance']) * scaling_factor
else:
tp_dist = tp['distance'] - prev['distance']
try:
if speed_recalc:
tp_speed = gps_speed
elif calibrate:
tp_speed = tp['speed'] * scaling_factor
else:
tp_speed = tp['speed']
total_cumulative_distance += tp_dist
distance += tp_dist
if tp_speed > max_speed:
max_speed = tp_speed
except TypeError:
tp_speed = None
# Store previous trackpoint before changing the current one
prev = copy.copy(tp)
# Adjust trackpoint distance & speed values if requested
if ((dist_recalc or calibrate)
and tp['distance'] is not None
and total_cumulative_distance is not None):
tp['distance'] = "{:.1f}".format(total_cumulative_distance)
if ((speed_recalc or calibrate)
and tp['speed'] is not None
and tp_speed is not None):
tp['speed'] = "{:.3f}".format(tp_speed)
# Add trackpoint element
add_trackpoint(trackpointelem, tp, sport)
#
# Notes
#
if fixed_distance is not None:
precision_str = ("; known distance: {ref_dist:.3f} km "
"(FIT precision: {fit_precision:.1f}%; "
"GPS/footpod precision: {gps_precision:.1f}%)")
reference = "known distance"
else:
precision_str = " (precision: {precision:.1f}%)"
reference = "GPS/footpod"
try:
fit_precision_calc = (1 - (abs(reference_distance -
stored_distance) /
reference_distance)) * 100
gps_precision_calc = (1 - (abs(reference_distance -
calculated_distance) /
reference_distance)) * 100
precision_calc = (1 - (abs(calculated_distance -
stored_distance) /
calculated_distance)) * 100
except ZeroDivisionError:
fit_precision_calc = 100
gps_precision_calc = 100
precision_calc = 100
notes = ("Lap {lap_number:d}: {distance_used:.3f} km in {total_time!s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km"
+ precision_str + "\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on {reference} for this lap: {new_cf:.1f}%"
).format(lap_number=lap_num,
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(totaltime)),
fit_dist=stored_distance / 1000,
gps_dist=calculated_distance / 1000,
ref_dist=reference_distance / 1000,
fit_precision=fit_precision_calc,
gps_precision=gps_precision_calc,
precision=precision_calc,
old_cf=current_cal_factor,
reference=reference,
new_cf=lap_scaling_factor * current_cal_factor)
add_notes(lapelem, notes)
#
# Extensions (AvgSpeed, AvgRunCadence, MaxRunCadence, MaxBikeCadence)
#
if not all(var is None for var in (avg_speed, avg_cadence, max_cadence)):
exelem = create_sub_element(lapelem, "Extensions")
lx = create_sub_element(exelem, "LX")
lx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if avg_speed is not None:
lap_avg_spd_elem = create_sub_element(lx,
"AvgSpeed",
str("%.3f" % avg_speed))
if avg_cadence is not None and sport == "Running":
create_sub_element(lx,
"AvgRunCadence",
str("%d" % avg_cadence))
if max_cadence is not None:
if sport == "Running":
create_sub_element(lx,
"MaxRunCadence",
str("%d" % max_cadence))
elif sport == "Biking":
create_sub_element(lx,
"MaxBikeCadence",
str("%d" % max_cadence))
# Adjust overall lap distance & speed values if required
if calibrate:
# Manual distance:
if fixed_distance is not None:
lap_dist_elem.text = "{:d}".format(int(fixed_distance))
lap_avg_spd_elem.text = "{:.3f}".format(
fixed_distance / totaltime)
else:
lap_dist_elem.text = "{:d}".format(
int(stored_distance * scaling_factor))
lap_avg_spd_elem.text = "{:.3f}".format(
stored_avg_speed * scaling_factor)
lap_max_spd_elem.text = "{:.3f}".format(
stored_max_speed * scaling_factor)
# GPS recalculation options override calibration:
if dist_recalc:
lap_dist_elem.text = "{:d}".format(int(distance))
if speed_recalc:
lap_avg_spd_elem.text = "{:.3f}".format(distance / totaltime)
lap_max_spd_elem.text = "{:.3f}".format(max_speed)
return distance
else:
return 0
def add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor):
"""Add an activity to a TCX document"""
# Sport type
sport = session.get_value("sport")
sport_mapping = {"running": "Running", "cycling": "Biking"}
sport = sport_mapping[sport] if sport in sport_mapping else "Other"
actelem = create_sub_element(element, "Activity")
actelem.set("Sport", sport)
create_sub_element(actelem,
"Id",
iso_Z_format(session.get_value("start_time")))
total_cumulative_distance = 0.0
lap_num = 0
for lap in activity.get_messages('lap'):
if lap.get_value("start_time") == lap.get_value("timestamp"):
continue # skip very short laps that won't have any data
if manual_lap_distance is not None:
try:
fixed_dist = manual_lap_distance[lap_num]
except IndexError:
fixed_dist = None
else:
fixed_dist = None
lap_dist = add_lap(actelem,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_dist,
activity_scaling_factor,
total_cumulative_distance)
total_cumulative_distance += lap_dist
lap_num += 1
return (actelem, total_cumulative_distance)
def convert(filename,
time_zone="auto",
dist_recalc=False,
speed_recalc=False,
calibrate=False,
per_lap_cal=False,
manual_lap_distance=None,
current_cal_factor=100.0):
"""Convert a FIT file to TCX format"""
# Calibration requires either GPS recalculation or manual lap distance(s):
if calibrate and not dist_recalc and manual_lap_distance is None:
sys.stderr.write("Calibration requested, enabling distance recalculation from GPS/footpod.\n")
dist_recalc = True
# Calibration with manual lap distances implies
# per-lap calibration:
if calibrate and manual_lap_distance is not None:
per_lap_cal = True
document = create_document()
element = create_sub_element(document.getroot(), "Activities")
try:
if time_zone == "auto":
# We need activity object to be able to get trackpoints,
# before re-creating activity again with timezone info
activity = FitFile(filename,
check_crc=False,
data_processor=MyDataProcessor())
activity.parse()
lat = None
lon = None
for trackpoint in activity.get_messages('record'):
if lat is not None and lon is not None:
break
lat = trackpoint.get_value("position_lat")
lon = trackpoint.get_value("position_long")
if lat is not None and lon is not None:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(lat=lat,
lon=lon))
else:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(tzname=time_zone))
activity.parse()
session = next(activity.get_messages('session'))
total_activity_distance = session.get_value('total_distance')
total_calculated_distance = sum_distance(activity)
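        # Activity-wide scaling factor: GPS-calculated distance over the
        # device-recorded distance; this also yields the new footpod factor.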
activity_scaling_factor = (total_calculated_distance /
total_activity_distance)
new_cal_factor = activity_scaling_factor * current_cal_factor
actelem, total_distance = add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor)
except FitParseError as e:
sys.stderr.write(str("Error while parsing .FIT file: %s" % e) + "\n")
sys.exit(1)
if dist_recalc:
distance_used = total_calculated_distance
elif calibrate:
distance_used = total_distance
else:
distance_used = total_activity_distance
method = ""
if dist_recalc or speed_recalc or calibrate:
parts = []
if calibrate:
if per_lap_cal:
parts.append("calibration applied per lap")
else:
parts.append("calibration applied")
if dist_recalc and speed_recalc:
parts.append("speed and distance recalculated")
elif dist_recalc:
parts.append("distance recalculated")
elif speed_recalc:
parts.append("speed recalculated")
if calibrate and manual_lap_distance is not None:
reference = " from known distance (with GPS fill-in)"
elif dist_recalc or speed_recalc:
reference = " from GPS/footpod"
method = "(" + ", ".join(parts) + reference + ")"
notes = ("{total_laps:d} laps: {distance_used:.3f} km in {total_time!s} {dist_method:s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km "
"(precision: {precision:.1f}%)\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on recomputed distance: {new_cf:.1f}%"
).format(total_laps=session.get_value('num_laps'),
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(session.get_value(
'total_timer_time'))),
fit_dist=total_activity_distance / 1000,
gps_dist=total_calculated_distance / 1000,
precision=(1 - (abs(total_calculated_distance -
total_activity_distance) /
total_calculated_distance)) * 100,
old_cf=current_cal_factor,
new_cf=new_cal_factor,
dist_method=method)
add_notes(actelem, notes)
try:
dinfo = next(activity.get_messages('device_info'))
manufacturer = dinfo.get_value('manufacturer').title().replace('_', ' ')
product_name = dinfo.get_value('descriptor').replace('_', ' ')
product_id = dinfo.get_value('product')
serial_number = dinfo.get_value('serial_number')
    except Exception:  # if no device_info message, StopIteration is thrown
fid = next(activity.get_messages('file_id'))
manufacturer = fid.get_value('manufacturer').title().replace('_', ' ')
product_id = fid.get_value('product')
product_name = PRODUCT_MAP[product_id] if product_id in PRODUCT_MAP else product_id
serial_number = fid.get_value('serial_number')
add_creator(actelem,
manufacturer,
product_name,
product_id,
serial_number
)
add_author(document)
return document
def main():
"""Read arguments from command line to convert FIT file to TCX"""
parser = argparse.ArgumentParser(prog="fit2tcx")
parser.add_argument("FitFile", help="Input FIT file")
parser.add_argument("TcxFile", help="Output TCX file")
parser.add_argument(
"-v",
"--version",
action='version',
version='%(prog)s {version}'.format(version=__version__))
parser.add_argument(
"-z",
"--timezone",
action="store",
type=str,
default="auto",
help="Specify the timezone for FIT file timestamps (default, 'auto', uses GPS data to lookup the local timezone)")
parser.add_argument(
"-d",
"--recalculate-distance-from-gps",
action="store_true",
help="Recalculate distance from GPS data")
parser.add_argument(
"-s",
"--recalculate-speed-from-gps",
action="store_true",
help="Recalculate speed from GPS data")
parser.add_argument(
"-c",
"--calibrate-footpod",
action="store_true",
help="Use GPS-measured and/or known distance to calibrate footpod data")
parser.add_argument(
"-p",
"--per-lap-calibration",
action="store_true",
help="Apply footpod calibration on a per lap basis")
parser.add_argument(
"-l",
"--manual-lap-distance",
action="append",
default=None,
type=float,
help="Manually specify known lap distance(s) (in metres, use calibration to apply)")
parser.add_argument(
"-f",
"--calibration-factor",
action="store",
default=100.0,
type=float,
help="Existing calibration factor (defaults to 100.0)")
args = parser.parse_args()
if (args.calibrate_footpod and
not args.recalculate_distance_from_gps and
not args.manual_lap_distance):
parser.error("-c (--calibrate-footpod) requires either -d (--recalculate-distance-from-gps) or -l (--manual-lap-distance)")
return 1
try:
document = convert(args.FitFile,
args.timezone,
args.recalculate_distance_from_gps,
args.recalculate_speed_from_gps,
args.calibrate_footpod,
args.per_lap_calibration,
args.manual_lap_distance,
args.calibration_factor)
activity_notes = document.getroot().findtext(".//{*}Activity/{*}Notes")
if activity_notes is not None:
sys.stdout.write(str(activity_notes) + "\n")
        with open(args.TcxFile, 'wb') as tcx:
            tcx.write(lxml.etree.tostring(document.getroot(),
                                          pretty_print=True,
                                          xml_declaration=True,
                                          encoding="UTF-8"))
return 0
except FitParseError as exception:
sys.stderr.write(str(exception) + "\n")
return 1
if __name__ == "__main__":
sys.exit(main())
|
mit
| 1,875,265,608,820,062,500
| 36.641073
| 131
| 0.541239
| false
| 4.032706
| false
| false
| false
|
gkabbe/cMDLMC
|
mdlmc/IO/converters.py
|
1
|
1764
|
# coding=utf-8
import logging
import os
import pathlib
import tables
import h5py
import daiquiri
import fire
import numpy as np
from typing import Union, Iterable
from ..atoms.numpy_atom import dtype_xyz
from ..atoms import numpy_atom as npa
from ..IO.trajectory_parser import XYZTrajectory
logger = logging.getLogger(__name__)
def save_xyz_to_hdf5(xyz_fname, hdf5_fname=None, *, remove_com_movement=False,
dataset_name="trajectory", selection=None):
"""
Note: HDF5 with Blosc compression currently only works if h5py and pytables are installed via
conda!"""
xyz = XYZTrajectory(xyz_fname, selection=selection)
logger.info("Determine length of xyz trajectory.")
trajectory_length = len(xyz)
first_frame = next(iter(xyz))
frame_shape = first_frame.atom_positions.shape
atom_names = first_frame.atom_names.astype("S")
logger.info("Names: %s", atom_names)
if not hdf5_fname:
hdf5_fname = os.path.splitext(xyz_fname)[0] + ".hdf5"
with h5py.File(hdf5_fname, "w") as hdf5_file:
# Use blosc compression (needs tables import and code 32001)
        traj_atomnames = hdf5_file.create_dataset("atom_names", atom_names.shape, dtype="S2")  # "S2" is the valid NumPy spelling for 2-byte strings
traj_atomnames[:] = atom_names
traj = hdf5_file.create_dataset(dataset_name, shape=(trajectory_length, *frame_shape),
dtype=np.float32, compression=32001)
for i, xyz_frame in enumerate(xyz):
if remove_com_movement:
npa.remove_center_of_mass_movement(xyz_frame)
if i % 1000 == 0:
logger.info("Frame %i", i)
traj[i] = xyz_frame.atom_positions
def main():
daiquiri.setup(level=logging.INFO)
fire.Fire()
|
gpl-3.0
| -4,830,993,265,289,770,000
| 31.072727
| 97
| 0.652494
| false
| 3.431907
| false
| false
| false
|
chromium2014/src
|
tools/perf/page_sets/intl_ko_th_vi.py
|
1
|
1913
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlKoThViPage(page_module.Page):
def __init__(self, url, page_set):
super(IntlKoThViPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/intl_ko_th_vi.json'
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class IntlKoThViPageSet(page_set_module.PageSet):
""" Popular pages in Korean, Thai and Vietnamese. """
def __init__(self):
super(IntlKoThViPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/intl_ko_th_vi.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
# Why: #7 site in Vietnam
'http://us.24h.com.vn/',
# Why: #6 site in Vietnam
'http://vnexpress.net/',
# Why: #18 site in Vietnam
'http://vietnamnet.vn/',
# Why: #5 site in Vietnam
# pylint: disable=C0301
'http://news.zing.vn/the-gioi/ba-dam-thep-margaret-thatcher-qua-doi/a312895.html#home_noibat1',
'http://kenh14.vn/home.chn',
# Why: #5 site in Korea
'http://www.naver.com/',
# Why: #9 site in Korea
'http://www.daum.net/',
# Why: #25 site in Korea
'http://www.donga.com/',
'http://www.chosun.com/',
'http://www.danawa.com/',
# Why: #10 site in Thailand
'http://pantip.com/',
'http://thaimisc.com/'
]
for url in urls_list:
self.AddPage(IntlKoThViPage(url, self))
|
bsd-3-clause
| -6,543,040,283,645,747,000
| 31.423729
| 101
| 0.644015
| false
| 2.989063
| false
| false
| false
|
jakevdp/altair
|
altair/utils/deprecation.py
|
1
|
1447
|
import warnings
# import functools
class AltairDeprecationWarning(UserWarning):
pass
def _deprecated(obj, name=None, message=None):
"""Return a version of a class or function that raises a deprecation warning.
Parameters
----------
obj : class or function
The object to create a deprecated version of.
name : string (optional)
The name of the deprecated object
message : string (optional)
The deprecation message
Returns
-------
deprecated_obj :
The deprecated version of obj
Examples
--------
>>> class Foo(object): pass
>>> OldFoo = _deprecated(Foo, "OldFoo")
>>> f = OldFoo() # doctest: +SKIP
AltairDeprecationWarning: alt.OldFoo is deprecated. Use alt.Foo instead.
"""
if message is None:
message = ("alt.{} is deprecated. Use alt.{} instead."
"".format(name, obj.__name__))
if isinstance(obj, type):
return type(name, (obj,),
{'__doc__': obj.__doc__,
'__init__': _deprecated(obj.__init__, "__init__", message)})
elif callable(obj):
# @functools.wraps(obj) # TODO: use this in Py3 only
def new_obj(*args, **kwargs):
warnings.warn(message, AltairDeprecationWarning)
return obj(*args, **kwargs)
return new_obj
else:
raise ValueError("Cannot deprecate object of type {}".format(type(obj)))
|
bsd-3-clause
| 4,549,240,084,075,891,000
| 29.787234
| 81
| 0.583967
| false
| 4.425076
| false
| false
| false
|
kd0aij/matrixpilot_old
|
Tools/MAVLink/MAVProxy/modules/antenna.py
|
1
|
2346
|
#!/usr/bin/env python
'''
antenna pointing module
Andrew Tridgell
June 2012
'''
import sys, os, time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'cuav', 'lib'))
import cuav_util
mpstate = None
class module_state(object):
def __init__(self):
self.gcs_location = None
self.last_bearing = 0
self.last_announce = 0
def name():
'''return module name'''
return "antenna"
def description():
'''return module description'''
return "antenna pointing module"
def cmd_antenna(args):
'''set gcs location'''
state = mpstate.antenna_state
usage = "antenna lat lon"
if len(args) != 2:
if state.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(state.gcs_location))
return
state.gcs_location = (float(args[0]), float(args[1]))
def init(_mpstate):
'''initialise module'''
global mpstate
mpstate = _mpstate
mpstate.antenna_state = module_state()
mpstate.command_map['antenna'] = (cmd_antenna, "antenna link control")
def unload():
'''unload module'''
pass
def mavlink_packet(m):
'''handle an incoming mavlink packet'''
state = mpstate.antenna_state
if state.gcs_location is None and mpstate.status.wploader.count() > 0:
home = mpstate.status.wploader.wp(0)
mpstate.antenna_state.gcs_location = (home.x, home.y)
print("Antenna home set")
if state.gcs_location is None:
return
if m.get_type() == 'GPS_RAW' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat, m.lon)
elif m.get_type() == 'GPS_RAW_INT' and state.gcs_location is not None:
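        # GPS_RAW_INT reports latitude/longitude as integers in degrees * 1e7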
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat/1.0e7, m.lon/1.0e7)
else:
return
mpstate.console.set_status('Antenna', 'Antenna %.0f' % bearing, row=0)
if abs(bearing - state.last_bearing) > 5 and (time.time() - state.last_announce) > 15:
state.last_bearing = bearing
state.last_announce = time.time()
mpstate.functions.say("Antenna %u" % int(bearing+0.5))
|
gpl-3.0
| 5,135,345,788,596,944,000
| 30.583333
| 104
| 0.602728
| false
| 3.222527
| false
| false
| false
|
kubernetes-client/python
|
kubernetes/client/models/v1alpha1_webhook_throttle_config.py
|
1
|
4435
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1WebhookThrottleConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'burst': 'int',
'qps': 'int'
}
attribute_map = {
'burst': 'burst',
'qps': 'qps'
}
def __init__(self, burst=None, qps=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1WebhookThrottleConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._burst = None
self._qps = None
self.discriminator = None
if burst is not None:
self.burst = burst
if qps is not None:
self.qps = qps
@property
def burst(self):
"""Gets the burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:return: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._burst
@burst.setter
def burst(self, burst):
"""Sets the burst of this V1alpha1WebhookThrottleConfig.
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:param burst: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._burst = burst
@property
def qps(self):
"""Gets the qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:return: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._qps
@qps.setter
def qps(self, qps):
"""Sets the qps of this V1alpha1WebhookThrottleConfig.
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:param qps: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._qps = qps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return True
return self.to_dict() != other.to_dict()
|
apache-2.0
| -7,392,792,150,688,209,000
| 28.566667
| 124
| 0.583089
| false
| 3.988309
| true
| false
| false
|
adamcaudill/yawast
|
yawast/scanner/plugins/http/applications/wordpress.py
|
1
|
7344
|
# Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import re
from typing import Tuple, Union, List, cast
from urllib.parse import urljoin
from packaging import version
from requests import Response
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.evidence import Evidence
from yawast.scanner.plugins.http import version_checker, response_scanner
from yawast.scanner.plugins.result import Result
from yawast.shared import network, output
def identify(url: str) -> Tuple[Union[str, None], List[Result]]:
results = []
# find WordPress
res, path = _identify_by_path(url, "")
if path is None:
res, path = _identify_by_path(url, "blog/")
# check to see if we have a valid hit
if path is not None:
# we have a WordPress install, let's see if we can get a version
body = res.text
ver = "Unknown"
# this works for modern versions
m = re.search(r"login.min.css\?ver=\d+\.\d+\.?\d*", body)
if m:
ver = m.group(0).split("=")[1]
else:
# the current method doesn't work, fall back to an older method
m = re.search(r"load-styles.php\?[\w,;=&%]+;ver=\d+\.\d+\.?\d*", body)
if m:
ver = m.group(0).split("=")[-1]
# report that we found WordPress
results.append(
Result.from_evidence(
Evidence.from_response(res, {"version": ver}),
f"Found WordPress v{ver} at {path}",
Vulnerabilities.APP_WORDPRESS_VERSION,
)
)
# is this a current version?
ver = cast(version.Version, version.parse(ver))
curr_version = version_checker.get_latest_version("wordpress", ver)
if curr_version is not None and curr_version > ver:
results.append(
Result.from_evidence(
Evidence.from_response(
res,
                        {
                            "installed_version": str(ver),
                            "current_version": str(curr_version),
                        },
),
f"WordPress Outdated: {ver} - Current: {curr_version}",
Vulnerabilities.APP_WORDPRESS_OUTDATED,
)
)
return path, results
else:
return None, []
def check_path_disclosure(wp_url: str) -> List[Result]:
# this is a list of files that are known to throw a fatal error when accessed directly
# this is from a manual review of all plugins with at least 1M installs
urls = [
"wp-content/plugins/hello.php",
"wp-content/plugins/akismet/akismet.php",
"wp-content/plugins/contact-form-7/includes/capabilities.php",
"wp-content/plugins/wordpress-seo/admin/views/partial-alerts-errors.php",
"wp-content/plugins/jetpack/load-jetpack.php",
"wp-content/plugins/jetpack/uninstall.php",
"wp-content/plugins/duplicate-post/duplicate-post-admin.php",
"wp-content/plugins/wpforms-lite/includes/admin/class-welcome.php",
"wp-content/plugins/wp-google-maps/base/includes/welcome.php",
"wp-content/plugins/wp-super-cache/wp-cache.php",
"wp-content/plugins/mailchimp-for-wp/integrations/wpforms/bootstrap.php",
"wp-content/plugins/mailchimp-for-wp/integrations/bootstrap.php",
"wp-content/plugins/regenerate-thumbnails/regenerate-thumbnails.php",
"wp-content/plugins/advanced-custom-fields/includes/deprecated.php",
"wp-content/plugins/redirection/redirection.php",
"wp-content/plugins/wpforms-lite/includes/admin/importers/class-ninja-forms.php",
"wp-content/plugins/ninja-forms/includes/deprecated.php",
"wp-content/plugins/so-widgets-bundle/so-widgets-bundle.php",
"wp-content/plugins/wp-fastest-cache/templates/preload.php",
"wp-content/plugins/duplicate-page/duplicatepage.php",
"wp-content/plugins/better-wp-security/better-wp-security.php",
"wp-content/plugins/all-in-one-wp-security-and-firewall/other-includes/wp-security-unlock-request.php",
"wp-content/plugins/related-posts/views/settings.php",
"wp-content/plugins/wpcontentguard/views/settings.php",
"wp-content/plugins/simple-social-icons/simple-social-icons.php",
]
results: List[Result] = []
for url in urls:
target = urljoin(wp_url, url)
head = network.http_head(target, False)
if head.status_code != 404:
resp = network.http_get(target, False)
if resp.status_code < 300 or resp.status_code >= 500:
# we have some kind of response that could be useful
if "<b>Fatal error</b>:" in resp.text:
# we have an error
pattern = r"<b>((\/|[A-Z]:\\).*.php)<\/b>"
if re.search(pattern, resp.text):
try:
path = re.findall(pattern, resp.text)[0][0]
results.append(
Result.from_evidence(
Evidence.from_response(resp, {"path": path}),
f"WordPress File Path Disclosure: {target} ({path})",
Vulnerabilities.APP_WORDPRESS_PATH_DISCLOSURE,
)
)
except Exception:
output.debug_exception()
results += response_scanner.check_response(target, resp)
return results
def check_json_user_enum(url: str) -> List[Result]:
results = []
target = urljoin(url, "wp-json/wp/v2/users")
res = network.http_get(target, False)
body = res.text
if res.status_code < 300 and "slug" in body:
data = res.json()
# log the enum finding
results.append(
Result.from_evidence(
Evidence.from_response(res),
f"WordPress WP-JSON User Enumeration at {target}",
Vulnerabilities.APP_WORDPRESS_USER_ENUM_API,
)
)
# log the individual users
for user in data:
results.append(
Result.from_evidence(
Evidence.from_response(
res,
{
"user_id": user["id"],
"user_slug": user["slug"],
"user_name": user["name"],
},
),
f"ID: {user['id']}\tUser Slug: '{user['slug']}'\t\tUser Name: '{user['name']}'",
Vulnerabilities.APP_WORDPRESS_USER_FOUND,
)
)
return results
def _identify_by_path(url: str, path: str) -> Tuple[Response, Union[str, None]]:
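    # Probe for a WordPress login page under the given path prefix; return the
    # response and the resolved base URL (or None when WordPress is not found).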
target = urljoin(url, f"{path}wp-login.php")
res = network.http_get(target, False)
body = res.text
if res.status_code == 200 and "Powered by WordPress" in body:
return res, urljoin(url, path)
else:
return res, None
|
mit
| 6,941,804,610,785,816,000
| 38.483871
| 111
| 0.564134
| false
| 3.881607
| false
| false
| false
|
cysuncn/python
|
spark/crm/PROC_M_R_RET_CUST_FLOW.py
|
1
|
4734
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_M_R_RET_CUST_FLOW').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
# Use a HiveContext only when explicitly requested; otherwise fall back to a
# plain SQLContext (the original left sqlContext unbound on one branch).
if len(sys.argv) > 5 and sys.argv[5] == "hive":
    sqlContext = HiveContext(sc)
else:
    sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates needed for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
# MCRM_RET_CUST_FLOW incremental load: remove the current day's file first
ret = os.system("hdfs dfs -rm -r /"+dbname+"/MCRM_RET_CUST_FLOW/"+V_DT+".parquet")
MCRM_RET_CUST_ASSETS = sqlContext.read.parquet(hdfs+'/MCRM_RET_CUST_ASSETS/*')
MCRM_RET_CUST_ASSETS.registerTempTable("MCRM_RET_CUST_ASSETS")
ACRM_F_AG_AGREEMENT = sqlContext.read.parquet(hdfs+'/ACRM_F_AG_AGREEMENT/*')
ACRM_F_AG_AGREEMENT.registerTempTable("ACRM_F_AG_AGREEMENT")
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT CUST_ID AS CUST_ID
,FR_ID AS FR_ID
,MIN(concat(SUBSTR(START_DATE, 1, 4),'-',SUBSTR(START_DATE, 6, 2),'-',SUBSTR(START_DATE, 9, 2))) AS OPEN_DATE
,MAX(concat(SUBSTR(END_DATE, 1, 4),'-',SUBSTR(END_DATE, 6, 2),'-',SUBSTR(END_DATE, 9, 2))) AS CANCEL_DATE
       FROM ACRM_F_AG_AGREEMENT A --customer agreement table
GROUP BY FR_ID
,CUST_ID """
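# Substitute the quoted 10-character ETL date for the V_DT placeholder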
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_MCRM_RET_CUST_FLOW_01 = sqlContext.sql(sql)
TMP_MCRM_RET_CUST_FLOW_01.registerTempTable("TMP_MCRM_RET_CUST_FLOW_01")
dfn="TMP_MCRM_RET_CUST_FLOW_01/"+V_DT+".parquet"
TMP_MCRM_RET_CUST_FLOW_01.cache()
nrows = TMP_MCRM_RET_CUST_FLOW_01.count()
TMP_MCRM_RET_CUST_FLOW_01.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_MCRM_RET_CUST_FLOW_01.unpersist()
ACRM_F_AG_AGREEMENT.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_MCRM_RET_CUST_FLOW_01/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_MCRM_RET_CUST_FLOW_01 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
# Task [11] 001-02::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUST_ID AS CUST_ID
,A.CUST_ZH_NAME AS CUST_ZH_NAME
,A.CUST_MANAGER AS CUST_MANAGER
,A.CUST_MANAGER_NAME AS CUST_MANAGER_NAME
,A.ORG_ID AS ORG_ID
,A.ORG_NAME AS ORG_NAME
,A.CUST_LEVEL AS CUST_LEVEL
,A.GRADE_DATE AS GRADE_DATE
,B.OPEN_DATE AS OPEN_DATE
,C.CANCEL_DATE AS CANCEL_DATE
,A.MONTH_BAL AS CUST_ASSETS
,A.OLD_CUST_LEVEL AS CUST_LEVEL_FU
,A.ST_DATE AS ST_DATE
,'' AS O_MAIN_TYPE
,'' AS M_MAIN_TYPE
          FROM MCRM_RET_CUST_ASSETS A --customer asset status table
     LEFT JOIN TMP_MCRM_RET_CUST_FLOW_01 B --temp table 01: customer inflow/outflow by institution
            ON A.CUST_ID = B.CUST_ID
           AND B.FR_ID = A.FR_ID
           AND SUBSTR(B.OPEN_DATE, 1, 7) = SUBSTR(V_DT, 1, 7)
     LEFT JOIN TMP_MCRM_RET_CUST_FLOW_01 C --temp table 01: customer inflow/outflow by institution
ON A.CUST_ID = C.CUST_ID
AND C.FR_ID = A.FR_ID
AND SUBSTR(C.CANCEL_DATE, 1, 7) = SUBSTR(V_DT, 1, 7)
WHERE A.ST_DATE = V_DT """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
MCRM_RET_CUST_FLOW = sqlContext.sql(sql)
MCRM_RET_CUST_FLOW.registerTempTable("MCRM_RET_CUST_FLOW")
dfn="MCRM_RET_CUST_FLOW/"+V_DT+".parquet"
MCRM_RET_CUST_FLOW.cache()
nrows = MCRM_RET_CUST_FLOW.count()
MCRM_RET_CUST_FLOW.write.save(path=hdfs + '/' + dfn, mode='append')
MCRM_RET_CUST_FLOW.unpersist()
MCRM_RET_CUST_ASSETS.unpersist()
TMP_MCRM_RET_CUST_FLOW_01.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert MCRM_RET_CUST_FLOW lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
|
gpl-3.0
| 6,747,739,261,989,934,000
| 42.150943
| 177
| 0.580892
| false
| 2.453863
| false
| true
| false
|
thomastweets/PythonRSA
|
GUI_RSA.py
|
1
|
15279
|
############################
### GUI for RS analysis ###
############################
import wx
import rsa
import os
import webbrowser
files_number = 0
class RSA_GUI(wx.Frame):
def __init__(self, parent, title):
super(RSA_GUI,self).__init__(parent, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER, title = title, size = (400,275))
self.InitUI()
self.Show(True)
def InitUI(self):
## Creates Status Bar
self.CreateStatusBar()
self.menuBar = wx.MenuBar()
self.filemenu = wx.Menu()
self.helpmenu = wx.Menu()
self.menuHelp = self.helpmenu.Append(wx.ID_ANY, "&Help", "Learn more about RSA and how to use this program")
self.menuAbout = self.helpmenu.Append(wx.ID_ABOUT, "&About", "Learn more about this program")
self.menuClear = self.filemenu.Append(wx.ID_ANY,"&Clear","Clear data")
self.filemenu.AppendSeparator()
self.menuExit = self.filemenu.Append(wx.ID_EXIT, "&Exit", "Terminate the program")
self.menuBar.Append(self.filemenu, "&File")
self.menuBar.Append(self.helpmenu, "&Help")
self.SetMenuBar(self.menuBar)
self.Bind(wx.EVT_MENU, self.OnAbout, self.menuAbout)
self.Bind(wx.EVT_MENU, self.OnHelp, self.menuHelp)
self.Bind(wx.EVT_MENU, self.OnExit, self.menuExit)
self.Bind(wx.EVT_MENU, self.OnClear, self.menuClear)
## buttons
self.panel = wx.Panel(self)
self.main_box = wx.BoxSizer(wx.VERTICAL)
file_box = wx.BoxSizer(wx.HORIZONTAL)
file_button = wx.Button(self.panel, label = 'Select files', size = (90, 30))
file_box.Add(file_button)
self.file_text = wx.TextCtrl(self.panel)
self.file_text.Disable()
file_box.Add(self.file_text, proportion = 1, flag = wx.EXPAND | wx.LEFT, border = 5)
self.main_box.Add(file_box, flag = wx.EXPAND | wx.ALL, border = 10)
self.main_box.Add((-1,10))
label_box = wx.BoxSizer(wx.HORIZONTAL)
label_button = wx.Button(self.panel, label = 'Conditions', size = (90, 30))
label_box.Add(label_button)
self.label_text = wx.TextCtrl(self.panel)
self.label_text.Disable()
label_box.Add(self.label_text, proportion = 1, flag = wx.EXPAND | wx.LEFT, border = 5)
        self.main_box.Add(label_box, flag = wx.EXPAND | wx.RIGHT | wx.LEFT, border = 10)
self.main_box.Add((-1,30))
options_box = wx.BoxSizer(wx.HORIZONTAL)
options_button = wx.Button(self.panel, label='Options', size = (70, 30))
options_box.Add(options_button)
self.main_box.Add(options_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.main_box.Add((-1,10))
end_box = wx.BoxSizer(wx.HORIZONTAL)
self.go_btn = wx.Button(self.panel, label = 'Go', size = (70, 30))
self.go_btn.Disable()
end_box.Add(self.go_btn, flag = wx.BOTTOM, border = 5)
cancel_btn = wx.Button(self.panel, label = 'Cancel', size = (70, 30))
end_box.Add(cancel_btn, flag = wx.LEFT | wx.BOTTOM, border = 5)
self.main_box.Add(end_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.main_box)
self.Bind(wx.EVT_BUTTON, self.OnFiles, file_button)
self.Bind(wx.EVT_BUTTON, self.conditions, label_button)
self.Bind(wx.EVT_BUTTON, self.OnOptions, options_button)
self.go_btn.Bind(wx.EVT_BUTTON, self.OnGo)
self.Bind(wx.EVT_BUTTON, self.OnCancel, cancel_btn)
self.labels = []
self.files = []
self.Center()
def OnOptions(self, e):
self.new = OptionWindow(parent=None, id=-1)
self.new.Show()
def OnAbout(self, e):
dlg = wx.MessageDialog(self, "This is a program to perform a representational similarity analysis on functional magnetic resonance imaging data.\n\n"
"The analysis is following the principles described in the paper 'Representational Similarity Analysis - Connecting"
" the Branches of Systems Neuroscience' by Nikolaus Kriegeskorte, Marieke Mur and Peter Bandettini (2008). \n\nIt is the"
" result of a project work at Maastricht University by Pia Schroeder, Amelie Haugg and Julia Brehm under the supervision of Thomas Emmerling."
"\n\nFor correspondence please refer to https://github.com/thomastweets/PythonRSA", "About this program")
dlg.ShowModal()
dlg.Destroy()
def OnHelp(self, e):
webbrowser.open("https://github.com/thomastweets/PythonRSA/blob/master/README.md")
#dlg = wx.MessageDialog(self, "", "Help for this program")
#dlg.ShowModal()
#dlg.Destroy()
def OnExit(self, e):
self.Close(True)
def OnClear(self, e):
self.files = []
self.labels = []
self.file_text.ChangeValue(str(''))
self.label_text.ChangeValue(str(''))
rsa.matrix_plot1 = True
rsa.matrix_plot2 = False
rsa.bar_plot = False
rsa.correlations1 = False
rsa.correlations2 = False
rsa.pvalues = False
rsa.no_relabelings = 10000
rsa.dist_metric = 1
rsa.output_first = True
rsa.output_second = False
rsa.scale_to_max = False
global files_number
files_number = 0
self.go_btn.Disable()
def OnFiles(self, event):
dialog = wx.FileDialog(self, "Choose files:", os.getcwd(), " ","*.vom", wx.FD_OPEN|wx.FD_MULTIPLE)
self.files = []
if dialog.ShowModal() == wx.ID_OK:
self.paths = dialog.GetPaths()
# myfiles contains all the file names
for path in self.paths:
self.files.append(os.path.basename(path).encode("utf-8"))
global files_number
if len(self.files) > 1:
files_number = 1
else:
files_number = 0
if self.files:
self.file_text.ChangeValue(str(', '.join(self.files)))
self.go_btn.Enable()
dialog.Destroy()
def conditions(self, event):
self.textinput = wx.TextEntryDialog(self, "Type in condition names separated by a white space", "Condition labels")
if self.textinput.ShowModal() == wx.ID_OK:
self.input = self.textinput.GetValue()
# labels contains a list of all conditions
self.labels = self.input.split()
self.labels = [label.encode("utf-8") for label in self.labels]
if self.labels:
self.label_text.ChangeValue(str(', '.join(self.labels)))
self.textinput.Destroy()
def OnGo(self, e):
if self.labels == ['Tetris']:
import Tetris
else:
wait = wx.BusyCursor()
rsa.RSA(self.paths, self.files, self.labels)
del wait
def OnCancel(self, e):
self.Close(True)
class OptionWindow(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Options',
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MINIMIZE_BOX ^ wx.MAXIMIZE_BOX,
size=(400,500))
self.InitOpt()
def InitOpt(self):
self.panel = wx.Panel(self)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add((-1,20))
self.line1 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line1, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,10))
# Check box: First-order RDMs
self.RDM1_box = wx.BoxSizer(wx.HORIZONTAL)
self.RDM1_cb = wx.CheckBox(self.panel, label = 'First order RDMs')
self.RDM1_cb.SetValue(rsa.output_first)
self.RDM1_cb.Bind(wx.EVT_CHECKBOX, self.OnSelectRDM1)
self.RDM1_box.Add(self.RDM1_cb)
self.vbox.Add(self.RDM1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Matrix plots
self.mplot1_box = wx.BoxSizer(wx.HORIZONTAL)
self.mplot1_box.Add((25,-1))
self.mplot1_cb = wx.CheckBox(self.panel, label = 'Matrix plots')
self.mplot1_cb.SetValue(rsa.matrix_plot1)
self.mplot1_box.Add(self.mplot1_cb)
self.vbox.Add(self.mplot1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: First-order correlations
self.correlations1_box = wx.BoxSizer(wx.HORIZONTAL)
self.correlations1_box.Add((25,-1))
self.correlations1_cb = wx.CheckBox(self.panel, label = 'Correlations')
self.correlations1_cb.SetValue(rsa.correlations1)
self.correlations1_box.Add(self.correlations1_cb)
self.vbox.Add(self.correlations1_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Scale to maximum distance
self.scale_box = wx.BoxSizer(wx.HORIZONTAL)
self.scale_box.Add((25,-1))
self.scale_cb = wx.CheckBox(self.panel, label='Scale to max')
self.scale_cb.SetValue(rsa.scale_to_max)
self.scale_box.Add(self.scale_cb)
self.vbox.Add(self.scale_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Dropdown menu for distance metric
self.drop_box = wx.BoxSizer(wx.HORIZONTAL)
self.drop_box.Add((25,-1))
self.drop_label = wx.StaticText(self.panel, label = 'Distance metric ')
self.drop_box.Add(self.drop_label)
self.distances = ['Correlation distance', 'Euclidean distance', 'Absolute activation difference']
self.dropdown = wx.ComboBox(self.panel, value = self.distances[rsa.dist_metric-1], choices = self.distances, style=wx.CB_READONLY)
self.drop_box.Add(self.dropdown)
self.vbox.Add(self.drop_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,20))
self.line2 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line2, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,10))
# Check box: Second-order RDM
self.RDM2_box = wx.BoxSizer(wx.HORIZONTAL)
self.RDM2_cb = wx.CheckBox(self.panel, label = 'Second order RDMs')
self.RDM2_cb.SetValue(rsa.output_second)
self.RDM2_cb.Bind(wx.EVT_CHECKBOX, self.OnSelectRDM2)
self.RDM2_box.Add(self.RDM2_cb)
self.vbox.Add(self.RDM2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# only checkable if you have chosen enough files
self.RDM2_cb.Disable()
if files_number == 1:
self.RDM2_cb.Enable()
# Check box: Matrix plots
self.mplot2_box = wx.BoxSizer(wx.HORIZONTAL)
self.mplot2_box.Add((25,-1))
self.mplot2_cb = wx.CheckBox(self.panel, label = 'Matrix plots')
self.mplot2_cb.SetValue(rsa.matrix_plot2)
self.mplot2_box.Add(self.mplot2_cb)
self.vbox.Add(self.mplot2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Bar plots
self.bplot_box = wx.BoxSizer(wx.HORIZONTAL)
self.bplot_box.Add((25,-1))
self.bplot_cb = wx.CheckBox(self.panel, label = 'Bar plots')
self.bplot_cb.SetValue(rsa.bar_plot)
self.bplot_box.Add(self.bplot_cb)
self.vbox.Add(self.bplot_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: Second-order correlations
self.correlations2_box = wx.BoxSizer(wx.HORIZONTAL)
self.correlations2_box.Add((25,-1))
self.correlations2_cb = wx.CheckBox(self.panel, label = 'Correlations')
self.correlations2_cb.SetValue(rsa.correlations2)
self.correlations2_box.Add(self.correlations2_cb)
self.vbox.Add(self.correlations2_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# Check box: p-values
self.p_box = wx.BoxSizer(wx.HORIZONTAL)
self.p_box.Add((25,-1))
self.p_cb = wx.CheckBox(self.panel, label='p-values')
self.p_cb.SetValue(rsa.pvalues)
self.p_box.Add(self.p_cb)
self.vbox.Add(self.p_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
# No of permutations SpinControl
self.perm_box = wx.BoxSizer(wx.HORIZONTAL)
self.perm_box.Add((25,-1))
self.perm_label = wx.StaticText(self.panel, label = 'No. of Permutations ')
self.perm_box.Add(self.perm_label)
self.perm_spin = wx.SpinCtrl(self.panel, value=str(rsa.no_relabelings), min=100, max = 100000)
self.perm_box.Add(self.perm_spin, proportion = 1)
self.vbox.Add(self.perm_box, flag = wx.LEFT, border = 10)
self.vbox.Add((-1,10))
self.line3 = wx.StaticLine(self.panel, wx.ID_ANY, style=wx.LI_VERTICAL)
self.vbox.Add(self.line3, 0, wx.ALL | wx.EXPAND, 5)
self.vbox.Add((-1,50))
# Dis-/Enable options
self.OnSelectRDM1([])
self.OnSelectRDM2([])
# Done and Cancel Buttons
self.end_box = wx.BoxSizer(wx.HORIZONTAL)
self.done_btn = wx.Button(self.panel, label = 'Done', size = (70, 30))
self.done_btn.Bind(wx.EVT_BUTTON, self.OnDone)
self.end_box.Add(self.done_btn, flag = wx.BOTTOM, border = 5)
self.cancel_btn = wx.Button(self.panel, label = 'Cancel', size = (70, 30))
self.cancel_btn.Bind(wx.EVT_BUTTON, self.OnCancel)
self.end_box.Add(self.cancel_btn, flag = wx.LEFT | wx.BOTTOM, border = 5)
self.vbox.Add(self.end_box, flag = wx.ALIGN_RIGHT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.vbox)
self.Center()
def OnSelectRDM1(self,e):
if self.RDM1_cb.GetValue():
self.mplot1_cb.Enable()
self.correlations1_cb.Enable()
self.scale_cb.Enable()
self.dropdown.Enable()
else:
self.mplot1_cb.Disable()
self.correlations1_cb.Disable()
self.scale_cb.Disable()
self.dropdown.Disable()
def OnSelectRDM2(self,e):
if self.RDM2_cb.GetValue() and files_number == 1:
self.bplot_cb.Enable()
self.mplot2_cb.Enable()
self.p_cb.Enable()
self.correlations2_cb.Enable()
self.perm_spin.Enable()
else:
self.bplot_cb.Disable()
self.p_cb.Disable()
self.perm_spin.Disable()
self.mplot2_cb.Disable()
self.correlations2_cb.Disable()
def OnDone(self,e):
rsa.output_first = self.RDM1_cb.GetValue()
rsa.output_second = self.RDM2_cb.GetValue()
rsa.matrix_plot1 = self.mplot1_cb.GetValue()
rsa.matrix_plot2 = self.mplot2_cb.GetValue()
rsa.bar_plot = self.bplot_cb.GetValue()
rsa.correlations1 = self.correlations1_cb.GetValue()
rsa.correlations2 = self.correlations2_cb.GetValue()
rsa.pvalues = self.p_cb.GetValue()
rsa.scale_to_max = self.scale_cb.GetValue()
rsa.no_relabelings = self.perm_spin.GetValue()
rsa.dist_metric = self.dropdown.GetSelection()+1
self.Close()
def OnCancel(self,e):
self.Close()
def main():
GUI = wx.App()
RSA_GUI(None, 'RSA')
GUI.MainLoop()
if __name__ == '__main__':
main()
|
gpl-2.0
| 8,023,130,351,131,065,000
| 35.995157
| 179
| 0.59729
| false
| 3.290052
| false
| false
| false
|
Vijfhoek/oyoyo
|
oyoyo/cmdhandler.py
|
1
|
6875
|
# Copyright (c) 2008 Duncan Fordyce
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import inspect
import logging
import sys
import traceback
from oyoyo import helpers
from oyoyo.parse import parse_nick
# Python < 3 compatibility
if sys.version_info < (3,):
class bytes(object):
def __new__(self, b='', encoding='utf8'):
return str(b)
def protected(func):
""" decorator to protect functions from being called """
func.protected = True
return func
class CommandError(Exception):
def __init__(self, cmd):
self.cmd = cmd
class NoSuchCommandError(CommandError):
def __str__(self):
return 'No such command "%s"' % ".".join(self.cmd)
class ProtectedCommandError(CommandError):
def __str__(self):
return 'Command "%s" is protected' % ".".join(self.cmd)
class CommandHandler(object):
""" The most basic CommandHandler """
def __init__(self, client):
self.client = client
@protected
def get(self, in_command_parts):
""" finds a command
        Commands may be dotted. Each command part is checked to ensure it does
        not start with an underscore and does not have an attribute
        "protected". If either of these is true, ProtectedCommandError
        is raised.
        It's possible to pass both "command.sub.func" and
        ["command", "sub", "func"].
"""
if isinstance(in_command_parts, (str, bytes)):
in_command_parts = in_command_parts.split(bytes('.', 'ascii'))
command_parts = in_command_parts[:]
p = self
while command_parts:
cmd = command_parts.pop(0).decode('ascii')
if cmd.startswith('_'):
raise ProtectedCommandError(in_command_parts)
try:
f = getattr(p, cmd)
except AttributeError:
raise NoSuchCommandError(in_command_parts)
if hasattr(f, 'protected'):
raise ProtectedCommandError(in_command_parts)
if isinstance(f, CommandHandler) and command_parts:
return f.get(command_parts)
p = f
return f
@protected
def run(self, command, *args):
""" finds and runs a command """
logging.debug("processCommand %s(%s)" % (command, args))
try:
f = self.get(command)
except NoSuchCommandError:
self.__unhandled__(command, *args)
return
logging.debug('f %s' % f)
try:
f(*args)
except Exception, e:
logging.error('command raised %s' % e)
logging.error(traceback.format_exc())
raise CommandError(command)
@protected
def __unhandled__(self, cmd, *args):
"""The default handler for commands. Override this method to
        apply custom behavior (for example, printing) to unhandled commands.
"""
logging.debug('unhandled command %s(%s)' % (cmd, args))
class DefaultCommandHandler(CommandHandler):
""" CommandHandler that provides methods for the normal operation of IRC.
If you want your bot to properly respond to pings, etc, you should subclass this.
"""
def ping(self, prefix, server):
self.client.send('PONG', server)
class DefaultBotCommandHandler(CommandHandler):
""" default command handler for bots. methods/attributes are made
available as commands """
@protected
def getVisibleCommands(self, obj=None):
test = (lambda x: isinstance(x, CommandHandler) or \
inspect.ismethod(x) or inspect.isfunction(x))
members = inspect.getmembers(obj or self, test)
return [m for m, _ in members
if (not m.startswith('_') and
not hasattr(getattr(obj, m), 'protected'))]
def help(self, sender, dest, arg=None):
"""list all available commands or get help on a specific command"""
logging.info('help sender=%s dest=%s arg=%s' % (sender, dest, arg))
if not arg:
commands = self.getVisibleCommands()
commands.sort()
helpers.msg(self.client, dest,
"available commands: %s" % " ".join(commands))
else:
try:
f = self.get(arg)
except CommandError, e:
helpers.msg(self.client, dest, str(e))
return
doc = f.__doc__.strip() if f.__doc__ else "No help available"
if not inspect.ismethod(f):
subcommands = self.getVisibleCommands(f)
if subcommands:
doc += " [sub commands: %s]" % " ".join(subcommands)
helpers.msg(self.client, dest, "%s: %s" % (arg, doc))
class BotCommandHandler(DefaultCommandHandler):
""" complete command handler for bots """
def __init__(self, client, command_handler):
DefaultCommandHandler.__init__(self, client)
self.command_handler = command_handler
def privmsg(self, prefix, dest, msg):
self.tryBotCommand(prefix, dest, msg)
@protected
def tryBotCommand(self, prefix, dest, msg):
""" tests a command to see if its a command for the bot, returns True
and calls self.processBotCommand(cmd, sender) if its is.
"""
logging.debug("tryBotCommand('%s' '%s' '%s')" % (prefix, dest, msg))
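        # A private message to the bot, or a channel message prefixed with the
        # bot's nick, is treated as a command; anything else is ignored.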
if dest == self.client.nick:
dest = parse_nick(prefix)[0]
elif msg.startswith(self.client.nick):
msg = msg[len(self.client.nick)+1:]
else:
return False
msg = msg.strip()
parts = msg.split(' ', 1)
command = parts[0]
arg = parts[1:]
try:
self.command_handler.run(command, prefix, dest, *arg)
except CommandError, e:
helpers.msg(self.client, dest, str(e))
return True
|
mit
| -8,005,940,691,711,680,000
| 31.429245
| 85
| 0.613236
| false
| 4.291511
| false
| false
| false
|
RIKSOF/scspell-jenkins
|
scspell_lib/_util.py
|
1
|
1481
|
############################################################################
# scspell
# Copyright (C) 2009 Paul Pelzl
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
############################################################################
"""
_util -- utility functions which may be useful across the source tree.
"""
# Settings for this session
VERBOSITY_NORMAL = 1
VERBOSITY_DEBUG = 2
VERBOSITY_MAX = VERBOSITY_DEBUG
SETTINGS = {'verbosity' : VERBOSITY_NORMAL}
def mutter(level, text):
"""Print text to the console, if the level is not higher than the
current verbosity setting."""
if level <= SETTINGS['verbosity']:
print text
def set_verbosity(value):
"""Set the verbosity level to a given integral value. The constants
VERBOSITY_* are good choices."""
SETTINGS['verbosity'] = value
# scspell-id: b114984a-c7aa-40a8-9a53-b54fb6a52582
|
gpl-2.0
| 4,990,107,826,805,204,000
| 32.659091
| 76
| 0.654288
| false
| 3.991914
| false
| false
| false
|
robotgear/robotgear
|
robotgear/settings.py
|
1
|
3588
|
"""
Django settings for robotgear project.
Generated by 'django-admin startproject' using Django 1.11.7.
"""
import os
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'semanticuiforms',
'django_q',
'users',
'teams',
'posts'
]
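# Select a settings profile from the ROBOTGEAR_ENV environment variable
# (DEBUG, PROD or TEST); default to DEBUG when it is unset.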
try:
env = os.environ['ROBOTGEAR_ENV']
except KeyError:
env = 'DEBUG'
if env == 'DEBUG':
DEBUG = True
SECRET_KEY = '1$(%%u4n_(w%@6u&2%lgm^93-in4%8t&pd=o)0c_d(_n7(u&#@'
ALLOWED_HOSTS = []
INSTALLED_APPS += ['debug_toolbar', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db',
'PORT': '5432',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
elif env == 'PROD':
pass
elif env == 'TEST':
pass
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'robotgear.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
WSGI_APPLICATION = 'robotgear.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Configure custom user model
AUTH_USER_MODEL = 'users.User'
INTERNAL_IPS = ['127.0.0.1']  # debug_toolbar expects a list of addresses
LOGIN_URL = 'login'
Q_CLUSTER = {
'name': 'robotgear',
'workers': 2,
'recycle': 500,
'catch_up': False,
"ack_failures": True,
'retry': 100000,
'label': 'Task Queue',
'orm': 'default'
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'unix:/tmp/memcached.sock',
}
}
|
mit
| 421,187,835,858,788,700
| 21.566038
| 91
| 0.62709
| false
| 3.410646
| false
| false
| false
|
commonsense/divisi
|
csc/divisi/flavors.py
|
1
|
5345
|
from csc.divisi.tensor import DictTensor
from csc.divisi.ordered_set import OrderedSet
from csc.divisi.labeled_view import LabeledView
def add_triple_to_matrix(matrix, triple, value=1.0):
'''
Adds a triple (left, relation, right) to the matrix in the 2D unfolded format.
This is the new add_assertion_tuple.
'''
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
matrix.inc((left, rfeature), value)
matrix.inc((right, lfeature), value)
def set_triple_in_matrix(matrix, triple, value=1.0):
''' Sets a triple (left, relation, right) in the matrix in the 2D
unfolded format to the specified value.
'''
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
matrix[left, rfeature] = value
matrix[right, lfeature] = value
###
### Assertion Tensors
###
class AssertionTensor(LabeledView):
'''
All AssertionTensors have the following functions:
.add_triple(triple, value)
.set_triple(triple, value)
.add_identity(text, value=1.0, relation='Identity')
where triple is (concept1, relation, concept2).
They also have the convenience classmethod from_triples.
'''
def add_identity(self, text, value=1.0, relation='Identity'):
self.add_triple((text, relation, text), value)
def bake(self):
'''
Simplify the representation.
'''
return LabeledView(self.tensor, self._labels)
def add_triples(self, triples, accumulate=True, constant_weight=None):
if accumulate: add = self.add_triple
else: add = self.set_triple
if constant_weight:
for triple in triples:
add(triple, constant_weight)
else:
for triple, weight in triples:
add(triple, weight)
@classmethod
def from_triples(cls, triples, accumulate=True, constant_weight=None):
mat = cls()
mat.add_triples(triples, accumulate, constant_weight)
return mat
def add_identities(self, value=1.0, relation='Identity'):
if not value: return # 0 or False means not to actually add identities.
for concept in self.concepts():
self.add_triple((concept, relation, concept), value)
class ConceptByFeatureMatrix(AssertionTensor):
'''
This is the typical AnalogySpace matrix. It stores each assertion
twice: once as (c1, ('right', rel, c2)) and once as (c2, ('left',
rel, c1)).
This class is a convenience for building matrices in this
format. Once you've add_triple'sed everything, you can call
.bake() to convert it back to a plain old LabeledView of a
DictTensor, just like make_sparse_labeled_tensor does.
'''
def __init__(self):
super(ConceptByFeatureMatrix, self).__init__(
DictTensor(2), [OrderedSet() for _ in '01'])
add_triple = add_triple_to_matrix
set_triple = set_triple_in_matrix
def concepts(self): return self.label_list(0)
class FeatureByConceptMatrix(AssertionTensor):
'''
A transposed ConceptByFeatureMatrix; see it for documentation.
'''
def __init__(self):
super(FeatureByConceptMatrix, self).__init__(
DictTensor(2), [OrderedSet() for _ in '01'])
def add_triple(self, triple, value=1.0):
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
self.inc((rfeature, left), value)
self.inc((lfeature, right), value)
def set_triple(self, triple, value=1.0):
left, relation, right = triple
lfeature = ('left', relation, left)
rfeature = ('right', relation, right)
self[rfeature, left] = value
self[lfeature, right] = value
def concepts(self): return self.label_list(1)
class ConceptRelationConceptTensor(AssertionTensor):
'''
This is a straightforward encoding of concepts as a 3D tensor.
'''
def __init__(self):
# FIXME: yes this saves space, but it might make a row or column be zero.
concepts, relations = OrderedSet(), OrderedSet()
super(ConceptRelationConceptTensor, self).__init__(
DictTensor(3), [concepts, relations, concepts])
def concepts(self): return self.label_list(0)
def add_triple(self, triple, value=1.0):
left, relation, right = triple
self.inc((left, relation, right), value)
def set_triple(self, triple, value=1.0):
left, relation, right = triple
self[left, relation, right] = value
class MirroringCRCTensor(ConceptRelationConceptTensor):
'''
Every assertion (c1, r, c2) in this tensor has an inverse,
(c2, r', c1).
This is analogous to how the 2D tensor makes left and right features.
Inverse relations are constructed from ordinary relations by
prefixing a '-'.
'''
def add_triple(self, triple, value=1.0):
left, relation, right = triple
self.inc((left, relation, right), value) # normal
self.inc((right, '-'+relation, left), value) # inverse
def set_triple(self, triple, value=1.0):
left, relation, right = triple
self[left, relation, right] = value
        self[right, '-'+relation, left] = value  # inverse, mirroring add_triple
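# Minimal usage sketch (illustrative; the concepts and weights are made up):
if __name__ == '__main__':
    triples = [(('dog', 'IsA', 'pet'), 1.0), (('cat', 'IsA', 'pet'), 1.0)]
    matrix = ConceptByFeatureMatrix.from_triples(triples)
    baked = matrix.bake()  # plain LabeledView of a DictTensor, per the docstring
    print(list(matrix.concepts()))  # concepts in order of first use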
|
gpl-3.0
| -1,865,229,490,018,606,000
| 32.198758
| 82
| 0.637605
| false
| 3.80427
| false
| false
| false
|
moyaproject/moya
|
moya/elements/registry.py
|
1
|
4712
|
from __future__ import unicode_literals
from .. import errors
from ..tools import extract_namespace
from .. import namespaces
from ..compat import itervalues
from collections import defaultdict
import inspect
class Meta(object):
logic_skip = False
virtual_tag = False
is_call = False
is_try = False
is_loop = False
app_first_arg = False
text_nodes = None
trap_exceptions = False
translate = False
class ElementRegistry(object):
default_registry = None
_registry_stack = []
def clear(self):
self._registry.clear()
self._dynamic_elements.clear()
del self._registry_stack[:]
@classmethod
def push_registry(cls, registry):
cls._registry_stack.append(registry)
@classmethod
def pop_registry(cls):
cls._registry_stack.pop()
@classmethod
def get_default(cls):
return cls._registry_stack[-1]
def __init__(self, update_from_default=True):
self._registry = defaultdict(dict)
self._dynamic_elements = {}
if update_from_default:
self._registry.update(self.default_registry._registry)
self._dynamic_elements.update(self.default_registry._dynamic_elements)
def clone(self):
"""Return a copy of this registry"""
registry = ElementRegistry(update_from_default=False)
registry._registry = self._registry.copy()
registry._dynamic_elements = self._dynamic_elements.copy()
return registry
def set_default(self):
"""Reset this registry to the default registry (before project loaded)"""
self._registry = self.default_registry._registry.copy()
self._dynamic_elements = self.default_registry._dynamic_elements.copy()
def register_element(self, xmlns, name, element):
"""Add a dynamic element to the element registry"""
xmlns = xmlns or namespaces.run
if name in self._registry[xmlns]:
element_class = self._registry[xmlns][name]
definition = getattr(element_class, "_location", None)
if definition is None:
definition = inspect.getfile(element_class)
if xmlns:
raise errors.ElementError(
'<{}> already registered in "{}" for xmlns "{}"'.format(
name, definition, xmlns
),
element=getattr(element, "element", element),
)
else:
raise errors.ElementError(
'<{}/> already registered in "{}"'.format(name, definition),
element=element,
)
self._registry[xmlns][name] = element
def add_dynamic_registry(self, xmlns, element_callable):
"""Add a dynamic registry (element factory)"""
self._dynamic_elements[xmlns] = element_callable
def clear_registry(self):
"""Clear the registry (called on archive reload)"""
self._registry.clear()
def get_elements_in_xmlns(self, xmlns):
"""Get all elements defined within a given namespace"""
return self._registry.get(xmlns, {})
def get_elements_in_lib(self, long_name):
"""Get all elements defined by a given library"""
lib_elements = []
for namespace in itervalues(self._registry):
lib_elements.extend(
element
for element in itervalues(namespace)
if element._lib_long_name == long_name
)
return lib_elements
def get_element_type(self, xmlns, name):
"""Get an element by namespace and name"""
if xmlns in self._dynamic_elements:
return self._dynamic_elements[xmlns](name)
return self._registry.get(xmlns, {}).get(name, None)
def find_xmlns(self, name):
"""Find the xmlns with contain a given tag, or return None"""
for xmlns in sorted(self._registry.keys()):
if name in self._registry[xmlns]:
return xmlns
return None
def check_namespace(self, xmlns):
"""Check if a namespace exists in the registry"""
return xmlns in self._registry
def set_registry(self, registry):
"""Restore a saved registry"""
self._registry = registry._registry.copy()
self._dynamic_elements = registry._dynamic_elements.copy()
def get_tag(self, tag):
"""Get a tag from it's name (in Clarke's notation)"""
return self.get_element_type(*extract_namespace(tag))
default_registry = ElementRegistry.default_registry = ElementRegistry(
update_from_default=False
)
ElementRegistry.push_registry(ElementRegistry.default_registry)
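# Illustrative sketch (not part of the original module): registering and
# looking up a tag; `MyTag` is a hypothetical element class.
#
#   registry = ElementRegistry.get_default()
#   registry.register_element(namespaces.run, "mytag", MyTag)
#   registry.get_element_type(namespaces.run, "mytag")  # -> MyTag
#   registry.find_xmlns("mytag")                        # -> namespaces.run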
|
mit
| -1,306,243,355,464,368,600
| 31.951049
| 82
| 0.610781
| false
| 4.543877
| false
| false
| false
|
cmcqueen/simplerandom
|
python/python3/simplerandom/iterators/_iterators_py.py
|
1
|
40947
|
from simplerandom._bitcolumnmatrix import BitColumnMatrix
__all__ = [
"Cong",
"SHR3",
"MWC1",
"MWC2",
"MWC64",
"KISS",
"KISS2",
"LFSR113",
"LFSR88",
"_traverse_iter",
]
def _traverse_iter(o, tree_types=(list, tuple)):
"""Iterate over nested containers and/or iterators.
This allows generator __init__() functions to be passed seeds either as
a series of arguments, or as a list/tuple.
"""
SIMPLERANDOM_BITS = 32
SIMPLERANDOM_MOD = 2**SIMPLERANDOM_BITS
SIMPLERANDOM_MASK = SIMPLERANDOM_MOD - 1
if isinstance(o, tree_types) or getattr(o, '__iter__', False):
for value in o:
for subvalue in _traverse_iter(value):
while True:
yield subvalue & SIMPLERANDOM_MASK
subvalue >>= SIMPLERANDOM_BITS
# If value is negative, then it effectively has infinitely extending
# '1' bits (modelled as a 2's complement representation). So when
# right-shifting it, it will eventually get to -1, and any further
# right-shifting will not change it.
if subvalue == 0 or subvalue == -1:
break
else:
yield o
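# Example (sketch): nested seeds are flattened and split into 32-bit words,
# so list(_traverse_iter([1, [2, 2**40]])) == [1, 2, 0, 256]
# (2**40 yields its low word 0, then its high word 2**8).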
def _repeat_iter(input_iter):
"""Iterate over the input iter values. Then repeat the last value
indefinitely. This is useful to repeat seed values when an insufficient
number of seeds are provided.
E.g. KISS(1) effectively becomes KISS(1, 1, 1, 1), rather than (if we just
used default values) KISS(1, default-value, default-value, default-value)
It is better to repeat the last seed value, rather than just using default
values. Given two generators seeded with an insufficient number of seeds,
repeating the last seed value means their states are more different from
each other, with less correlation between their generated outputs.
"""
last_value = None
for value in input_iter:
last_value = value
yield value
if last_value is not None:
while True:
yield last_value
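# Example (sketch): itertools.islice(_repeat_iter(iter([10, 20])), 5)
# yields 10, 20, 20, 20, 20 -- the last seed value repeats indefinitely.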
def _next_seed_int32_or_default(seed_iter, default_value):
try:
seed_item = next(seed_iter)
except StopIteration:
return default_value
else:
if seed_item is None:
return default_value
else:
return (int(seed_item) & 0xFFFFFFFF)
def _geom_series_uint32(r, n):
"""Unsigned integer calculation of sum of geometric series:
1 + r + r^2 + r^3 + ... r^(n-1)
summed to n terms.
Calculated modulo 2**32.
Use the formula (r**n - 1) / (r - 1)
"""
if n == 0:
return 0
if n == 1 or r == 0:
return 1
m = 2**32
# Split (r - 1) into common factors with the modulo 2**32 -- i.e. all
# factors of 2; and other factors which are coprime with the modulo 2**32.
other_factors = r - 1
common_factor = 1
while (other_factors % 2) == 0:
other_factors //= 2
common_factor *= 2
other_factors_inverse = pow(other_factors, m - 1, m)
numerator = pow(r, n, common_factor * m) - 1
return (numerator // common_factor * other_factors_inverse) % m
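# Worked example (sketch): _geom_series_uint32(69069, 2) == 69070, i.e.
# 1 + 69069, matching (r**n - 1) / (r - 1) for r=69069, n=2 (mod 2**32).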
class Cong(object):
'''Congruential random number generator
This is a congruential generator with the widely used
69069 multiplier: x[n]=69069x[n-1]+12345. It has
period 2**32.
The leading half of its 32 bits seem to pass tests,
but bits in the last half are too regular. It fails
tests for which those bits play a significant role.
But keep in mind that it is a rare application for
which the trailing bits play a significant role. Cong
is one of the most widely used generators of the last
30 years, as it was the system generator for VAX and
was incorporated in several popular software packages,
all seemingly without complaint.
'''
SIMPLERANDOM_MOD = 2**32
SIMPLERANDOM_MAX = 2**32 - 1
CONG_CYCLE_LEN = 2**32
CONG_MULT = 69069
CONG_CONST = 12345
@staticmethod
def min():
return 0
@staticmethod
def max():
return Cong.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
self.cong = _next_seed_int32_or_default(seed_iter, 0)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
pass
def __next__(self):
self.cong = (69069 * self.cong + 12345) & 0xFFFFFFFF
return self.cong
def current(self):
return self.cong
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
self.cong ^= value_int
next(self)
return self.cong
def __iter__(self):
return self
def getstate(self):
return (self.cong, )
def setstate(self, state):
(self.cong, ) = (int(val) & 0xFFFFFFFF for val in state)
def jumpahead(self, n):
# Cong.jumpahead(n) = r**n * x mod 2**32 +
# c * (1 + r + r**2 + ... + r**(n-1)) mod 2**32
# where r = 69069 and c = 12345.
#
# The part c * (1 + r + r**2 + ... + r**(n-1)) is a geometric series.
# For calculating geometric series mod 2**32, see:
# http://www.codechef.com/wiki/tutorial-just-simple-sum#Back_to_the_geometric_series
n = int(n) % self.CONG_CYCLE_LEN
mult_exp = pow(self.CONG_MULT, n, self.SIMPLERANDOM_MOD)
add_const = (_geom_series_uint32(self.CONG_MULT, n) * self.CONG_CONST) & 0xFFFFFFFF
self.cong = (mult_exp * self.cong + add_const) & 0xFFFFFFFF
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.cong)) + ")"
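# Consistency sketch (illustrative): jumpahead(n) agrees with n next() calls.
#   a, b = Cong(12345), Cong(12345)
#   for _ in range(1000):
#       next(a)
#   b.jumpahead(1000)
#   assert a.current() == b.current()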
class SHR3(object):
'''3-shift-register random number generator
SHR3 is a 3-shift-register generator with period
2**32-1. It uses y[n]=y[n-1](I+L^13)(I+R^17)(I+L^5),
with the y's viewed as binary vectors, L the 32x32
binary matrix that shifts a vector left 1, and R its
transpose.
    SHR3 seems to pass all tests except those related to the
binary rank test, since 32 successive values, as
binary vectors, must be linearly independent, while
32 successive truly random 32-bit integers, viewed
as binary vectors, will be linearly independent only
about 29% of the time.
'''
SIMPLERANDOM_MOD = 2**32
SIMPLERANDOM_MAX = 2**32 - 1
SHR3_CYCLE_LEN = 2**32 - 1
_SHR3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_SHR3_MATRIX_b = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,-17)
_SHR3_MATRIX_c = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,5)
_SHR3_MATRIX = _SHR3_MATRIX_c * _SHR3_MATRIX_b * _SHR3_MATRIX_a
@staticmethod
def min():
return 1
@staticmethod
def max():
return SHR3.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
self.shr3 = _next_seed_int32_or_default(seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
if self.shr3 == 0:
# 0 is a bad seed. Invert to get a good seed.
self.shr3 = 0xFFFFFFFF
def __next__(self):
shr3 = self.shr3
shr3 ^= (shr3 & 0x7FFFF) << 13
shr3 ^= shr3 >> 17
shr3 ^= (shr3 & 0x7FFFFFF) << 5
self.shr3 = shr3
return shr3
def current(self):
return self.shr3
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
self.shr3 ^= value_int
self.sanitise()
next(self)
return self.shr3
def __iter__(self):
return self
def getstate(self):
return (self.shr3, )
def setstate(self, state):
(self.shr3, ) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
n = int(n) % self.SHR3_CYCLE_LEN
shr3 = pow(self._SHR3_MATRIX, n) * self.shr3
self.shr3 = shr3
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.shr3)) + ")"
class MWC2(object):
'''"Multiply-with-carry" random number generator
Very similar to MWC1, except that it concatenates the
two 16-bit MWC generators differently. The 'x'
generator is rotated 16 bits instead of just shifted
16 bits.
This gets much better test results than MWC1 in
L'Ecuyer's TestU01 test suite, so it should probably
be preferred.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_MWC_UPPER_MULT = 36969
_MWC_LOWER_MULT = 18000
_MWC_UPPER_MODULO = _MWC_UPPER_MULT * 2**16 - 1
_MWC_LOWER_MODULO = _MWC_LOWER_MULT * 2**16 - 1
_MWC_UPPER_CYCLE_LEN = _MWC_UPPER_MULT * 2**16 // 2 - 1
_MWC_LOWER_CYCLE_LEN = _MWC_LOWER_MULT * 2**16 // 2 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return MWC2.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.mwc_upper = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.mwc_lower = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self._sanitise_upper()
self._sanitise_lower()
def _sanitise_upper(self):
mwc_upper_orig = self.mwc_upper
# There are a few bad states--that is, any multiple of
# _MWC_UPPER_MODULO -- that is 0x9068FFFF (which is 36969 * 2**16 - 1).
sanitised_value = mwc_upper_orig % 0x9068FFFF
if sanitised_value == 0:
# Invert to get a good seed.
sanitised_value = (mwc_upper_orig ^ 0xFFFFFFFF) % 0x9068FFFF
self.mwc_upper = sanitised_value
def _sanitise_lower(self):
mwc_lower_orig = self.mwc_lower
# There are a few bad states--that is, any multiple of
# _MWC_LOWER_MODULO -- that is 0x464FFFFF (which is 18000 * 2**16 - 1).
sanitised_value = mwc_lower_orig % 0x464FFFFF
if sanitised_value == 0:
# Invert to get a good seed.
sanitised_value = (mwc_lower_orig ^ 0xFFFFFFFF) % 0x464FFFFF
self.mwc_lower = sanitised_value
def _next_upper(self):
self.mwc_upper = 36969 * (self.mwc_upper & 0xFFFF) + (self.mwc_upper >> 16)
def _next_lower(self):
self.mwc_lower = 18000 * (self.mwc_lower & 0xFFFF) + (self.mwc_lower >> 16)
def __next__(self):
# Note: this is apparently equivalent to:
# self.mwc_upper = (36969 * self.mwc_upper) % 0x9068FFFF
# self.mwc_lower = (18000 * self.mwc_lower) % 0x464FFFFF
# See Random Number Generation, Pierre L’Ecuyer, section 3.6 Linear Recurrences With Carry
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.136.6898&rep=rep1&type=pdf
self.mwc_upper = 36969 * (self.mwc_upper & 0xFFFF) + (self.mwc_upper >> 16)
self.mwc_lower = 18000 * (self.mwc_lower & 0xFFFF) + (self.mwc_lower >> 16)
return self.current() # call self.current() so that MWC1 can over-ride it
def current(self):
return (((self.mwc_upper & 0xFFFF) << 16) + (self.mwc_upper >> 16) + self.mwc_lower) & 0xFFFFFFFF
mwc = property(current) # Note that this must be over-ridden again in MWC1
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x1
if selector == 0:
self.mwc_upper ^= value_int
self._sanitise_upper()
self._next_upper()
else:
self.mwc_lower ^= value_int
self._sanitise_lower()
self._next_lower()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.mwc_upper, self.mwc_lower)
def setstate(self, state):
(self.mwc_upper, self.mwc_lower) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
# See next() note on functional equivalence.
n_upper = int(n) % self._MWC_UPPER_CYCLE_LEN
self.mwc_upper = pow(self._MWC_UPPER_MULT, n_upper, self._MWC_UPPER_MODULO) * self.mwc_upper % self._MWC_UPPER_MODULO
n_lower = int(n) % self._MWC_LOWER_CYCLE_LEN
self.mwc_lower = pow(self._MWC_LOWER_MULT, n_lower, self._MWC_LOWER_MODULO) * self.mwc_lower % self._MWC_LOWER_MODULO
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) + "," + repr(int(self.mwc_lower)) + ")"
class MWC1(MWC2):
'''"Multiply-with-carry" random number generator
This is the MWC as defined in Marsaglia's 1999
newsgroup post.
This uses two MWC generators to generate high and
low 16-bit parts, which are then combined to make a
32-bit value.
The MWC generator concatenates two 16-bit multiply-
with-carry generators:
    x[n]=36969x[n-1]+carry mod 2**16,
    y[n]=18000y[n-1]+carry mod 2**16.
    It has a period of about 2**60.
This seems to pass all Marsaglia's Diehard tests.
However, it fails many of L'Ecuyer's TestU01
tests. The modified MWC2 generator passes many more
tests in TestU01, and should probably be preferred,
unless backwards compatibility is required.
'''
def current(self):
return (((self.mwc_upper & 0xFFFF) << 16) + self.mwc_lower) & 0xFFFFFFFF
# We have to over-ride this again, because of the way property() works.
mwc = property(current)
class MWC64(object):
'''"Multiply-with-carry" random number generator
This uses a single MWC generator with 64 bits to
generate a 32-bit value. The seeds should be 32-bit
values.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_MWC64_MULT = 698769069
_MWC64_MODULO = _MWC64_MULT * 2**32 - 1
_MWC64_CYCLE_LEN = _MWC64_MULT * 2**32 // 2 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return MWC64.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.mwc_upper = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.mwc_lower = _next_seed_int32_or_default(repeat_seed_iter, 0xFFFFFFFF)
self.sanitise()
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
state64 = (self.mwc_upper << 32) + self.mwc_lower
temp = state64
was_changed = False
# There are a few bad seeds--that is, seeds that are a multiple of
# 0x29A65EACFFFFFFFF (which is 698769069 * 2**32 - 1).
if state64 >= 0x29A65EACFFFFFFFF:
was_changed = True
temp = state64 % 0x29A65EACFFFFFFFF
if temp == 0:
# Invert to get a good seed.
temp = (state64 ^ 0xFFFFFFFFFFFFFFFF) % 0x29A65EACFFFFFFFF
was_changed = True
if was_changed:
self.mwc_upper = temp >> 32
self.mwc_lower = temp & 0xFFFFFFFF
def __next__(self):
# Note: this is apparently equivalent to:
# temp64 = (self.mwc_upper << 32) + self.mwc_lower
# temp64 = (698769069 * temp64) % 0x29A65EACFFFFFFFF
# See reference in MWC2.next().
temp64 = 698769069 * self.mwc_lower + self.mwc_upper
self.mwc_lower = temp64 & 0xFFFFFFFF
self.mwc_upper = (temp64 >> 32) & 0xFFFFFFFF
return self.mwc_lower
def current(self):
return self.mwc_lower
mwc = property(current)
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x1
if selector == 0:
self.mwc_upper ^= value_int
else:
self.mwc_lower ^= value_int
self.sanitise()
next(self)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.mwc_upper, self.mwc_lower)
def setstate(self, state):
(self.mwc_upper, self.mwc_lower) = (int(val) & 0xFFFFFFFF for val in state)
self.sanitise()
def jumpahead(self, n):
# See MWC2.next() note on functional equivalence.
n = int(n) % self._MWC64_CYCLE_LEN
temp64 = (self.mwc_upper << 32) + self.mwc_lower
temp64 = pow(self._MWC64_MULT, n, self._MWC64_MODULO) * temp64 % self._MWC64_MODULO
self.mwc_lower = temp64 & 0xFFFFFFFF
self.mwc_upper = (temp64 >> 32) & 0xFFFFFFFF
def __repr__(self):
return self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) + "," + repr(int(self.mwc_lower)) + ")"
class KISS(object):
'''"Keep It Simple Stupid" random number generator
It combines the MWC2, Cong, SHR3 generators. Period is
about 2**123.
This is based on, but not identical to, Marsaglia's
KISS generator as defined in his 1999 newsgroup post.
That generator most significantly has problems with its
SHR3 component (see notes on SHR3). Since we are not
keeping compatibility with the 1999 KISS generator for
that reason, we take the opportunity to slightly
update the MWC and Cong generators too.
'''
SIMPLERANDOM_MAX = 2**32 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return KISS.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.random_mwc = MWC2(repeat_seed_iter)
self.random_cong = Cong(repeat_seed_iter)
self.random_shr3 = SHR3(repeat_seed_iter)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def __next__(self):
mwc_val = next(self.random_mwc)
cong_val = next(self.random_cong)
shr3_val = next(self.random_shr3)
return ((mwc_val ^ cong_val) + shr3_val) & 0xFFFFFFFF
def current(self):
return ((self.random_mwc.current() ^ self.random_cong.cong) + self.random_shr3.shr3) & 0xFFFFFFFF
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x3
if selector == 0:
self.random_mwc.mwc_upper ^= value_int
self.random_mwc._sanitise_upper()
self.random_mwc._next_upper()
elif selector == 1:
self.random_mwc.mwc_lower ^= value_int
self.random_mwc._sanitise_lower()
self.random_mwc._next_lower()
elif selector == 2:
self.random_cong.cong ^= value_int
# Cong doesn't need any sanitising
next(self.random_cong)
else: # selector == 3
self.random_shr3.shr3 ^= value_int
self.random_shr3.sanitise()
next(self.random_shr3)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.random_mwc.getstate(), self.random_cong.getstate(), self.random_shr3.getstate())
def setstate(self, state):
(mwc_state, cong_state, shr3_state) = state
self.random_mwc.setstate(mwc_state)
self.random_cong.setstate(cong_state)
self.random_shr3.setstate(shr3_state)
def jumpahead(self, n):
self.random_mwc.jumpahead(n)
self.random_cong.jumpahead(n)
self.random_shr3.jumpahead(n)
def _get_mwc_upper(self):
return self.random_mwc.mwc_upper
def _set_mwc_upper(self, value):
self.random_mwc.mwc_upper = value
mwc_upper = property(_get_mwc_upper, _set_mwc_upper)
def _get_mwc_lower(self):
return self.random_mwc.mwc_lower
def _set_mwc_lower(self, value):
self.random_mwc.mwc_lower = value
mwc_lower = property(_get_mwc_lower, _set_mwc_lower)
def _get_mwc(self):
return self.random_mwc.current()
mwc = property(_get_mwc)
def _get_shr3(self):
return self.random_shr3.shr3
def _set_shr3(self, value):
self.random_shr3.shr3 = value
shr3 = property(_get_shr3, _set_shr3)
def _get_cong(self):
return self.random_cong.cong
def _set_cong(self, value):
self.random_cong.cong = value
cong = property(_get_cong, _set_cong)
def __repr__(self):
return (self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) +
"," + repr(int(self.mwc_lower)) +
"," + repr(int(self.cong)) +
"," + repr(int(self.shr3)) + ")")
class KISS2(object):
'''"Keep It Simple Stupid" random number generator
It combines the MWC64, Cong, SHR3 generators. Period
is about 2**123.
This is a slightly updated KISS generator design, from
a newsgroup post in 2003:
http://groups.google.com/group/sci.math/msg/9959175f66dd138f
The MWC component uses a single 64-bit calculation,
instead of two 32-bit calculations that are combined.
'''
SIMPLERANDOM_MAX = 2**32 - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return KISS2.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.random_mwc = MWC64(repeat_seed_iter)
self.random_cong = Cong(repeat_seed_iter)
self.random_shr3 = SHR3(repeat_seed_iter)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def __next__(self):
mwc_val = next(self.random_mwc)
cong_val = next(self.random_cong)
shr3_val = next(self.random_shr3)
return (mwc_val + cong_val + shr3_val) & 0xFFFFFFFF
def current(self):
return (self.random_mwc.current() + self.random_cong.cong + self.random_shr3.shr3) & 0xFFFFFFFF
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 24) & 0x3
if selector == 0:
self.random_mwc.mwc_upper ^= value_int
self.random_mwc.sanitise()
next(self.random_mwc)
elif selector == 1:
self.random_mwc.mwc_lower ^= value_int
self.random_mwc.sanitise()
next(self.random_mwc)
elif selector == 2:
self.random_cong.cong ^= value_int
# Cong doesn't need any sanitising
next(self.random_cong)
else: # selector == 3
self.random_shr3.shr3 ^= value_int
self.random_shr3.sanitise()
next(self.random_shr3)
return self.current()
def __iter__(self):
return self
def getstate(self):
return (self.random_mwc.getstate(), self.random_cong.getstate(), self.random_shr3.getstate())
def setstate(self, state):
(mwc_state, cong_state, shr3_state) = state
self.random_mwc.setstate(mwc_state)
self.random_cong.setstate(cong_state)
self.random_shr3.setstate(shr3_state)
def jumpahead(self, n):
self.random_mwc.jumpahead(n)
self.random_cong.jumpahead(n)
self.random_shr3.jumpahead(n)
def _get_mwc_upper(self):
return self.random_mwc.mwc_upper
def _set_mwc_upper(self, value):
self.random_mwc.mwc_upper = value
mwc_upper = property(_get_mwc_upper, _set_mwc_upper)
def _get_mwc_lower(self):
return self.random_mwc.mwc_lower
def _set_mwc_lower(self, value):
self.random_mwc.mwc_lower = value
mwc_lower = property(_get_mwc_lower, _set_mwc_lower)
def _get_mwc(self):
return self.random_mwc.mwc
mwc = property(_get_mwc)
def _get_shr3(self):
return self.random_shr3.shr3
def _set_shr3(self, value):
self.random_shr3.shr3 = value
shr3 = property(_get_shr3, _set_shr3)
def _get_cong(self):
return self.random_cong.cong
def _set_cong(self, value):
self.random_cong.cong = value
cong = property(_get_cong, _set_cong)
def __repr__(self):
return (self.__class__.__name__ + "(" + repr(int(self.mwc_upper)) +
"," + repr(int(self.mwc_lower)) +
"," + repr(int(self.cong)) +
"," + repr(int(self.shr3)) + ")")
def lfsr_next_one_seed(seed_iter, min_value_shift):
"""High-quality seeding for LFSR generators.
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
In case generators are seeded from an incrementing input (such as a system
timer), and between increments only the lower bits may change, we would
also like the lower bits of the input to change the initial state, and not
just be discarded. So we do basic manipulation of the seed input value to
ensure that all bits of the seed input affect the initial state.
"""
try:
seed = next(seed_iter)
except StopIteration:
return 0xFFFFFFFF
else:
if seed is None:
return 0xFFFFFFFF
else:
seed = int(seed) & 0xFFFFFFFF
working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF
min_value = 1 << min_value_shift
if working_seed < min_value:
working_seed = (seed << 24) & 0xFFFFFFFF
if working_seed < min_value:
working_seed ^= 0xFFFFFFFF
return working_seed
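# Worked example (sketch): a zero seed has no significant bits set, so it is
# remapped: lfsr_next_one_seed(iter([0]), 1) returns 0xFFFFFFFF, the same
# default used for an absent or None seed.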
def lfsr_validate_one_seed(seed, min_value_shift):
'''Validate seeds for LFSR generators
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
This is a light-weight validation of state, used from setstate().
'''
min_value = 1 << min_value_shift
if seed < min_value:
seed ^= 0xFFFFFFFF
return seed
def lfsr_state_z(z):
return int(z ^ ((z << 16) & 0xFFFFFFFF))
def lfsr_repr_z(z):
return repr(int(z ^ ((z << 16) & 0xFFFFFFFF)))
class LFSR113(object):
'''Combined LFSR random number generator by L'Ecuyer
It combines 4 LFSR generators. The generators have been
chosen for maximal equidistribution.
The period is approximately 2**113.
"Tables of Maximally-Equidistributed Combined Lfsr Generators"
P. L'Ecuyer
Mathematics of Computation, 68, 225 (1999), 261-269.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_LFSR113_1_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,6)
_LFSR113_1_MATRIX_b = BitColumnMatrix.shift(32,-13)
_LFSR113_1_MATRIX_c = BitColumnMatrix.mask(32, 1, 32)
_LFSR113_1_MATRIX_d = BitColumnMatrix.shift(32,18)
_LFSR113_1_MATRIX = _LFSR113_1_MATRIX_d * _LFSR113_1_MATRIX_c + _LFSR113_1_MATRIX_b * _LFSR113_1_MATRIX_a
_LFSR113_1_CYCLE_LEN = 2**(32 - 1) - 1
_LFSR113_2_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,2)
_LFSR113_2_MATRIX_b = BitColumnMatrix.shift(32,-27)
_LFSR113_2_MATRIX_c = BitColumnMatrix.mask(32, 3, 32)
_LFSR113_2_MATRIX_d = BitColumnMatrix.shift(32,2)
_LFSR113_2_MATRIX = _LFSR113_2_MATRIX_d * _LFSR113_2_MATRIX_c + _LFSR113_2_MATRIX_b * _LFSR113_2_MATRIX_a
_LFSR113_2_CYCLE_LEN = 2**(32 - 3) - 1
_LFSR113_3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_LFSR113_3_MATRIX_b = BitColumnMatrix.shift(32,-21)
_LFSR113_3_MATRIX_c = BitColumnMatrix.mask(32, 4, 32)
_LFSR113_3_MATRIX_d = BitColumnMatrix.shift(32,7)
_LFSR113_3_MATRIX = _LFSR113_3_MATRIX_d * _LFSR113_3_MATRIX_c + _LFSR113_3_MATRIX_b * _LFSR113_3_MATRIX_a
_LFSR113_3_CYCLE_LEN = 2**(32 - 4) - 1
_LFSR113_4_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,3)
_LFSR113_4_MATRIX_b = BitColumnMatrix.shift(32,-12)
_LFSR113_4_MATRIX_c = BitColumnMatrix.mask(32, 7, 32)
_LFSR113_4_MATRIX_d = BitColumnMatrix.shift(32,13)
_LFSR113_4_MATRIX = _LFSR113_4_MATRIX_d * _LFSR113_4_MATRIX_c + _LFSR113_4_MATRIX_b * _LFSR113_4_MATRIX_a
_LFSR113_4_CYCLE_LEN = 2**(32 - 7) - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return LFSR113.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.z1 = lfsr_next_one_seed(repeat_seed_iter, 1)
self.z2 = lfsr_next_one_seed(repeat_seed_iter, 3)
self.z3 = lfsr_next_one_seed(repeat_seed_iter, 4)
self.z4 = lfsr_next_one_seed(repeat_seed_iter, 7)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self.z1 = lfsr_validate_one_seed(self.z1, 1)
self.z2 = lfsr_validate_one_seed(self.z2, 3)
self.z3 = lfsr_validate_one_seed(self.z3, 4)
self.z4 = lfsr_validate_one_seed(self.z4, 7)
def _next_z1(self):
b = (((self.z1 & 0x03FFFFFF) << 6) ^ self.z1) >> 13
self.z1 = ((self.z1 & 0x00003FFE) << 18) ^ b
def _next_z2(self):
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 27
self.z2 = ((self.z2 & 0x3FFFFFF8) << 2) ^ b
def _next_z3(self):
b = (((self.z3 & 0x0007FFFF) << 13) ^ self.z3) >> 21
self.z3 = ((self.z3 & 0x01FFFFF0) << 7) ^ b
def _next_z4(self):
b = (((self.z4 & 0x1FFFFFFF) << 3) ^ self.z4) >> 12
self.z4 = ((self.z4 & 0x0007FF80) << 13) ^ b
def __next__(self):
b = (((self.z1 & 0x03FFFFFF) << 6) ^ self.z1) >> 13
self.z1 = ((self.z1 & 0x00003FFE) << 18) ^ b
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 27
self.z2 = ((self.z2 & 0x3FFFFFF8) << 2) ^ b
b = (((self.z3 & 0x0007FFFF) << 13) ^ self.z3) >> 21
self.z3 = ((self.z3 & 0x01FFFFF0) << 7) ^ b
b = (((self.z4 & 0x1FFFFFFF) << 3) ^ self.z4) >> 12
self.z4 = ((self.z4 & 0x0007FF80) << 13) ^ b
return self.z1 ^ self.z2 ^ self.z3 ^ self.z4
def current(self):
return self.z1 ^ self.z2 ^ self.z3 ^ self.z4
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
selector = (current >> 30) & 0x3
if selector == 0:
self.z1 = lfsr_validate_one_seed(self.z1 ^ value_int, 1)
self._next_z1()
elif selector == 1:
self.z2 = lfsr_validate_one_seed(self.z2 ^ value_int, 3)
self._next_z2()
elif selector == 2:
self.z3 = lfsr_validate_one_seed(self.z3 ^ value_int, 4)
self._next_z3()
else: # selector == 3
self.z4 = lfsr_validate_one_seed(self.z4 ^ value_int, 7)
self._next_z4()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (lfsr_state_z(self.z1), lfsr_state_z(self.z2), lfsr_state_z(self.z3), lfsr_state_z(self.z4))
def setstate(self, state):
self.seed(state)
def jumpahead(self, n):
n_1 = int(n) % self._LFSR113_1_CYCLE_LEN
n_2 = int(n) % self._LFSR113_2_CYCLE_LEN
n_3 = int(n) % self._LFSR113_3_CYCLE_LEN
n_4 = int(n) % self._LFSR113_4_CYCLE_LEN
z1 = pow(self._LFSR113_1_MATRIX, n_1) * self.z1
self.z1 = z1
z2 = pow(self._LFSR113_2_MATRIX, n_2) * self.z2
self.z2 = z2
z3 = pow(self._LFSR113_3_MATRIX, n_3) * self.z3
self.z3 = z3
z4 = pow(self._LFSR113_4_MATRIX, n_4) * self.z4
self.z4 = z4
def __repr__(self):
return (self.__class__.__name__ + "(" + lfsr_repr_z(self.z1) +
"," + lfsr_repr_z(self.z2) +
"," + lfsr_repr_z(self.z3) +
"," + lfsr_repr_z(self.z4) + ")")
class LFSR88(object):
'''Combined LFSR random number generator by L'Ecuyer
It combines 3 LFSR generators. The generators have been
chosen for maximal equidistribution.
The period is approximately 2**88.
"Maximally Equidistributed Combined Tausworthe Generators"
P. L'Ecuyer
Mathematics of Computation, 65, 213 (1996), 203-213.
'''
SIMPLERANDOM_MAX = 2**32 - 1
_LFSR88_1_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,13)
_LFSR88_1_MATRIX_b = BitColumnMatrix.shift(32,-19)
_LFSR88_1_MATRIX_c = BitColumnMatrix.mask(32, 1, 32)
_LFSR88_1_MATRIX_d = BitColumnMatrix.shift(32,12)
_LFSR88_1_MATRIX = _LFSR88_1_MATRIX_d * _LFSR88_1_MATRIX_c + _LFSR88_1_MATRIX_b * _LFSR88_1_MATRIX_a
_LFSR88_1_CYCLE_LEN = 2**(32 - 1) - 1
_LFSR88_2_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,2)
_LFSR88_2_MATRIX_b = BitColumnMatrix.shift(32,-25)
_LFSR88_2_MATRIX_c = BitColumnMatrix.mask(32, 3, 32)
_LFSR88_2_MATRIX_d = BitColumnMatrix.shift(32,4)
_LFSR88_2_MATRIX = _LFSR88_2_MATRIX_d * _LFSR88_2_MATRIX_c + _LFSR88_2_MATRIX_b * _LFSR88_2_MATRIX_a
_LFSR88_2_CYCLE_LEN = 2**(32 - 3) - 1
_LFSR88_3_MATRIX_a = BitColumnMatrix.unity(32) + BitColumnMatrix.shift(32,3)
_LFSR88_3_MATRIX_b = BitColumnMatrix.shift(32,-11)
_LFSR88_3_MATRIX_c = BitColumnMatrix.mask(32, 4, 32)
_LFSR88_3_MATRIX_d = BitColumnMatrix.shift(32,17)
_LFSR88_3_MATRIX = _LFSR88_3_MATRIX_d * _LFSR88_3_MATRIX_c + _LFSR88_3_MATRIX_b * _LFSR88_3_MATRIX_a
_LFSR88_3_CYCLE_LEN = 2**(32 - 4) - 1
@staticmethod
def min():
return 0
@staticmethod
def max():
return LFSR88.SIMPLERANDOM_MAX
def __init__(self, *args, **kwargs):
'''Positional arguments are seed values
Keyword-only arguments:
mix_extras=False -- If True, then call mix() to 'mix' extra seed
values into the state.
'''
seed_iter = _traverse_iter(args)
repeat_seed_iter = _repeat_iter(seed_iter)
self.z1 = lfsr_next_one_seed(repeat_seed_iter, 1)
self.z2 = lfsr_next_one_seed(repeat_seed_iter, 3)
self.z3 = lfsr_next_one_seed(repeat_seed_iter, 4)
if kwargs.pop('mix_extras', False):
self.mix(seed_iter)
for key in kwargs:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % key)
def seed(self, *args, **kwargs):
self.__init__(*args, **kwargs)
def sanitise(self):
self.z1 = lfsr_validate_one_seed(self.z1, 1)
self.z2 = lfsr_validate_one_seed(self.z2, 3)
self.z3 = lfsr_validate_one_seed(self.z3, 4)
def _next_z1(self):
b = (((self.z1 & 0x0007FFFF) << 13) ^ self.z1) >> 19
self.z1 = ((self.z1 & 0x000FFFFE) << 12) ^ b
def _next_z2(self):
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 25
self.z2 = ((self.z2 & 0x0FFFFFF8) << 4) ^ b
def _next_z3(self):
b = (((self.z3 & 0x1FFFFFFF) << 3) ^ self.z3) >> 11
self.z3 = ((self.z3 & 0x00007FF0) << 17) ^ b
def __next__(self):
b = (((self.z1 & 0x0007FFFF) << 13) ^ self.z1) >> 19
self.z1 = ((self.z1 & 0x000FFFFE) << 12) ^ b
b = (((self.z2 & 0x3FFFFFFF) << 2) ^ self.z2) >> 25
self.z2 = ((self.z2 & 0x0FFFFFF8) << 4) ^ b
b = (((self.z3 & 0x1FFFFFFF) << 3) ^ self.z3) >> 11
self.z3 = ((self.z3 & 0x00007FF0) << 17) ^ b
return self.z1 ^ self.z2 ^ self.z3
def current(self):
return self.z1 ^ self.z2 ^ self.z3
def mix(self, *args):
for value in _traverse_iter(args):
value_int = int(value) & 0xFFFFFFFF
current = self.current()
if current < 1431655765: # constant is 2^32 / 3
self.z1 = lfsr_validate_one_seed(self.z1 ^ value_int, 1)
self._next_z1()
elif current < 2863311531: # constant is 2^32 * 2 / 3
self.z2 = lfsr_validate_one_seed(self.z2 ^ value_int, 3)
self._next_z2()
else:
self.z3 = lfsr_validate_one_seed(self.z3 ^ value_int, 4)
self._next_z3()
return self.current()
def __iter__(self):
return self
def getstate(self):
return (lfsr_state_z(self.z1), lfsr_state_z(self.z2), lfsr_state_z(self.z3))
def setstate(self, state):
self.seed(state)
def jumpahead(self, n):
n_1 = int(n) % self._LFSR88_1_CYCLE_LEN
n_2 = int(n) % self._LFSR88_2_CYCLE_LEN
n_3 = int(n) % self._LFSR88_3_CYCLE_LEN
z1 = pow(self._LFSR88_1_MATRIX, n_1) * self.z1
self.z1 = z1
z2 = pow(self._LFSR88_2_MATRIX, n_2) * self.z2
self.z2 = z2
z3 = pow(self._LFSR88_3_MATRIX, n_3) * self.z3
self.z3 = z3
def __repr__(self):
return (self.__class__.__name__ + "(" + lfsr_repr_z(self.z1) +
"," + lfsr_repr_z(self.z2) +
"," + lfsr_repr_z(self.z3) + ")")
|
mit
| -5,898,886,228,180,015,000
| 35.298759
| 125
| 0.576945
| false
| 3.21465
| false
| false
| false
|
GNOME/gedit-plugins
|
plugins/commander/modules/align.py
|
1
|
8900
|
# -*- coding: utf-8 -*-
#
# align.py - align commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import commander.commands as commands
import commander.commands.completion
import commander.commands.result
import commander.commands.exceptions
from functools import reduce
import re
__commander_module__ = True
def _get_groups(m, group, add_ws_group):
if len(m.groups()) <= group - 1:
gidx = 0
else:
gidx = group
if len(m.groups()) <= add_ws_group - 1:
wsidx = 0
else:
wsidx = add_ws_group
# Whitespace group must be contained in align group
if m.start(wsidx) < m.start(gidx) or m.end(wsidx) > m.end(gidx):
wsidx = gidx
return (gidx, wsidx)
class Line:
def __init__(self, line, reg, tabwidth):
self.tabwidth = tabwidth
self.line = line
# All the separators
self.matches = list(reg.finditer(line))
# @newline initially contains the first column
if not self.matches:
# No separator found
self.newline = str(line)
else:
# Up to first separator
self.newline = line[0:self.matches[0].start(0)]
def matches_len(self):
return len(self.matches)
def new_len(self, extra=''):
return len((self.newline + extra).expandtabs(self.tabwidth))
def match(self, idx):
if idx >= self.matches_len():
return None
return self.matches[idx]
def append(self, idx, num, group, add_ws_group):
m = self.match(idx)
        if m is None:
return
gidx, wsidx = _get_groups(m, group, add_ws_group)
# Append leading match
self.newline += self.line[m.start(0):m.start(gidx)]
# Now align by replacing wsidx with spaces
prefix = self.line[m.start(gidx):m.start(wsidx)]
suffix = self.line[m.end(wsidx):m.end(gidx)]
sp = ''
while True:
bridge = prefix + sp + suffix
if self.new_len(bridge) < num:
sp += ' '
else:
break
self.newline += bridge
# Then append the rest of the match
mnext = self.match(idx + 1)
        if mnext is None:
endidx = None
else:
endidx = mnext.start(0)
self.newline += self.line[m.end(gidx):endidx]
def __str__(self):
return self.newline
def _find_max_align(lines, idx, group, add_ws_group):
num = 0
# We will align on 'group', by adding spaces to 'add_ws_group'
for line in lines:
m = line.match(idx)
        if m is not None:
gidx, wsidx = _get_groups(m, group, add_ws_group)
# until the start
extra = line.line[m.start(0):m.start(wsidx)] + line.line[m.end(wsidx):m.end(gidx)]
# Measure where to align it
l = line.new_len(extra)
else:
l = line.new_len()
if l > num:
num = l
return num
def _regex(view, reg, group, additional_ws, add_ws_group, flags=0):
buf = view.get_buffer()
# Get the selection of lines to align columns on
bounds = buf.get_selection_bounds()
if not bounds:
start = buf.get_iter_at_mark(buf.get_insert())
start.set_line_offset(0)
end = start.copy()
if not end.ends_line():
end.forward_to_line_end()
bounds = (start, end)
if not bounds[0].equal(bounds[1]) and bounds[1].starts_line():
bounds[1].backward_line()
if not bounds[1].ends_line():
bounds[1].forward_to_line_end()
# Get the regular expression from the user
    if reg is None:
reg, words, modifier = (yield commander.commands.result.Prompt('Regex:'))
# Compile the regular expression
try:
reg = re.compile(reg, flags)
except Exception as e:
raise commander.commands.exceptions.Execute('Failed to compile regular expression: %s' % (e,))
# Query the user to provide a regex group number to align on
    if group is None:
group, words, modifier = (yield commander.commands.result.Prompt('Group (1):'))
try:
group = int(group)
except:
group = 1
# Query the user for additional whitespace to insert for separating items
    if additional_ws is None:
additional_ws, words, modifier = (yield commander.commands.result.Prompt('Additional whitespace (0):'))
try:
additional_ws = int(additional_ws)
except:
additional_ws = 0
# Query the user for the regex group number on which to add the
# whitespace
    if add_ws_group is None:
add_ws_group, words, modifier = (yield commander.commands.result.Prompt('Whitespace group (1):'))
try:
add_ws_group = int(add_ws_group)
except:
add_ws_group = -1
# By default, add the whitespace on the group on which the columns are
# aligned
if add_ws_group < 0:
add_ws_group = group
start, end = bounds
if not start.starts_line():
start.set_line_offset(0)
if not end.ends_line():
end.forward_to_line_end()
lines = start.get_text(end).splitlines()
newlines = []
num = 0
tabwidth = view.get_tab_width()
# Construct Line objects for all the lines
newlines = [Line(line, reg, tabwidth) for line in lines]
# Calculate maximum number of matches (i.e. columns)
num = reduce(lambda x, y: max(x, y.matches_len()), newlines, 0)
for i in range(num):
al = _find_max_align(newlines, i, group, add_ws_group)
for line in newlines:
line.append(i, al + additional_ws, group, add_ws_group)
# Replace lines
aligned = str.join('\n', [x.newline for x in newlines])
buf.begin_user_action()
buf.delete(bounds[0], bounds[1])
m = buf.create_mark(None, bounds[0], True)
buf.insert(bounds[1], aligned)
buf.select_range(buf.get_iter_at_mark(m), bounds[1])
buf.delete_mark(m)
buf.end_user_action()
yield commander.commands.result.DONE
def __default__(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-sensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group)
def i(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-insensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group, re.IGNORECASE)
# ex:ts=4:et
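# Illustrative effect (sketch): running the default command on the selection
#   x 1
#   longer 22
# aligns columns on runs of whitespace and yields
#   x      1
#   longer 22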
|
gpl-2.0
| 2,460,157,650,009,915,400
| 30.448763
| 190
| 0.637978
| false
| 3.574297
| false
| false
| false
|
Teagan42/home-assistant
|
homeassistant/components/google_assistant/const.py
|
1
|
4660
|
"""Constants for Google Assistant."""
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
climate,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
DOMAIN = "google_assistant"
GOOGLE_ASSISTANT_API_ENDPOINT = "/api/google_assistant"
CONF_EXPOSE = "expose"
CONF_ENTITY_CONFIG = "entity_config"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_PROJECT_ID = "project_id"
CONF_ALIASES = "aliases"
CONF_API_KEY = "api_key"
CONF_ROOM_HINT = "room"
CONF_ALLOW_UNLOCK = "allow_unlock"
CONF_SECURE_DEVICES_PIN = "secure_devices_pin"
CONF_REPORT_STATE = "report_state"
CONF_SERVICE_ACCOUNT = "service_account"
CONF_CLIENT_EMAIL = "client_email"
CONF_PRIVATE_KEY = "private_key"
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
"climate",
"cover",
"fan",
"group",
"input_boolean",
"light",
"media_player",
"scene",
"script",
"switch",
"vacuum",
"lock",
"binary_sensor",
"sensor",
"alarm_control_panel",
]
PREFIX_TYPES = "action.devices.types."
TYPE_CAMERA = PREFIX_TYPES + "CAMERA"
TYPE_LIGHT = PREFIX_TYPES + "LIGHT"
TYPE_SWITCH = PREFIX_TYPES + "SWITCH"
TYPE_VACUUM = PREFIX_TYPES + "VACUUM"
TYPE_SCENE = PREFIX_TYPES + "SCENE"
TYPE_FAN = PREFIX_TYPES + "FAN"
TYPE_THERMOSTAT = PREFIX_TYPES + "THERMOSTAT"
TYPE_LOCK = PREFIX_TYPES + "LOCK"
TYPE_BLINDS = PREFIX_TYPES + "BLINDS"
TYPE_GARAGE = PREFIX_TYPES + "GARAGE"
TYPE_OUTLET = PREFIX_TYPES + "OUTLET"
TYPE_SENSOR = PREFIX_TYPES + "SENSOR"
TYPE_DOOR = PREFIX_TYPES + "DOOR"
TYPE_TV = PREFIX_TYPES + "TV"
TYPE_SPEAKER = PREFIX_TYPES + "SPEAKER"
TYPE_ALARM = PREFIX_TYPES + "SECURITYSYSTEM"
SERVICE_REQUEST_SYNC = "request_sync"
HOMEGRAPH_URL = "https://homegraph.googleapis.com/"
HOMEGRAPH_SCOPE = "https://www.googleapis.com/auth/homegraph"
HOMEGRAPH_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
REQUEST_SYNC_BASE_URL = HOMEGRAPH_URL + "v1/devices:requestSync"
REPORT_STATE_BASE_URL = HOMEGRAPH_URL + "v1/devices:reportStateAndNotification"
# Error codes used for SmartHomeError class
# https://developers.google.com/actions/reference/smarthome/errors-exceptions
ERR_DEVICE_OFFLINE = "deviceOffline"
ERR_DEVICE_NOT_FOUND = "deviceNotFound"
ERR_VALUE_OUT_OF_RANGE = "valueOutOfRange"
ERR_NOT_SUPPORTED = "notSupported"
ERR_PROTOCOL_ERROR = "protocolError"
ERR_UNKNOWN_ERROR = "unknownError"
ERR_FUNCTION_NOT_SUPPORTED = "functionNotSupported"
ERR_ALREADY_DISARMED = "alreadyDisarmed"
ERR_ALREADY_ARMED = "alreadyArmed"
ERR_CHALLENGE_NEEDED = "challengeNeeded"
ERR_CHALLENGE_NOT_SETUP = "challengeFailedNotSetup"
ERR_TOO_MANY_FAILED_ATTEMPTS = "tooManyFailedAttempts"
ERR_PIN_INCORRECT = "pinIncorrect"
ERR_USER_CANCELLED = "userCancelled"
# Event types
EVENT_COMMAND_RECEIVED = "google_assistant_command"
EVENT_QUERY_RECEIVED = "google_assistant_query"
EVENT_SYNC_RECEIVED = "google_assistant_sync"
DOMAIN_TO_GOOGLE_TYPES = {
camera.DOMAIN: TYPE_CAMERA,
climate.DOMAIN: TYPE_THERMOSTAT,
cover.DOMAIN: TYPE_BLINDS,
fan.DOMAIN: TYPE_FAN,
group.DOMAIN: TYPE_SWITCH,
input_boolean.DOMAIN: TYPE_SWITCH,
light.DOMAIN: TYPE_LIGHT,
lock.DOMAIN: TYPE_LOCK,
media_player.DOMAIN: TYPE_SWITCH,
scene.DOMAIN: TYPE_SCENE,
script.DOMAIN: TYPE_SCENE,
switch.DOMAIN: TYPE_SWITCH,
vacuum.DOMAIN: TYPE_VACUUM,
alarm_control_panel.DOMAIN: TYPE_ALARM,
}
DEVICE_CLASS_TO_GOOGLE_TYPES = {
(cover.DOMAIN, cover.DEVICE_CLASS_GARAGE): TYPE_GARAGE,
(cover.DOMAIN, cover.DEVICE_CLASS_DOOR): TYPE_DOOR,
(switch.DOMAIN, switch.DEVICE_CLASS_SWITCH): TYPE_SWITCH,
(switch.DOMAIN, switch.DEVICE_CLASS_OUTLET): TYPE_OUTLET,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_DOOR): TYPE_DOOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_GARAGE_DOOR): TYPE_GARAGE,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_LOCK): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_OPENING): TYPE_SENSOR,
(binary_sensor.DOMAIN, binary_sensor.DEVICE_CLASS_WINDOW): TYPE_SENSOR,
(media_player.DOMAIN, media_player.DEVICE_CLASS_TV): TYPE_TV,
(media_player.DOMAIN, media_player.DEVICE_CLASS_SPEAKER): TYPE_SPEAKER,
(sensor.DOMAIN, sensor.DEVICE_CLASS_TEMPERATURE): TYPE_SENSOR,
(sensor.DOMAIN, sensor.DEVICE_CLASS_HUMIDITY): TYPE_SENSOR,
}
CHALLENGE_ACK_NEEDED = "ackNeeded"
CHALLENGE_PIN_NEEDED = "pinNeeded"
CHALLENGE_FAILED_PIN_NEEDED = "challengeFailedPinNeeded"
STORE_AGENT_USER_IDS = "agent_user_ids"
SOURCE_CLOUD = "cloud"
SOURCE_LOCAL = "local"
|
apache-2.0
| 8,215,510,004,811,099,000
| 30.486486
| 80
| 0.71824
| false
| 2.971939
| false
| false
| false
|
LoyolaCSDepartment/LDA-ICPC-2014
|
topic-models/topic-count/xmlsplit.py
|
1
|
1409
|
#! /usr/bin/env python3
import os
import sys
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def xmlSplit(infile_name, dest_dir):
try:
# in_file = open('{0}{1}'.format(folder, filename), 'r', encoding='latin_1')
in_file = open(infile_name, 'r', encoding='latin_1')
except IOError:
print("File not found.")
return
  dest_dir += '/'  # a trailing slash is harmless if already present, and required otherwise
# dest_dir = '{0}input/'.format(folder)
ensure_dir(dest_dir)
file_num = 1
out_file = open('%s%d.txt' % (dest_dir, file_num), 'w')
file_open = True
for x in in_file:
if x[-1] != '\n':
x = '%s\n' % (x)
if not file_open:
file_open = True
out_file = open(next_file, 'w')
# hack to remove non-ascii characters
x = ''.join([c for c in x if ord(c) < 128])
out_file.write('%s' % (x))
if x.startswith('</source>'):
out_file.close()
file_num += 1
next_file = '%s%d.txt' % (dest_dir, file_num)
file_open = False
  print('{0} files written to {1}'.format(file_num - 1, dest_dir))
out_file.close()
in_file.close()
if len(sys.argv) != 3:
print("usage: " + sys.argv[0] + " <input xml file> <output directory>")
  sys.exit(-1)
xmlSplit(sys.argv[1], sys.argv[2])
# example call: xmlsplit.py cook.xml /scratch/topics/out
# xmlSplit('<FIX ME>/topic-models/topic-count/sources/', 'cook.xml')
|
mit
| 8,342,931,913,768,487,000
| 25.092593
| 80
| 0.582683
| false
| 2.773622
| false
| false
| false
|
AMOboxTV/AMOBox.LegoBuild
|
plugin.video.salts/scrapers/izlemeyedeger_scraper.py
|
1
|
3982
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://www.izlemeyedeger.com'
class IzlemeyeDeger_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'IzlemeyeDeger'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
if 'views' in item and item['views']:
label += ' (%s views)' % item['views']
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
embed_url = dom_parser.parse_dom(html, 'meta', {'itemprop': 'embedURL'}, ret='content')
if embed_url:
html = self._http_get(embed_url[0], cache_limit=.5)
for match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
stream_url, height = match.groups()
stream_url = stream_url.replace('\\&', '&')
host = self._get_direct_hostname(stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(height)
stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(embed_url[0]))
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def search(self, video_type, title, year, season=''):
results = []
search_url = urlparse.urljoin(self.base_url, '/arama?q=%s')
search_url = search_url % (urllib.quote_plus(title))
html = self._http_get(search_url, cache_limit=1)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'section'})
if fragment:
for match in re.finditer('href="([^"]+).*?class="year">\s*(\d+).*?class="video-title">\s*([^<]+)', fragment[0], re.DOTALL):
url, match_year, match_title = match.groups('')
match_title = match_title.strip()
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
|
gpl-2.0
| -3,967,336,712,143,353,000
| 40.479167
| 165
| 0.592416
| false
| 3.742481
| false
| false
| false
|
nhazekam/cctools
|
chirp/src/bindings/python/chirp.binding.py
|
1
|
22645
|
## @package ChirpPython
#
# Python Chirp bindings.
#
# The objects and methods provided by this package correspond to the native
# C API in @ref chirp_reli.h and chirp_swig_wrap.h
#
# The SWIG-based Python bindings provide a higher-level interface that
# revolves around:
#
# - @ref Chirp.Client
# - @ref Chirp.Stat
import os
import time
import json
import binascii
##
# Python Client object
#
# This class is used to create a chirp client
class Client(object):
##
# Create a new chirp client
#
# @param self Reference to the current task object.
# @param hostport The host:port of the server.
# @param timeout The time to wait for a server response on every request.
    # @param authentication A list of preferred authentications. E.g., ['ticket', 'unix']
# @param debug Generate client debug output.
def __init__(self, hostport, timeout=60, authentication=None, tickets=None, debug=False):
self.hostport = hostport
self.timeout = timeout
if debug:
cctools_debug_config('chirp_python_client')
cctools_debug_flags_set('chirp')
if tickets and (authentication is None):
authentication = ['ticket']
self.__set_tickets(tickets)
if authentication is None:
auth_register_all()
else:
for auth in authentication:
auth_register_byname(auth)
self.identity = self.whoami()
        if self.identity == '':
raise AuthenticationFailure(authentication)
def __exit__(self):
chirp_reli_disconnect(self.hostport)
def __del__(self):
chirp_reli_disconnect(self.hostport)
def __stoptime(self, absolute_stop_time=None, timeout=None):
if timeout is None:
timeout = self.timeout
if absolute_stop_time is None:
absolute_stop_time = time.time() + timeout
return absolute_stop_time
def __set_tickets(self, tickets):
tickets_str = None
if tickets is None:
try:
tickets_str = os.environ['CHIRP_CLIENT_TICKETS']
except KeyError:
tickets_str = None
else:
tickets_str = ','.join(tickets)
if tickets_str is not None:
auth_ticket_load(tickets_str)
##
# Returns a string with identity of the client according to the server.
#
# @param self Reference to the current task object.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def whoami(self, absolute_stop_time=None, timeout=None):
return chirp_wrap_whoami(self.hostport, self.__stoptime(absolute_stop_time, timeout))
##
    # Returns a list of ACL strings for the given directory.
# Throws an IOError on error (no such directory).
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def listacl(self, path='/', absolute_stop_time=None, timeout=None):
acls = chirp_wrap_listacl(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if acls is None:
raise IOError(path)
return acls.split('\n')
##
    # Sets an ACL entry (subject/rights) on the given directory.
# Throws a GeneralError on error.
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param subject Target subject.
# @param rights Permissions to be granted.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def setacl(self, path, subject, rights, absolute_stop_time=None, timeout=None):
result = chirp_reli_setacl(self.hostport, path, subject, rights, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('setacl', result, [path, subject, rights])
return result
##
# Set the ACL for the given directory to be only for the rights to the calling user.
# Throws a GeneralError on error.
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param rights Permissions to be granted.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def resetacl(self, path, rights, absolute_stop_time=None, timeout=None):
result = chirp_wrap_resetacl(self.hostport, path, rights, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
            raise GeneralFailure('resetacl', result, [path, rights])
return result
##
    # Returns a list of Chirp.Stat objects, one per entry in the path.
# Throws an IOError on error (no such directory).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def ls(self, path, absolute_stop_time=None, timeout=None):
dr = chirp_reli_opendir(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
files = []
        if dr is None:
raise IOError(path)
while True:
d = chirp_reli_readdir(dr)
if d is None: break
files.append(Stat(d.name, d.info))
return files
##
# Returns a Chirp.Stat object with information on path.
# Throws an IOError on error (e.g., no such path or insufficient permissions).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def stat(self, path, absolute_stop_time=None, timeout=None):
info = chirp_wrap_stat(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if info is None:
raise IOError(path)
return Stat(path, info)
##
# Changes permissions on path.
# Throws a GeneralFailure on error (e.g., no such path or insufficient permissions).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param mode Desired permissions (e.g., 0755)
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def chmod(self, path, mode, absolute_stop_time=None, timeout=None):
result = chirp_reli_chmod(self.hostport, path, mode, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('chmod', result)
return result
##
# Copies local file/directory source to the chirp server as file/directory destination.
# If destination is not given, source name is used.
# Raises Chirp.TransferFailure on error.
#
# @param self Reference to the current task object.
# @param source A local file or directory.
# @param destination File or directory name to use in the server (defaults to source).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def put(self, source, destination=None, absolute_stop_time=None, timeout=None):
if destination is None:
destination = source
result = chirp_recursive_put(self.hostport,
source, destination,
self.__stoptime(absolute_stop_time, timeout))
        if result > -1:
return result
raise TransferFailure('put', result, source, destination)
##
# Copies server file/directory source to the local file/directory destination.
# If destination is not given, source name is used.
# Raises Chirp.TransferFailure on error.
#
# @param self Reference to the current task object.
# @param source A server file or directory.
# @param destination File or directory name to be used locally (defaults to source).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def get(self, source, destination=None, absolute_stop_time=None, timeout=None):
if destination is None:
destination = source
result = chirp_recursive_get(self.hostport,
source, destination,
self.__stoptime(absolute_stop_time, timeout))
        if result > -1:
return result
raise TransferFailure('get', result, source, destination)
##
# Removes the given file or directory from the server.
# Raises OSError on error.
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def rm(self, path, absolute_stop_time=None, timeout=None):
status = chirp_reli_rmall(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if status < 0:
raise OSError
##
# Recursively create the directories in path.
# Raises OSError on error.
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param mode Unix permissions for the created directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def mkdir(self, path, mode=493, absolute_stop_time=None, timeout=None):
result = chirp_reli_mkdir_recursive(self.hostport, path, mode, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise OSError
return result
##
# Computes the checksum of path.
# Raises IOError on error.
#
# @param self Reference to the current task object.
# @param path Target file.
# @param algorithm One of 'md5' or 'sha1' (default).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def hash(self, path, algorithm='sha1', absolute_stop_time=None, timeout=None):
hash_hex = chirp_wrap_hash(self.hostport, path, algorithm, self.__stoptime(absolute_stop_time, timeout))
if hash_hex is None:
raise IOError
return hash_hex
##
# Creates a chirp job. See http://ccl.cse.nd.edu/software/manuals/chirp.html for details.
#
# @param job_description A dictionary with a job chirp description.
#
# @code
    # job_description = {
    #                     'executable': "/bin/tar",
    #                     'arguments': [ 'tar', '-cf', 'archive.tar', 'a', 'b' ],
    #                     'files': [ { 'task_path': 'a',
    #                                  'serv_path': '/users/magrat/a.txt',
    #                                  'type': 'INPUT' },
    #                                { 'task_path': 'b',
    #                                  'serv_path': '/users/magrat/b.txt',
    #                                  'type': 'INPUT' },
    #                                { 'task_path': 'archive.tar',
    #                                  'serv_path': '/users/magrat/archive.tar',
    #                                  'type': 'OUTPUT' } ]
    # }
# job_id = client.job_create(job_description);
# @endcode
def job_create(self, job_description):
job_json = json.dumps(job_description)
job_id = chirp_wrap_job_create(self.hostport, job_json, self.__stoptime())
if job_id < 0:
raise ChirpJobError('create', job_id, job_json)
        return job_id
##
# Kills the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be killed.
#
def job_kill(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_kill(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('kill', result, ids_str)
        return result
##
# Commits (starts running) the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be committed.
#
def job_commit(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_commit(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('commit', result, ids_str)
        return result
##
# Reaps the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be reaped.
#
def job_reap(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_reap(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('reap', result, ids_str)
        return result
##
# Obtains the current status for each job id. The value returned is a
# list which contains a dictionary reference per job id.
#
# @param job_ids Job ids of the chirp jobs to be reaped.
#
def job_status(self, *job_ids):
ids_str = json.dumps(job_ids)
status = chirp_wrap_job_status(self.hostport, ids_str, self.__stoptime())
if status is None:
raise ChirpJobError('status', None, ids_str)
        return json.loads(status)
##
# Waits waiting_time seconds for the job_id to terminate. Return value is
    # the same as job_status. If the call times out, an empty string is
    # returned. If job_id is missing, job_wait waits for any of the user's jobs.
#
# @param waiting_time maximum number of seconds to wait for a job to finish.
# @param job_id id of the job to wait.
def job_wait(self, waiting_time, job_id = 0):
status = chirp_wrap_job_wait(self.hostport, job_id, waiting_time, self.__stoptime())
if status is None:
raise ChirpJobError('status', None, job_id)
        return json.loads(status)
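##
# Illustrative usage sketch (not part of the original bindings). The
# host:port and paths below are made up; a live chirp server is required.
#
# @code
# client = Client('localhost:9094', timeout=30)
# print(client.whoami())
# client.put('data.txt', '/users/magrat/data.txt')
# print(client.stat('/users/magrat/data.txt'))
# @endcode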
##
# Python Stat object
#
# This class is used to record stat information for files/directories of a chirp server.
class Stat(object):
def __init__(self, path, cstat):
self._path = path
self._info = cstat
##
# Target path.
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.path
# @endcode
@property
def path(self):
return self._path
##
# ID of device containing file.
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.device
# @endcode
@property
def device(self):
return self._info.cst_dev
##
# inode number
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.inode
# @endcode
@property
def inode(self):
return self._info.cst_ino
##
# file mode permissions
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.mode
# @endcode
@property
def mode(self):
return self._info.cst_mode
##
# number of hard links
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.nlink
# @endcode
@property
def nlink(self):
return self._info.cst_nlink
##
# user ID of owner
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.uid
# @endcode
@property
def uid(self):
return self._info.cst_uid
##
# group ID of owner
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.gid
# @endcode
@property
def gid(self):
return self._info.cst_gid
##
# device ID if special file
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.rdev
# @endcode
@property
def rdev(self):
return self._info.cst_rdev
##
# total size, in bytes
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.size
# @endcode
@property
def size(self):
return self._info.cst_size
##
# block size for file system I/O
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.block_size
# @endcode
@property
def block_size(self):
return self._info.cst_blksize
##
# number of 512B blocks allocated
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.blocks
# @endcode
@property
def blocks(self):
return self._info.cst_blocks
##
# number of seconds since epoch since last access
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.atime
# @endcode
@property
def atime(self):
return self._info.cst_atime
##
# number of seconds since epoch since last modification
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.mtime
# @endcode
@property
def mtime(self):
return self._info.cst_mtime
##
# number of seconds since epoch since last status change
#
# @a Note: This is defined using property decorator. So it must be called without parentheses
# (). For example:
# @code
# >>> print s.ctime
# @endcode
@property
def ctime(self):
return self._info.cst_ctime
def __repr__(self):
return "%s uid:%d gid:%d size:%d" % (self.path, self.uid, self.gid, self.size)
class AuthenticationFailure(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class GeneralFailure(Exception):
def __init__(self, action, status, value):
self.action = action
self.status = status
self.value = value
def __str__(self):
return "%s(%s) %s" % (self.action, self.status, self.value)
class TransferFailure(Exception):
def __init__(self, action, status, source, dest):
self.action = action
self.status = status
self.source = source
self.dest = dest
def __str__(self):
return "Error with %s(%s) %s %s" % (self.action, self.status, self.source, self.dest)
class ChirpJobError(Exception):
def __init__(self, action, status, value):
self.action = action
self.status = status
self.value = value
def __str__(self):
return "%s(%s) %s" % (self.action, self.status, self.value)
# @endcode
|
gpl-2.0
| -4,008,700,873,344,837,600
| 33.678407
| 118
| 0.573151
| false
| 4.129285
| false
| false
| false
|
kralf/morsel
|
python/lib/morsel/nodes/ode/solids/mesh.py
|
1
|
1092
|
from morsel.panda import *
from morsel.nodes.node import Node
from morsel.nodes.ode.object import Object
from morsel.nodes.facade import Mesh as _Mesh
from morsel.nodes.ode.solid import Solid
#-------------------------------------------------------------------------------
class Mesh(Solid):
def __init__(self, **kargs):
super(Mesh, self).__init__(**kargs)
#-------------------------------------------------------------------------------
def getMesh(self):
if not self._mesh and self.object:
self._mesh = _Mesh(parent = self)
self._mesh.copyFrom(self.object.mesh.model, flatten = True)
return self._mesh
mesh = property(getMesh)
#-------------------------------------------------------------------------------
def fit(self, node):
Solid.fit(self, node)
mesh = _Mesh(position = self.globalPosition, orientation =
self.globalOrientation)
mesh.copyFrom(node.mesh, flatten = True)
data = panda.OdeTriMeshData(mesh)
mesh.detachNode()
self.geometry = panda.OdeTriMeshGeom(node.world.space, data)
|
gpl-2.0
| -6,021,325,268,098,747,000
| 29.361111
| 80
| 0.519231
| false
| 4.216216
| false
| false
| false
|
alexhilton/miscellaneous
|
python/pygrep.py
|
1
|
3334
|
#!/usr/bin/env python
"""A Python version of grep utility.
Search one or more named input files against one or more given patterns.
Print the line containing the match, if there are any.
"""
from optparse import OptionParser;
import re;
import fileinput;
import os.path;
FILENAME = '\033[92m';
LINENO = '\033[94m';
MATCH = '\033[91m';
ENDC = '\033[0m';
class MultiMatcher(object):
"""A set of searchable Regular Expression Patterns
Accept one or more regular expression such that if any one of them
matches a line the first successful match is returned.
"""
def __init__(self, multipattern, ignore_case):
flags = 0;
if ignore_case:
flags = re.IGNORECASE;
self.multipattern = [re.compile(pattern, flags) for pattern in multipattern];
def search(self, line):
for pattern in self.multipattern:
m = pattern.search(line);
if m is not None:
return m;
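# Illustrative usage (not in the original script): search() returns the first
# successful match object, or None when no pattern matches;
#   matcher = MultiMatcher(["foo", "bar"], ignore_case=True);
#   assert matcher.search("FOOBAR") is not None;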
def build_options():
parser = OptionParser(usage = "usage: %prog [options] -e PATTERN files", version = "%prog 1.0");
parser.add_option("-i", "--ignore-case", action = "store_true", dest = "ignore_case",
default = False, help = "ignore case of letters when matching");
parser.add_option("-r", "--recursive", action = "store_true", dest = "recursive",
default = False, help = "search for files in directory recursively");
parser.add_option("-n", "--negative", action = "store_true", dest = "negative",
default = False, help = "show the lines that does not match the pattern");
parser.add_option("-e", "--regexpr", action = "append", dest = "regexpr",
help = "specify pattern expression on which to match");
return parser;
def do_matching(filename, matcher):
for line in fileinput.input(filename):
line = line.rstrip();
match = matcher.search(line);
if options.negative:
if match is None:
print "%s%s:%s%d %s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), ENDC, line);
else:
if match is not None:
first_part = line[:match.start()];
the_match = line[match.start():match.end()];
second_part = line[match.end():];
print "%s%s:%s%d %s%s%s%s%s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), \
ENDC, first_part, MATCH, the_match, ENDC, second_part);
def main():
global options;
parser = build_options();
options, args = parser.parse_args();
if not options.regexpr:
parser.error("You must specify at least one PATTERN");
if not args:
parser.error("You must specify at least one input file or directory");
matcher = MultiMatcher(options.regexpr, options.ignore_case);
for filename in args:
if not os.path.exists(filename):
print "No such file or directory: ", filename;
continue;
if options.recursive and os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
[do_matching(os.path.join(root, entry), matcher) for entry in files];
elif os.path.isfile(filename):
do_matching(filename, matcher);
if __name__ == "__main__":
main();
|
apache-2.0
| 1,519,449,779,832,885,800
| 37.321839
| 118
| 0.607379
| false
| 3.913146
| false
| false
| false
|
etkirsch/legends-of-erukar
|
erukar/content/inventory/weapons/standard/Focus.py
|
1
|
1029
|
import numpy as np
from erukar.system.engine.inventory import ArcaneWeapon
class Focus(ArcaneWeapon):
Probability = 1
BaseName = "Focus"
EssentialPart = "devotion"
AttackRange = 3
RangePenalty = 3
BaseWeight = 1.0
# Damage
DamageRange = [2, 5]
DamageType = 'force'
DamageModifier = "sense"
DamageScalar = 2.4
ScalingRequirement = 6
EnergyCost = 5
# Distribution
Distribution = np.random.gamma
DistributionProperties = (2, 0.3)
BaseStatInfluences = {
'sense': {'requirement': 8, 'scaling_factor': 3.5, 'cutoff': 200},
'acuity': {'requirement': 0, 'scaling_factor': 1.2, 'cutoff': 100},
}
def failing_requirements(self, wielder):
if wielder.arcane_energy < self.EnergyCost:
return ['Not enough Arcane Energy to use {} -- need {}, have {}'.format(self.alias(), self.EnergyCost, wielder.arcane_energy)]
def on_calculate_attack(self, cmd):
cmd.args['player_lifeform'].arcane_energy -= self.EnergyCost
|
agpl-3.0
| -8,065,563,989,550,132,000
| 27.583333
| 138
| 0.640428
| false
| 3.298077
| false
| false
| false
|
fishroot/qdeep
|
lib/qdeep/objects/script/__init__.py
|
1
|
6352
|
# -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = 'patrick.michl@gmail.com'
__license__ = 'GPLv3'
import nemoa
import qdeep.objects.common
from PySide import QtGui, QtCore
class Editor(qdeep.objects.common.Editor):
objType = 'script'
def createCentralWidget(self):
self.textArea = QtGui.QTextEdit()
self.textArea.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.textArea.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.textArea.document().contentsChanged.connect(
self.documentWasModified)
font = QtGui.QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(10)
self.textArea.setFont(font)
self.textArea.setAcceptDrops(True)
self.highlighter = Highlighter(self.textArea.document())
self.setCentralWidget(self.textArea)
def createActions(self):
self.actRunScript = QtGui.QAction(
qdeep.common.getIcon('actions', 'debug-run.png'),
"Run Script", self,
shortcut = "F5",
statusTip = "Run python script",
triggered = self.runScript)
def createToolBars(self):
self.scriptToolBar = self.addToolBar("Script")
self.scriptToolBar.addAction(self.actRunScript)
def getModified(self):
return self.textArea.document().isModified()
def setModified(self, value = True):
self.textArea.document().setModified(value)
def loadFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot read file %s:\n%s." % (
fileName, file.errorString()))
return False
instr = QtCore.QTextStream(file)
self.textArea.setPlainText(instr.readAll())
self.textArea.document().contentsChanged.connect(
self.documentWasModified)
return True
def saveFile(self, fileName):
file = QtCore.QFile(fileName)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "MDI",
"Cannot write file %s:\n%s." % (fileName,
file.errorString()))
return False
outstr = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstr << self.textArea.toPlainText()
QtGui.QApplication.restoreOverrideCursor()
self.setModified(False)
self.updateWindowTitle()
return True
def runScript(self):
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
nemoa.run(self.getName())
QtGui.QApplication.restoreOverrideCursor()
class Highlighter(QtGui.QSyntaxHighlighter):
def __init__(self, parent=None):
super(Highlighter, self).__init__(parent)
keywordFormat = QtGui.QTextCharFormat()
keywordFormat.setForeground(QtCore.Qt.darkBlue)
keywordFormat.setFontWeight(QtGui.QFont.Bold)
keywordPatterns = ["\\bchar\\b", "\\bclass\\b", "\\bconst\\b",
"\\bdouble\\b", "\\benum\\b", "\\bexplicit\\b", "\\bfriend\\b",
"\\binline\\b", "\\bint\\b", "\\blong\\b", "\\bnamespace\\b",
"\\boperator\\b", "\\bprivate\\b", "\\bprotected\\b",
"\\bpublic\\b", "\\bshort\\b", "\\bsignals\\b", "\\bsigned\\b",
"\\bslots\\b", "\\bstatic\\b", "\\bstruct\\b",
"\\btemplate\\b", "\\btypedef\\b", "\\btypename\\b",
"\\bunion\\b", "\\bunsigned\\b", "\\bvirtual\\b", "\\bvoid\\b",
"\\bvolatile\\b", "\\bimport\\b", "\\bdef\\b",
"\\bTrue\\b", "\\bFalse\\b", "\\breturn\\b"]
self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)
for pattern in keywordPatterns]
classFormat = QtGui.QTextCharFormat()
classFormat.setFontWeight(QtGui.QFont.Bold)
classFormat.setForeground(QtCore.Qt.darkMagenta)
self.highlightingRules.append((QtCore.QRegExp("\\bQ[A-Za-z]+\\b"),
classFormat))
singleLineCommentFormat = QtGui.QTextCharFormat()
singleLineCommentFormat.setForeground(QtCore.Qt.red)
self.highlightingRules.append((QtCore.QRegExp("//[^\n]*"),
singleLineCommentFormat))
self.multiLineCommentFormat = QtGui.QTextCharFormat()
self.multiLineCommentFormat.setForeground(QtCore.Qt.red)
quotationFormat = QtGui.QTextCharFormat()
quotationFormat.setForeground(QtCore.Qt.darkGreen)
self.highlightingRules.append((QtCore.QRegExp("\".*\""),
quotationFormat))
self.highlightingRules.append((QtCore.QRegExp("'.*'"),
quotationFormat))
functionFormat = QtGui.QTextCharFormat()
functionFormat.setFontItalic(True)
functionFormat.setForeground(QtCore.Qt.blue)
self.highlightingRules.append((QtCore.QRegExp("\\b[A-Za-z0-9_]+(?=\\()"),
functionFormat))
self.commentStartExpression = QtCore.QRegExp("/\\*")
self.commentEndExpression = QtCore.QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, format in self.highlightingRules:
expression = QtCore.QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStartExpression.indexIn(text)
while startIndex >= 0:
endIndex = self.commentEndExpression.indexIn(text, startIndex)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLength = len(text) - startIndex
else:
commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()
self.setFormat(startIndex, commentLength,
self.multiLineCommentFormat)
startIndex = self.commentStartExpression.indexIn(text,
startIndex + commentLength)
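# Illustrative usage sketch (not part of the original module): the highlighter
# attaches to a QTextEdit document, mirroring Editor.createCentralWidget above.
# editor = QtGui.QTextEdit()
# highlighter = Highlighter(editor.document())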
|
gpl-3.0
| -1,445,960,790,410,959,600
| 37.035928
| 97
| 0.614137
| false
| 3.916153
| false
| false
| false
|
quantosauros/cppyProject
|
cppy/cybosPlus/cpRqRp/StockOrderCash.py
|
1
|
1827
|
# coding=utf-8
'''
Created on 2016. 8. 14.
@author: Jay
'''
from cppy.adaptor import CpRqRpClass
import win32com.client
@CpRqRpClass('CpTrade.CpTd0311')
class StockOrderCash(object):
    '''
    Requests and receives order data (cash orders) for exchange-listed
    stocks, KOSDAQ stocks, and ELWs.
    '''
def __init__(self):
self.instCpTdUtil = win32com.client.Dispatch("CpTrade.CpTdUtil")
class InputType(enumerate):
        SellOrBuy = 0      # order type code (1: sell, 2: buy)
        AccountNumber = 1  # account number
        StockCode = 3      # stock code
        OrderNumber = 4    # order quantity
        OrderPrice = 5     # order unit price
class OutputType(enumerate):
        AccountNumber = 1  # account number
        StockCode = 3      # stock code
        OrderNumber = 4    # order quantity
        OrderPrice = 5     # order unit price
def setInputValue(self, inputTypes, inputValues):
self.inputTypes = inputTypes
self.inputValues = inputValues
def setOutputValue(self, outputTypes):
self.outputTypes = outputTypes
def request(self, com_obj):
self.instCpTdUtil.TradeInit()
for i in range(len(self.inputTypes)) :
com_obj.SetInputValue(self.inputTypes[i], self.inputValues[i])
        # account number
accountNumber = self.instCpTdUtil.AccountNumber[0]
com_obj.SetInputValue(1, accountNumber)
com_obj.Request()
def response(self, com_obj):
result = ""
for j in range(0, len(self.outputTypes)) :
value = com_obj.GetHeaderValue(self.outputTypes[j])
result += str(value) + "; "
print (result)
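# Illustrative usage sketch (not part of the original module). The stock code
# and prices are made up; the CpRqRpClass decorator is assumed to route
# request()/response() to the dispatched COM object.
# order = StockOrderCash()
# order.setInputValue([StockOrderCash.InputType.SellOrBuy,
#                      StockOrderCash.InputType.StockCode,
#                      StockOrderCash.InputType.OrderNumber,
#                      StockOrderCash.InputType.OrderPrice],
#                     ["2", "A003540", 10, 16500])
# order.setOutputValue([StockOrderCash.OutputType.StockCode,
#                       StockOrderCash.OutputType.OrderNumber,
#                       StockOrderCash.OutputType.OrderPrice])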
|
mit
| 1,099,986,482,034,040,200
| 26.525424
| 74
| 0.544319
| false
| 2.61838
| false
| false
| false
|
jieter/f-engrave
|
application/settings.py
|
1
|
9320
|
import os
def cast_boolean(value):
if type(value) is bool:
return bool(value)
elif len(value) > 1:
return value == 'True'
else:
return bool(int(value))
def cast_string(value):
value = str(value).strip()
value = value.replace('\\n', '\n')
# unquote string
if value.startswith('"') and value.endswith('"'):
return value[1:-1].strip()
else:
return value
CAST_TYPES = {
'str': cast_string,
'bool': cast_boolean,
'int': int,
'float': float
}
# Old names to maintain backwards compatibility while reading
# config files. Only supported while loading values
# from config files.
OLD_SETTING_NAMES = {
'gpost': 'gcode_postamble',
'gpre': 'gcode_preamble',
'bmp_long': 'bmp_longcurve',
'bmp_optto': 'bmp_opttolerance',
'bmp_turnp': 'bmp_turnpol',
'bmp_turds': 'bmp_turdsize',
'bmp_alpha': 'bmp_alphamax',
'v_drv_crner': 'v_drv_corner',
'v_stp_crner': 'v_step_corner',
'FEED': 'feedrate',
'PLUNGE': 'plunge_rate',
'WSPACE': 'word_space',
'CSPACE': 'char_space',
'LSPACE': 'line_space',
'TANGLE': 'text_angle',
'TCODE': 'text_code',
'H_CALC': 'height_calculation',
'XSCALE': 'xscale',
'YSCALE': 'yscale',
'STHICK': 'line_thickness',
'TRADIUS': 'text_radius',
'ZSAFE': 'zsafe',
'ZCUT': 'zcut',
}
CONFIG_FILENAME = 'config.ngc'
CONFIG_MARKER = '(fengrave_set '
CONFIG_TEMPLATE = CONFIG_MARKER + '%20s %s )'
TEXT_CODE = 'text_code'
CUT_TYPE_ENGRAVE = 'engrave'
CUT_TYPE_VCARVE = 'v-carve'
HOME_DIR = os.path.expanduser("~")
NGC_FILE = (HOME_DIR + "/None")
# IMAGE_FILE = (HOME_DIR + "/None")
IMAGE_FILE = (HOME_DIR + "/Desktop/None") # TEST
class Settings(object):
"""
Default values for the application settings.
"""
_defaults = {
'HOME_DIR': HOME_DIR,
'NGC_FILE': NGC_FILE,
'IMAGE_FILE': IMAGE_FILE,
'config_filename': CONFIG_FILENAME,
'batch': False,
'show_axis': True,
'show_box': True,
'show_thick': True,
'flip': False,
'mirror': False,
# text plotted on a circle with radius
'text_radius': 0.0,
'outer': True, # outside circle
'upper': True, # on top of cirle
'fontdex': False,
'useIMGsize': False,
# flip normals (V-carve side)
'v_flop': False,
# ball carve (ball nose cutter)
'b_carve': False,
# TODO is "BALL" shape valid, or is this covered by b_carve?
# options: 'VBIT', 'FLAT', 'BALL'
'bit_shape': 'VBIT',
# plot during v-carve calculation [GUI]
'v_pplot': False,
'inlay': False,
'no_comments': True,
# arc fitting, options 'none', 'center', 'radius'
'arc_fit': 'none',
'ext_char': False,
# disable variables in gcode [GCODE]
'var_dis': True,
# cleanup cut directions
'clean_P': True,
'clean_X': True,
'clean_Y': False,
# V-Bit cut directions
'v_clean_P': False,
'v_clean_X': True,
'v_clean_Y': False,
'yscale': 50.8,
'xscale': 100.0,
'line_space': 1.2,
'char_space': 25,
'word_space': 100,
'text_angle': 0.0,
# safe height [GCODE]
'zsafe': 5.0,
# engraving depth [GCODE]
'zcut': -0.1,
# derived value
'max_cut': 0.0,
'line_thickness': 0.25,
'border_thickness': 0.5,
# options: 'Default',
# 'Top-Left', 'Top-Center', 'Top-Right',
# 'Mid-Left', 'Mid-Center', 'Mid-Right',
# 'Bot-Left', 'Bot-Center', 'Bot-Right'
'origin': 'Default',
# options: 'Left', 'Right', 'Center'
'justify': 'Left',
# options: 'in', 'mm'
'units': 'mm',
# options: 'in/min', 'mm/min'
'feed_units': 'mm/min',
# horizontal feedrate [GCODE]
'feedrate': 60.0,
# feedrate for plunging into stock [GCODE]
'plunge_rate': 10.0,
# which bounding boxes are used to calculate line height
# options: 'max_all', 'max_use'
'height_calculation': 'max_use',
# Add a box/circle around plot
'plotbox': False,
# Gap between box and engraving
'boxgap': 6.35,
# font location and name
'fontdir': 'fonts',
'fontfile': 'normal.cxf',
# options: 'engrave', 'v-carve'
'cut_type': CUT_TYPE_ENGRAVE,
# 'cut_type': CUT_TYPE_VCARVE,
# options: 'text', 'image'
'input_type': 'text',
# 'input_type': 'image',
# v-cutter parameters
# options: 'scorch', 'voronoi'
'v_strategy': 'scorch',
'v_bit_angle': 60,
'v_bit_dia': 3.0,
'v_depth_lim': 0.0,
'v_drv_corner': 135,
'v_step_corner': 200,
'v_step_len': 0.254,
# v-carve loop accuracy
'v_acc': 0.00254,
'allowance': 0.0,
# options: 'chr', 'all'
'v_check_all': 'all',
'v_rough_stk': 0.0,
'v_max_cut': 0.0,
# options: 'black', 'white', 'right', 'left', 'minority', 'majority', or 'random'
'bmp_turnpol': 'minority',
'bmp_turdsize': 2,
'bmp_alphamax': 1.0,
'bmp_opttolerance': 0.2,
'bmp_longcurve': True,
'xorigin': 0.0,
'yorigin': 0.0,
'segarc': 5.0,
'accuracy': 0.001,
# diameter of the cleanup bit
'clean_dia': 3.0,
# clean-up step-over as percentage of the clean-up bit diameter
'clean_step': 50,
# Width of the clean-up search area (obsolete before or since v1.65)
'clean_w': 50.8,
'clean_v': 1.27,
'clean_name': '_clean',
# G-Code Default Preamble
#
# G17 : sets XY plane
# G64 P0.003 : G64 P- (motion blending tolerance set to 0.003 (units))
# G64 without P option keeps the best speed possible, no matter how
# far away from the programmed point you end up.
# M3 S3000 : Spindle start at 3000
# M7 : Turn mist coolant on
'gcode_preamble': 'G17 G64 P0.003 M3 S3000 M7',
# G-Code Default Postamble
#
# M5 : Stop Spindle
# M9 : Turn all coolant off
# M2 : End Program
'gcode_postamble': 'M5 M9 M2',
'default_text': 'OOF-Engrave',
'text_code': '',
}
def __init__(self, filename=None, autoload=False):
self._settings = self._defaults.copy()
self._text_code = u''
if filename is not None:
self.from_configfile(filename)
elif autoload:
files_to_try = (
CONFIG_FILENAME,
os.path.expanduser('~') + os.path.sep + CONFIG_FILENAME,
os.path.expanduser('~') + os.path.sep + '.fengraverc'
)
available = [c for c in files_to_try if os.path.isfile(c)]
if len(available) > 0:
self.from_configfile(available[0])
def __iter__(self):
return self._settings.items()
def type(self, name):
return str(type(self._settings[name]))[7:-2]
def set(self, name, value):
if name == TEXT_CODE:
self._set_text_code(value)
else:
cast = CAST_TYPES[self.type(name)]
self._settings[name] = cast(value)
def get(self, name):
return self._settings[name]
# only for use in C-API calls
def get_dict(self):
return self._settings
def reset(self, name=None):
if name is None:
self._settings = self._defaults.copy()
else:
self.set(name, self._defaults[name])
def has_setting(self, name):
return name in self._settings
def get_fontfile(self):
return self.get('fontdir') + os.path.sep + self.get('fontfile')
def from_configfile(self, filename):
with open(filename, 'r') as config:
for line in config.readlines():
if not line.startswith(CONFIG_MARKER):
continue
line = line[len(CONFIG_MARKER):].strip()
name = line.split(' ')[0].strip()
setting = line[len(name):-1].strip()
if not self.has_setting(name) and name in OLD_SETTING_NAMES:
name = OLD_SETTING_NAMES[name]
try:
self.set(name, setting)
except KeyError:
print 'Setting not found:', name # TODO
def to_gcode(self):
gcode = [CONFIG_TEMPLATE % (key, str(value).replace('\n', '\\n'))
for key, value in self._settings.items()]
return gcode
def get_text_code(self):
return self._text_code
def _set_text_code(self, line):
text_code = u''
code_list = line.split()
for char in code_list:
try:
text_code += "%c" % unichr(int(char))
except:
text_code += "%c" % chr(int(char))
self._text_code = text_code
def __str__(self):
return 'Settings:\n' + ('\n'.join([', '.join(map(str, l)) for l in self._settings.items()]))
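# Illustrative usage sketch (not part of the original module): values set as
# strings are cast via CAST_TYPES, and to_gcode() emits the comment lines that
# from_configfile() later parses back.
# settings = Settings()
# settings.set('zcut', '-0.2')
# assert settings.get('zcut') == -0.2
# print '\n'.join(settings.to_gcode())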
|
gpl-3.0
| -4,150,245,928,453,565,400
| 25.704871
| 100
| 0.517275
| false
| 3.370705
| true
| false
| false
|
ic-labs/django-icekit
|
icekit/api/images/serializers.py
|
1
|
2783
|
from django.apps import apps
from rest_framework import serializers
from rest_framework.settings import api_settings
from drf_queryfields import QueryFieldsMixin
from icekit.api.base_serializers import WritableSerializerHelperMixin, \
WritableRelatedFieldSettings
Image = apps.get_model('icekit_plugins_image.Image')
MediaCategory = apps.get_model('icekit.MediaCategory')
class MediaCategorySerializer(serializers.ModelSerializer):
# Redefine `name` field here to avoid `unique=True` constraint that will
# be unavoidably applied by DRF validators if we leave the field to be
# autogenerated based on the model.
name = serializers.CharField(
max_length=255,
read_only=False,
required=False,
)
class Meta:
model = MediaCategory
fields = ['id', 'name']
extra_kwargs = {
'id': {
'read_only': False,
'required': False,
},
}
class ImageSerializer(
WritableSerializerHelperMixin,
QueryFieldsMixin,
serializers.HyperlinkedModelSerializer
):
"""
A serializer for an ICEkit Image.
"""
categories = MediaCategorySerializer(
many=True,
)
class Meta:
model = Image
fields = [
api_settings.URL_FIELD_NAME,
'id',
'image',
'width',
'height',
'title',
'alt_text',
'caption',
'credit',
'source',
'external_ref',
'categories',
'license',
'notes',
'date_created',
'date_modified',
'is_ok_for_web',
'is_cropping_allowed',
]
extra_kwargs = {
'url': {
'lookup_field': 'pk',
'view_name': 'api:image-api-detail',
},
}
writable_related_fields = {
'categories': WritableRelatedFieldSettings(
lookup_field=['id', 'name'], can_create=True),
}
# TODO It is probably not a good idea to allow API user to set auto-gen ID
# field, but this is the only way I have found (so far) to allow ID to be
# passed through API to relate existing images.
class RelatedImageSerializer(ImageSerializer):
"""
A serializer for an ICEkit Image relationships that exposes the ID primary
key field to permit referring to existing images by ID, instead of needing
to upload an actual image file every time.
"""
class Meta(ImageSerializer.Meta):
extra_kwargs = {
'id': {
'read_only': False,
'required': False,
},
'image': {
'required': False,
}
}
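# Illustrative only (not part of the original module): QueryFieldsMixin lets
# API clients trim the serialized payload per request, e.g.
#   GET /api/images/?fields=id,title,categories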
|
mit
| 2,230,048,733,898,857,700
| 26.83
| 78
| 0.564499
| false
| 4.403481
| false
| false
| false
|
rmcauley/rainwave
|
rainwave/playlist_objects/artist.py
|
1
|
5684
|
from libs import db
from libs import config
from rainwave.playlist_objects.metadata import (
AssociatedMetadata,
MetadataUpdateError,
make_searchable_string,
)
class Artist(AssociatedMetadata):
select_by_name_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE lower(artist_name) = lower(%s)"
select_by_id_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE artist_id = %s"
select_by_song_id_query = 'SELECT r4_artists.artist_id AS id, r4_artists.artist_name AS name, r4_song_artist.artist_is_tag AS is_tag, artist_order AS "order" FROM r4_song_artist JOIN r4_artists USING (artist_id) WHERE song_id = %s ORDER BY artist_order'
disassociate_song_id_query = (
"DELETE FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
)
associate_song_id_query = "INSERT INTO r4_song_artist (song_id, artist_id, artist_is_tag, artist_order) VALUES (%s, %s, %s, %s)"
has_song_id_query = "SELECT COUNT(song_id) FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
check_self_size_query = "SELECT COUNT(song_id) FROM r4_song_artist JOIN r4_songs USING (song_id) WHERE artist_id = %s AND song_verified = TRUE"
delete_self_query = "DELETE FROM r4_artists WHERE artist_id = %s"
# needs to be specialized because of artist_order
def associate_song_id(self, song_id, is_tag=None, order=None):
if not order and not self.data.get("order"):
order = db.c.fetch_var(
"SELECT MAX(artist_order) FROM r4_song_artist WHERE song_id = %s",
(song_id,),
)
if not order:
order = -1
order += 1
elif not order:
order = self.data["order"]
self.data["order"] = order
        if is_tag is None:
is_tag = self.is_tag
else:
self.is_tag = is_tag
        if db.c.fetch_var(self.has_song_id_query, (song_id, self.id)) == 0:
            if not db.c.update(
                self.associate_song_id_query, (song_id, self.id, is_tag, order)
            ):
                raise MetadataUpdateError(
                    "Cannot associate song ID %s with %s ID %s"
                    % (song_id, self.__class__.__name__, self.id)
                )
def _insert_into_db(self):
self.id = db.c.get_next_id("r4_artists", "artist_id")
return db.c.update(
"INSERT INTO r4_artists (artist_id, artist_name, artist_name_searchable) VALUES (%s, %s, %s)",
(self.id, self.data["name"], make_searchable_string(self.data["name"])),
)
def _update_db(self):
return db.c.update(
"UPDATE r4_artists SET artist_name = %s, artist_name_searchable = %s WHERE artist_id = %s",
(self.data["name"], make_searchable_string(self.data["name"]), self.id),
)
def _start_cooldown_db(self, sid, cool_time):
# Artists don't have cooldowns on Rainwave.
pass
def _start_election_block_db(self, sid, num_elections):
# Artists don't block elections either (OR DO THEY) (they don't)
pass
def load_all_songs(self, sid, user_id=1):
all_songs = db.c.fetch_all(
"SELECT r4_song_artist.song_id AS id, "
"r4_songs.song_origin_sid AS sid, "
"song_title AS title, "
"CAST(ROUND(CAST(song_rating AS NUMERIC), 1) AS REAL) AS rating, "
"song_exists AS requestable, "
"song_length AS length, "
"song_cool AS cool, "
"song_cool_end AS cool_end, "
"song_url as url, song_link_text as link_text, "
"COALESCE(song_rating_user, 0) AS rating_user, "
"COALESCE(song_fave, FALSE) AS fave, "
"album_name, r4_albums.album_id "
"FROM r4_song_artist "
"JOIN r4_songs USING (song_id) "
"JOIN r4_albums USING (album_id) "
"LEFT JOIN r4_album_sid ON (r4_albums.album_id = r4_album_sid.album_id AND r4_album_sid.sid = %s) "
"LEFT JOIN r4_song_sid ON (r4_songs.song_id = r4_song_sid.song_id AND r4_song_sid.sid = %s) "
"LEFT JOIN r4_song_ratings ON (r4_song_artist.song_id = r4_song_ratings.song_id AND r4_song_ratings.user_id = %s) "
"WHERE r4_song_artist.artist_id = %s AND r4_songs.song_verified = TRUE "
"ORDER BY song_exists DESC, album_name, song_title",
(sid, sid, user_id, self.id),
)
# And of course, now we have to burn extra CPU cycles to make sure the right album name is used and that we present the data
# in the same format seen everywhere else on the API. Still, much faster then loading individual song objects.
self.data["all_songs"] = {}
for configured_sids in config.station_ids:
self.data["all_songs"][configured_sids] = {}
        requestable = user_id > 1
for song in all_songs:
if not song["sid"] in config.station_ids:
continue
song["requestable"] = requestable and song["requestable"]
if not song["album_id"] in self.data["all_songs"][song["sid"]]:
self.data["all_songs"][song["sid"]][song["album_id"]] = []
self.data["all_songs"][song["sid"]][song["album_id"]].append(song)
song["albums"] = [
{
"name": song.pop("album_name"),
"id": song.pop("album_id"),
}
]
def to_dict(self, user=None):
d = super(Artist, self).to_dict(user)
d["order"] = self.data["order"]
return d
|
gpl-2.0
| 7,762,295,732,175,003,000
| 46.764706
| 257
| 0.574771
| false
| 3.281755
| false
| false
| false
|
dgjnpr/py-junos-eznc
|
lib/jnpr/junos/factory/view.py
|
1
|
8729
|
import warnings
from contextlib import contextmanager
from copy import deepcopy
from lxml import etree
from jnpr.junos.factory.viewfields import ViewFields
class View(object):
"""
View is the base-class that makes extracting values from XML
data appear as objects with attributes.
"""
ITEM_NAME_XPATH = 'name'
FIELDS = {}
GROUPS = None
# -------------------------------------------------------------------------
# CONSTRUCTOR
# -------------------------------------------------------------------------
def __init__(self, table, view_xml):
"""
:table:
instance of the RunstatTable
:view_xml:
            this should be an lxml etree Element object. This
constructor also accepts a list with a single item/XML
"""
# if as_xml is passed as a list, make sure it only has
# a single item, common response from an xpath search
if isinstance(view_xml, list):
if 1 == len(view_xml):
view_xml = view_xml[0]
else:
raise ValueError("constructor only accepts a single item")
# now ensure that the thing provided is an lxml etree Element
if not isinstance(view_xml, etree._Element):
raise ValueError("constructor only accecpts lxml.etree._Element")
self._table = table
self.ITEM_NAME_XPATH = table.ITEM_NAME_XPATH
self._init_xml(view_xml)
def _init_xml(self, given_xml):
self._xml = given_xml
if self.GROUPS is not None:
self._groups = {}
for xg_name, xg_xpath in self.GROUPS.items():
xg_xml = self._xml.xpath(xg_xpath)
# @@@ this is technically an error; need to trap it
if not len(xg_xml):
continue
self._groups[xg_name] = xg_xml[0]
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def T(self):
""" return the Table instance for the View """
return self._table
@property
def D(self):
""" return the Device instance for this View """
return self.T.D
@property
def name(self):
""" return the name of view item """
if self.ITEM_NAME_XPATH is None:
return self._table.D.hostname
if isinstance(self.ITEM_NAME_XPATH, str):
# simple key
return self._xml.findtext(self.ITEM_NAME_XPATH).strip()
else:
# composite key
# return tuple([self.xml.findtext(i).strip() for i in
# self.ITEM_NAME_XPATH])
return tuple([self.xml.xpath(i)[0].text.strip()
for i in self.ITEM_NAME_XPATH])
# ALIAS key <=> name
key = name
@property
def xml(self):
""" returns the XML associated to the item """
return self._xml
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def keys(self):
""" list of view keys, i.e. field names """
return self.FIELDS.keys()
def values(self):
""" list of view values """
return [getattr(self, field) for field in self.keys()]
def items(self):
""" list of tuple(key,value) """
return zip(self.keys(), self.values())
def _updater_instance(self, more):
""" called from extend """
if hasattr(more, 'fields'):
self.FIELDS = deepcopy(self.__class__.FIELDS)
self.FIELDS.update(more.fields.end)
if hasattr(more, 'groups'):
self.GROUPS = deepcopy(self.__class__.GROUPS)
self.GROUPS.update(more.groups)
def _updater_class(self, more):
""" called from extend """
if hasattr(more, 'fields'):
self.FIELDS.update(more.fields.end)
if hasattr(more, 'groups'):
self.GROUPS.update(more.groups)
@contextmanager
def updater(self, fields=True, groups=False, all=True, **kvargs):
"""
provide the ability for subclassing objects to extend the
definitions of the fields. this is implemented as a
context manager with the form called from the subclass
constructor:
with self.extend() as more:
more.fields = <dict>
more.groups = <dict> # optional
"""
# ---------------------------------------------------------------------
# create a new object class so we can attach stuff to it arbitrarily.
# then pass that object to the caller, yo!
# ---------------------------------------------------------------------
more = type('RunstatViewMore', (object,), {})()
if fields is True:
            more.fields = ViewFields()
# ---------------------------------------------------------------------
# callback through context manager
# ---------------------------------------------------------------------
yield more
updater = self._updater_class if all is True else \
self._updater_instance
updater(more)
def asview(self, view_cls):
""" create a new View object for this item """
return view_cls(self._table, self._xml)
def refresh(self):
"""
~~~ EXPERIMENTAL ~~~
refresh the data from the Junos device. this only works if the table
provides an "args_key", does not update the original table, just this
specific view/item
"""
warnings.warn("Experimental method: refresh")
if self._table.can_refresh is not True:
raise RuntimeError("table does not support this feature")
# create a new table instance that gets only the specific named
# value of this view
tbl_xml = self._table._rpc_get(self.name)
new_xml = tbl_xml.xpath(self._table.ITEM_XPATH)[0]
self._init_xml(new_xml)
return self
# -------------------------------------------------------------------------
# OVERLOADS
# -------------------------------------------------------------------------
def __repr__(self):
""" returns the name of the View with the associate item name """
return "%s:%s" % (self.__class__.__name__, self.name)
def __getattr__(self, name):
"""
returns a view item value, called as :obj.name:
"""
item = self.FIELDS.get(name)
if item is None:
raise ValueError("Unknown field: '%s'" % name)
if 'table' in item:
# if this is a sub-table, then return that now
return item['table'](self.D, self._xml)
# otherwise, not a sub-table, and handle the field
astype = item.get('astype', str)
if 'group' in item:
found = self._groups[item['group']].xpath(item['xpath'])
else:
found = self._xml.xpath(item['xpath'])
len_found = len(found)
if astype is bool:
# handle the boolean flag case separately
return bool(len_found)
if not len_found:
# even for the case of numbers, do not set the value. we
# want to detect "does not exist" vs. defaulting to 0
# -- 2013-nov-19, JLS.
return None
try:
# added exception handler to catch malformed xpath expressesion
# -- 2013-nov-19, JLS.
# added support to handle multiple xpath values, i.e. a list of
# things that have the same xpath expression (common in configs)
            # -- 2013-dec-06, JLS
# added support to use the element tag if the text is empty
def _munch(x):
as_str = x if isinstance(x, str) else x.text
if as_str is not None:
as_str = as_str.strip()
if not as_str:
as_str = x.tag # use 'not' to test for empty
return astype(as_str)
if 1 == len_found:
return _munch(found[0])
return [_munch(this) for this in found]
        except Exception:
raise RuntimeError("Unable to handle field:'%s'" % name)
# and if we are here, then we didn't handle the field.
raise RuntimeError("Unable to handle field:'%s'" % name)
def __getitem__(self, name):
"""
allow the caller to extract field values using :obj['name']:
the same way they would do :obj.name:
"""
return getattr(self, name)
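# -------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a concrete View only
# declares FIELDS mapping attribute names to xpath specs; the field names
# below are made up.
#
# class PortView(View):
#     FIELDS = {
#         'oper': {'xpath': 'oper-status'},
#         'mtu': {'xpath': 'mtu', 'astype': int},
#     }
# -------------------------------------------------------------------------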
|
apache-2.0
| -8,300,979,280,519,996,000
| 33.366142
| 79
| 0.500745
| false
| 4.567766
| false
| false
| false
|
2baOrNot2ba/AntPat
|
scripts/viewJonespat_dual.py
|
1
|
2897
|
#!/usr/bin/env python
"""A simple viewer for Jones patterns for dual-polarized representations.
"""
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid
from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM
from antpat.reps.hamaker import convLOFARcc2DPE
import antpat.io.filetypes as antfiles
def plotJonesCanonical(theta, phi, jones, dpelemname):
normalize = True
dbscale = True
polarplt = True
IXRTYPE = 'IXR_J' # Can be IXR_J or IXR_M
g, IXRJ = jones2gIXR(jones)
IXRM = IXRJ2IXRM(IXRJ)
if IXRTYPE == 'IXR_J':
IXR = IXRJ
    elif IXRTYPE == 'IXR_M':
IXR = IXRM
else:
raise RuntimeError("""Error: IXR type {} unknown.
Known types are IXR_J, IXR_M.""".format(IXRTYPE))
fig = plt.figure()
fig.suptitle(dpelemname)
plt.subplot(121, polar=polarplt)
if normalize:
g_max = numpy.max(g)
g = g/g_max
if dbscale:
g = 20*numpy.log10(g)
# nrlvls = 5
# g_lvls = numpy.max(g) - 3.0*numpy.arange(nrlvls)
plt.pcolormesh(phi, numpy.rad2deg(theta), g)
# plt.contour( phi, numpy.rad2deg(theta), g_dress, levels = g_lvls)
plt.colorbar()
plt.title('Amp gain')
plt.subplot(122, polar=polarplt)
plt.pcolormesh(phi, numpy.rad2deg(theta), 10*numpy.log10(IXR))
plt.colorbar()
plt.title('IXR_J')
plt.show()
def plotFFpat():
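    # Note: relies on the module-level jones, THETA, PHI and args values
    # that are assigned in the __main__ block below.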
from antpat.reps.sphgridfun import tvecfun
for polchan in [0, 1]:
E_th = jones[:, :, polchan, 0].squeeze()
E_ph = jones[:, :, polchan, 1].squeeze()
tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, args.freq,
vcoordlist=['Ludwig3'], projection='orthographic')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("freq", type=float,
help="Frequency in Hertz")
parser.add_argument("filename", help="""
Filename of dual-polarization FF, Hamaker-Arts format,
or a single-polarization FF (p-channel)""")
parser.add_argument("filename_q", nargs='?',
help="""
Filename of second (q-channel) single-polarization FF.
""")
args = parser.parse_args()
if args.filename.endswith(antfiles.HamArtsuffix):
hp = convLOFARcc2DPE(args.filename, [args.freq])
elif args.filename.endswith(antfiles.FEKOsuffix):
hp = DualPolElem()
hp.load_ffes(args.filename, args.filename_q)
else:
raise RuntimeError("dual-pol pattern file type not known")
THETA, PHI = ZenHemisphGrid()
jones = hp.getJonesAlong([args.freq], (THETA, PHI))
plotFFpat()
# plotJonesCanonical(THETA, PHI, jones, os.path.basename(args.filename)
# + ' (' + str(args.freq/1e6) + ' MHz)')
|
isc
| 7,596,389,316,562,434,000
| 33.488095
| 78
| 0.613393
| false
| 3.111708
| false
| false
| false
|
bgmerrell/desmod
|
tests/test_timescale.py
|
1
|
1953
|
import pytest
from desmod.timescale import parse_time, scale_time
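# Behaviour exercised below (inferred from the test cases, not from docs):
# parse_time('12 ms') splits a string into a (magnitude, unit) tuple, and
# scale_time((m, u), (M, U)) expresses the time (m, u) as a multiple of the
# timescale (M, U), e.g. scale_time((1000, 'us'), (1, 'ms')) == 1.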
@pytest.mark.parametrize('test_input, expected', [
('12 s', (12, 's')),
('12s', (12, 's')),
('+12s', (12, 's')),
('-12s', (-12, 's')),
('12.0 s', (12.0, 's')),
('12. s', (12.0, 's')),
('+12.0 s', (12.0, 's')),
('-12.0 s', (-12.0, 's')),
('12.000 s', (12.0, 's')),
('1.2e1 s', (12.0, 's')),
('1.2e+1 s', (12.0, 's')),
('1.2e-1 s', (0.12, 's')),
('-1.2e-1 s', (-0.12, 's')),
('12.s', (12.0, 's')),
('12.0s', (12.0, 's')),
('12.000s', (12.0, 's')),
('1.2e1s', (12.0, 's')),
('.12e+2s', (12.0, 's')),
('.12s', (0.12, 's')),
('12 fs', (12, 'fs')),
('12 ps', (12, 'ps')),
('12 ns', (12, 'ns')),
('12 us', (12, 'us')),
('12 ms', (12, 'ms')),
('12.0ms', (12.0, 'ms')),
('s', (1, 's')),
('fs', (1, 'fs')),
])
def test_parse_time(test_input, expected):
m, u = parse_time(test_input)
assert (m, u) == expected
assert isinstance(m, type(expected[0]))
@pytest.mark.parametrize('test_input', [
'',
'123 s',
'123',
'123.0',
'123 S',
'123 Ms',
'123e1.3 s',
'+-123 s',
'123 ks',
'. s',
'1-.1 s',
'1e1.2 s',
])
def test_parse_time_except(test_input):
with pytest.raises(ValueError) as exc_info:
parse_time(test_input)
assert 'float' not in str(exc_info.value)
def test_parse_time_default():
assert parse_time('123', default_unit='ms') == (123, 'ms')
@pytest.mark.parametrize('input_t, input_tscale, expected', [
((1, 'us'), (1, 'us'), 1),
((1, 'us'), (10, 'us'), 0.1),
((1000, 'us'), (1, 'ms'), 1),
((1, 'us'), (100, 'ms'), 1e-5),
((50, 'ms'), (1, 'ns'), 50000000),
((5.2, 'ms'), (1, 'us'), 5200),
])
def test_scale_time(input_t, input_tscale, expected):
scaled = scale_time(input_t, input_tscale)
assert expected == scaled
assert isinstance(scaled, type(expected))
|
mit
| -2,659,281,447,644,491,300
| 24.697368
| 62
| 0.453661
| false
| 2.453518
| true
| false
| false
|
bourguet/operator_precedence_parsing
|
operator_precedence.py
|
1
|
7999
|
#! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
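# Operator-precedence parsing: every symbol carries a left and a right
# priority.  Parser.shift() below keeps reducing the stack while the symbol
# on top has a strictly higher right priority than the incoming symbol's
# left priority, so asymmetric priorities encode associativity (see the
# table in cexp_parser()).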
class SymbolDesc:
def __init__(self, symbol, lprio, rprio, evaluator):
self.symbol = symbol
self.lprio = lprio
self.rprio = rprio
self.evaluator = evaluator
self.value = None
def __repr__(self):
return '<Symbol {} {}/{}: {}>'.format(self.symbol, self.lprio, self.rprio, self.value)
def identity_evaluator(args):
if len(args) == 1 and type(args[0]) == SymbolDesc:
return Node(args[0].symbol)
else:
return CompositeNode('ID ERROR', args)
def binary_evaluator(args):
if len(args) != 3 or type(args[0]) == SymbolDesc or type(args[1]) != SymbolDesc or type(args[2]) == SymbolDesc:
return CompositeNode('BINARY ERROR', args)
return CompositeNode(args[1].symbol, [args[0], args[2]])
class Parser:
def __init__(self):
self.symbols = {}
self.symbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
self.symbols['$eoi$'] = SymbolDesc('$eoi$', 0, 0, None)
self.reset()
def register_symbol(self, oper, lprio, rprio, evaluator=None):
if evaluator is None:
evaluator = binary_evaluator
if type(oper) is str:
self.symbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
else:
for op in oper:
self.symbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
def reset(self):
self.stack = [self.symbols['$soi$']]
def id_symbol(self, id):
return SymbolDesc(id, 1000, 1000, identity_evaluator)
def evaluate(self):
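        # Reduce step: walk down the stack to the start of the run that
        # shares the current priority level, then replace that whole run
        # with the result of its operator's evaluator.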
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
curprio = self.stack[idx].lprio
while type(self.stack[idx-1]) != SymbolDesc or self.stack[idx-1].rprio == curprio:
idx -= 1
if type(self.stack[idx]) == SymbolDesc:
curprio = self.stack[idx].lprio
args = self.stack[idx:]
self.stack = self.stack[:idx]
for i in args:
if type(i) == SymbolDesc:
self.stack.append(i.evaluator(args))
return
raise RuntimeError('Internal error: no evaluator found in {}'.format(args))
def tos_symbol(self):
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
return self.stack[idx]
def shift(self, sym):
while self.tos_symbol().rprio > sym.lprio:
self.evaluate()
self.stack.append(sym)
def push_eoi(self):
self.shift(self.symbols['$eoi$'])
def parse(self, s):
self.reset()
for tk in lexer.tokenize(s):
if tk.lexem in self.symbols:
self.shift(self.symbols[tk.lexem])
elif tk.kind == 'ID':
self.shift(self.id_symbol(tk))
elif tk.kind == 'NUMBER':
self.shift(self.id_symbol(tk))
else:
raise RuntimeError('Unexpected symbol: {}'.format(tk))
self.push_eoi()
if len(self.stack) != 3:
raise RuntimeError('Internal error: bad state of stack at end')
return self.stack[1]
def dump(self):
print('Stack')
for oper in self.stack:
print(' {}'.format(oper))
def open_parenthesis_evaluator(args):
if (len(args) == 3
and type(args[0]) == SymbolDesc and args[0].symbol == '('
and type(args[1]) != SymbolDesc
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return args[1]
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return CompositeNode('call', [args[0]])
elif (len(args) == 4
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) != SymbolDesc
and type(args[3]) == SymbolDesc and args[3].symbol == ')'):
if args[2].token == ',':
callargs = args[2].children
else:
callargs = [args[2]]
callargs.insert(0, args[0])
return CompositeNode('call', callargs)
else:
return CompositeNode('( ERROR', args)
def close_parenthesis_evaluator(args):
return CompositeNode(') ERROR', args)
def open_bracket_evaluator(args):
return CompositeNode('get', [args[0], args[2]])
def close_bracket_evaluator(args):
return CompositeNode('] ERROR', args)
def coma_evaluator(args):
return CompositeNode(',', [x for x in args if type(x) != SymbolDesc])
def unary_evaluator(args):
if len(args) != 2:
return CompositeNode('UNARY ERROR', args)
if type(args[0]) == SymbolDesc and type(args[1]) != SymbolDesc:
return CompositeNode(args[0].symbol, [args[1]])
elif type(args[0]) != SymbolDesc and type(args[1]) == SymbolDesc:
return CompositeNode('post'+args[1].symbol, [args[0]])
else:
return CompositeNode('UNARY ERROR', args)
def unary_or_binary_evaluator(args):
if (len(args) == 2
and type(args[0]) == SymbolDesc
and type(args[1]) != SymbolDesc):
return CompositeNode(args[0].symbol, [args[1]])
elif (len(args) == 2
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc):
return CompositeNode('post'+args[1].symbol, [args[0]])
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc
and type(args[2]) != SymbolDesc):
return CompositeNode(args[1].symbol, [args[0], args[2]])
else:
return CompositeNode('1,2-ARY ERROR', args)
def question_evaluator(args):
if (len(args) != 5
or type(args[0]) == SymbolDesc
or type(args[1]) != SymbolDesc or args[1].symbol != '?'
or type(args[2]) == SymbolDesc
or type(args[3]) != SymbolDesc or args[3].symbol != ':'
or type(args[4]) == SymbolDesc):
return CompositeNode('? ERROR', args)
return CompositeNode('?', [args[0], args[2], args[4]])
def colon_evaluator(args):
return CompositeNode(': ERROR', args)
def cexp_parser():
parser = Parser()
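    # Larger priorities bind tighter.  Left-associative operators use
    # rprio > lprio (e.g. '+' is 24/25, so a second '+' forces a reduction
    # first); right-associative operators use rprio < lprio (e.g. '**' is
    # 29/28, so the parser keeps shifting instead).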
parser.register_symbol(',', 2, 2, coma_evaluator)
parser.register_symbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
parser.register_symbol('?', 7, 1.5, question_evaluator)
parser.register_symbol(':', 1.5, 6, colon_evaluator)
parser.register_symbol('||', 8, 9)
parser.register_symbol('&&', 10, 11)
parser.register_symbol('|', 12, 13)
parser.register_symbol('^', 14, 15)
parser.register_symbol('&', 16, 17, unary_or_binary_evaluator)
parser.register_symbol(['==', '!='], 18, 19)
parser.register_symbol(['<', '>', '<=', '>='], 20, 21)
parser.register_symbol(['<<', '>>'], 22, 23)
parser.register_symbol(['+', '-'], 24, 25, unary_or_binary_evaluator)
parser.register_symbol(['/', '%'], 26, 27)
parser.register_symbol(['*'], 26, 27, unary_or_binary_evaluator)
parser.register_symbol('**', 29, 28)
parser.register_symbol(['++', '--', '~', '!'], 31, 30, unary_evaluator) # +, -, *, & should be here
parser.register_symbol(['.', '->'], 32, 33)
parser.register_symbol('(', 100, 1, open_parenthesis_evaluator)
parser.register_symbol(')', 1, 100, close_parenthesis_evaluator)
parser.register_symbol('[', 100, 1, open_bracket_evaluator)
parser.register_symbol(']', 1, 100, close_bracket_evaluator)
return parser
def main(args):
parser = cexp_parser()
for s in args[1:]:
try:
exp = parser.parse(s)
print('{} -> {}'.format(s, exp))
except RuntimeError as run_error:
print('Unable to parse {}: {}'.format(s, run_error))
if __name__ == "__main__":
main(sys.argv)
|
bsd-2-clause
| -1,557,918,058,041,208,000
| 33.478448
| 115
| 0.56182
| false
| 3.537815
| false
| false
| false
|
hernandito/SickRage
|
sickbeard/providers/generic.py
|
1
|
26015
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import itertools
from random import shuffle
from base64 import b16encode, b32decode
import requests
from hachoir_parser import createParser
import sickbeard
from sickbeard import helpers, classes, logger, db
from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT
from sickbeard import tvcache
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.common import Quality
from sickbeard.common import user_agents
from sickrage.helper.common import sanitize_filename
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from sickrage.show.Show import Show
from sickbeard import show_name_helpers
class GenericProvider(object):
NZB = "nzb"
TORRENT = "torrent"
def __init__(self, name):
# these need to be set in the subclass
self.providerType = None
self.name = name
self.urls = {}
self.url = ''
self.public = False
self.show = None
self.supportsBacklog = True
self.supportsAbsoluteNumbering = False
self.anime_only = False
self.search_mode = None
self.search_fallback = False
self.enabled = False
self.enable_daily = False
self.enable_backlog = False
self.cache = tvcache.TVCache(self)
self.session = requests.Session()
shuffle(user_agents)
self.headers = {'User-Agent': user_agents[0]}
self.btCacheURLS = [
'http://torcache.net/torrent/{torrent_hash}.torrent',
'http://thetorrent.org/torrent/{torrent_hash}.torrent',
'http://btdig.com/torrent/{torrent_hash}.torrent',
# 'http://torrage.com/torrent/{torrent_hash}.torrent',
# 'http://itorrents.org/torrent/{torrent_hash}.torrent',
]
shuffle(self.btCacheURLS)
self.proper_strings = ['PROPER|REPACK|REAL']
def getID(self):
return GenericProvider.makeID(self.name)
@staticmethod
def makeID(name):
return re.sub(r"[^\w\d_]", "_", name.strip().lower())
def imageName(self):
return self.getID() + '.png'
# pylint: disable=no-self-use,unused-variable
# Method could be a function, Unused variable
def _checkAuth(self):
return True
def _doLogin(self):
return True
def isActive(self):
return False
def isEnabled(self):
return self.enabled
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
if self.providerType == GenericProvider.NZB:
result = classes.NZBSearchResult(episodes)
elif self.providerType == GenericProvider.TORRENT:
result = classes.TorrentSearchResult(episodes)
else:
result = classes.SearchResult(episodes)
result.provider = self
return result
def getURL(self, url, post_data=None, params=None, timeout=30, json=False, needBytes=False):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
session=self.session, json=json, needBytes=needBytes)
def _makeURL(self, result):
urls = []
filename = u''
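        # Magnet links cannot be fetched directly: extract the btih hash
        # (and the display name, when present) from the URI and build
        # candidate .torrent URLs from the cache mirrors in self.btCacheURLS.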
if result.url.startswith('magnet'):
try:
torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()
try:
torrent_name = re.findall('dn=([^&]+)', result.url)[0]
except Exception:
torrent_name = 'NO_DOWNLOAD_NAME'
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).upper()
if not torrent_hash:
logger.log(u"Unable to extract torrent hash from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
except Exception:
logger.log(u"Unable to extract torrent hash or name from magnet: " + ex(result.url), logger.ERROR)
return urls, filename
else:
urls = [result.url]
if self.providerType == GenericProvider.TORRENT:
filename = ek(os.path.join, sickbeard.TORRENT_DIR, sanitize_filename(result.name) + '.' + self.providerType)
elif self.providerType == GenericProvider.NZB:
filename = ek(os.path.join, sickbeard.NZB_DIR, sanitize_filename(result.name) + '.' + self.providerType)
return urls, filename
def downloadResult(self, result):
"""
Save the result to disk.
"""
# check for auth
if not self._doLogin():
return False
urls, filename = self._makeURL(result)
for url in urls:
if 'NO_DOWNLOAD_NAME' in url:
continue
if url.startswith('http'):
self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})
logger.log(u"Downloading a result from " + self.name + " at " + url)
# Support for Jackett/TorzNab
if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT
if helpers.download_file(url, filename, session=self.session, headers=self.headers):
if self._verify_download(filename):
logger.log(u"Saved result to " + filename, logger.INFO)
return True
else:
logger.log(u"Could not download %s" % url, logger.WARNING)
helpers.remove_file_failed(filename)
if len(urls):
logger.log(u"Failed to download any results", logger.WARNING)
return False
def _verify_download(self, file_name=None):
"""
Checks the saved file to see if it was actually valid, if not then consider the download a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if file_name.endswith(GenericProvider.TORRENT):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
logger.log(u"Result is not a valid torrent file", logger.DEBUG)
return False
return True
def searchRSS(self, episodes):
return self.cache.findNeededEpisodes(episodes)
def getQuality(self, item, anime=False):
"""
Figures out the quality of the given RSS item node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item)
quality = Quality.sceneQuality(title, anime)
return quality
# pylint: disable=no-self-use,unused-argument
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
return []
def _get_season_search_strings(self, episode):
return []
    def _get_episode_search_strings(self, ep_obj, add_string=''):
return []
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('title', '')
if title:
title = u'' + title.replace(' ', '.')
url = item.get('link', '')
if url:
            url = url.replace('&amp;', '&').replace('%26tr%3D', '&tr=')
return title, url
def _get_size(self, item):
"""Gets the size from the item"""
logger.log(u"Provider type doesn't have _get_size() implemented yet", logger.ERROR)
return -1
def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
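        # Overview: answer each episode from the cache when possible; else
        # build season/episode search strings, query the provider, sort the
        # hits by quality, parse every release name, and keep only results
        # that match an episode we actually want (caching the rest).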
self._checkAuth()
self.show = show
results = {}
itemList = []
searched_scene_season = None
for epObj in episodes:
# search cache for episode result
cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
if cacheResult:
if epObj.episode not in results:
results[epObj.episode] = cacheResult
else:
results[epObj.episode].extend(cacheResult)
# found result, search next episode
continue
# skip if season already searched
if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
continue
# mark season searched for season pack searches so we can skip later on
searched_scene_season = epObj.scene_season
search_strings = []
if len(episodes) > 1 and search_mode == 'sponly':
# get season search results
search_strings = self._get_season_search_strings(epObj)
elif search_mode == 'eponly':
# get single episode search results
search_strings = self._get_episode_search_strings(epObj)
first = search_strings and isinstance(search_strings[0], dict) and 'rid' in search_strings[0]
if first:
logger.log(u'First search_string has rid', logger.DEBUG)
for curString in search_strings:
itemList += self._doSearch(curString, search_mode, len(episodes), epObj=epObj)
if first:
first = False
if itemList:
logger.log(u'First search_string had rid, and returned results, skipping query by string', logger.DEBUG)
break
else:
logger.log(u'First search_string had rid, but returned no results, searching with string query', logger.DEBUG)
# if we found what we needed already from cache then return results and exit
if len(results) == len(episodes):
return results
# sort list by quality
if len(itemList):
items = {}
itemsUnknown = []
for item in itemList:
quality = self.getQuality(item, anime=show.is_anime)
if quality == Quality.UNKNOWN:
itemsUnknown += [item]
else:
if quality not in items:
items[quality] = [item]
else:
items[quality].append(item)
itemList = list(itertools.chain(*[v for (k, v) in sorted(items.iteritems(), reverse=True)]))
itemList += itemsUnknown if itemsUnknown else []
# filter results
cl = []
for item in itemList:
(title, url) = self._get_title_and_url(item)
# parse the file name
try:
myParser = NameParser(parse_method=('normal', 'anime')[show.is_anime])
parse_result = myParser.parse(title)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
continue
except InvalidShowException:
logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
continue
showObj = parse_result.show
quality = parse_result.quality
release_group = parse_result.release_group
version = parse_result.version
addCacheEntry = False
if not (showObj.air_by_date or showObj.sports):
if search_mode == 'sponly':
if len(parse_result.episode_numbers):
logger.log(
u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
logger.DEBUG)
addCacheEntry = True
if len(parse_result.episode_numbers) and (parse_result.season_number not in set([ep.season for ep in episodes])
or not [ep for ep in episodes if ep.scene_episode in parse_result.episode_numbers]):
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
else:
if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
if not addCacheEntry:
# we just use the existing info for normal searches
actual_season = parse_result.season_number
actual_episodes = parse_result.episode_numbers
else:
sameDaySpecial = False
if not parse_result.is_air_by_date:
logger.log(
u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
logger.DEBUG)
addCacheEntry = True
else:
airdate = parse_result.air_date.toordinal()
myDB = db.DBConnection()
sql_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
[showObj.indexerid, airdate])
if len(sql_results) == 2:
if int(sql_results[0]['season']) == 0 and int(sql_results[1]['season']) != 0:
actual_season = int(sql_results[1]["season"])
actual_episodes = [int(sql_results[1]["episode"])]
sameDaySpecial = True
elif int(sql_results[1]['season']) == 0 and int(sql_results[0]['season']) != 0:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
sameDaySpecial = True
elif len(sql_results) != 1:
logger.log(
u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
logger.WARNING)
addCacheEntry = True
if not addCacheEntry and not sameDaySpecial:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
# add parsed result to cache for usage later on
if addCacheEntry:
logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
# pylint: disable=protected-access
# Access to a protected member of a client class
ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
if ci is not None:
cl.append(ci)
continue
# make sure we want the episode
wantEp = True
for epNo in actual_episodes:
if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
wantEp = False
break
if not wantEp:
logger.log(
u"Ignoring result " + title + " because we don't want an episode that is " +
Quality.qualityStrings[
quality], logger.INFO)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
# make a result object
epObj = []
for curEp in actual_episodes:
epObj.append(showObj.getEpisode(actual_season, curEp))
result = self.getResult(epObj)
result.show = showObj
result.url = url
result.name = title
result.quality = quality
result.release_group = release_group
result.version = version
result.content = None
result.size = self._get_size(item)
if len(epObj) == 1:
epNum = epObj[0].episode
logger.log(u"Single episode result.", logger.DEBUG)
elif len(epObj) > 1:
epNum = MULTI_EP_RESULT
logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
parse_result.episode_numbers), logger.DEBUG)
elif len(epObj) == 0:
epNum = SEASON_RESULT
logger.log(u"Separating full season result to check for later", logger.DEBUG)
if epNum not in results:
results[epNum] = [result]
else:
results[epNum].append(result)
# check if we have items to add to cache
if len(cl) > 0:
# pylint: disable=protected-access
# Access to a protected member of a client class
myDB = self.cache._getDB()
myDB.mass_action(cl)
return results
def findPropers(self, search_date=None):
results = self.cache.listPropers(search_date)
return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
results]
def seedRatio(self):
'''
Provider should override this value if custom seed ratio enabled
It should return the value of the provider seed ratio
'''
return ''
class NZBProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.NZB
def isActive(self):
return sickbeard.USE_NZBS and self.isEnabled()
def _get_size(self, item):
try:
size = item.get('links')[1].get('length', -1)
except IndexError:
size = -1
if not size:
logger.log(u"Size was not found in your provider response", logger.DEBUG)
return int(size)
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.TORRENT
def isActive(self):
return sickbeard.USE_TORRENTS and self.isEnabled()
def _get_title_and_url(self, item):
from feedparser.util import FeedParserDict
if isinstance(item, (dict, FeedParserDict)):
title = item.get('title', '')
download_url = item.get('url', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
title = item[0]
download_url = item[1]
# Temp global block `DIAMOND` releases
if title.endswith('DIAMOND'):
logger.log(u'Skipping DIAMOND release for mass fake releases.')
title = download_url = u'FAKERELEASE'
if title:
title = self._clean_title_from_provider(title)
if download_url:
            download_url = download_url.replace('&amp;', '&')
return (title, download_url)
def _get_size(self, item):
size = -1
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024*1024:
size = -1
return size
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) # 1) showName.SXX
search_string['Season'].append(ep_string.encode('utf-8').strip())
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name + ' '
if ep_obj.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', ' ')
elif ep_obj.show.sports:
ep_string += str(ep_obj.airdate).replace('-', ' ') + ('|', ' ')[len(self.proper_strings) > 1] + ep_obj.airdate.strftime('%b')
elif ep_obj.show.anime:
ep_string += "%02d" % int(ep_obj.scene_absolute_number)
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string = ep_string + ' %s' % add_string
search_string['Episode'].append(ep_string.encode('utf-8').strip())
return [search_string]
@staticmethod
def _clean_title_from_provider(title):
return (title or '').replace(' ', '.')
@property
def _custom_trackers(self):
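        # Public providers get the configured tracker list appended as
        # repeated '&tr=' parameters; private providers (or an empty
        # sickbeard.TRACKERS_LIST) yield an empty string.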
return ('', '&tr=' + '&tr='.join(set([x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()])))[self.public] if sickbeard.TRACKERS_LIST else ''
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST]) + ')'
)
for sqlshow in sqlResults or []:
show = Show.find(sickbeard.showList, int(sqlshow["showid"]))
if show:
curEp = show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
for term in self.proper_strings:
searchString = self._get_episode_search_strings(curEp, add_string=term)
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), show))
return results
|
gpl-3.0
| -7,911,745,144,782,687,000
| 37.944611
| 189
| 0.554757
| false
| 4.353975
| false
| false
| false
|
eclee25/flu-SDI-exploratory-age
|
scripts/create_fluseverity_figs/export_zOR_classif.py
|
1
|
10068
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/18/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
def print_dict_to_file(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s\n" % (key, value[0], value[1]))
def print_dict_to_file2(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,region,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
def print_dict_to_file3(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write('season,state,mn_retro,mn_early\n')
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
##############################################
# SDI NATIONAL
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: nation-level peak-based retrospective classification
# regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: nation-level peak-based retrospective classification
# import same files as regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
print d_classifzOR
print d_classifzOR_reg
# fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_classifzOR, fn1)
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4)
|
mit
| -8,794,785,964,571,561,000
| 58.934524
| 206
| 0.698153
| false
| 2.526474
| false
| false
| false
|
RoboCupULaval/StrategyIA
|
ai/GameDomainObjects/ball.py
|
1
|
1041
|
# Under MIT License, see LICENSE.txt
from typing import Dict
from Util import Position
class Ball:
def __init__(self, position=Position()):
self._position = position
self._velocity = Position()
def update(self, new_dict: Dict):
self.position = new_dict['position']
self.velocity = new_dict['velocity']
def is_moving_fast(self, fast_speed = 600.0): # mm/s
return fast_speed < self.velocity.norm
def is_mobile(self, immobile_speed = 300.0): # mm/s
return immobile_speed < self.velocity.norm
def is_immobile(self):
return not self.is_mobile()
@property
def position(self) -> Position:
return self._position
@position.setter
def position(self, value):
assert isinstance(value, Position)
self._position = value
@property
def velocity(self) -> Position:
return self._velocity
@velocity.setter
def velocity(self, value):
assert isinstance(value, Position)
self._velocity = value
|
mit
| -1,920,990,554,748,314,000
| 23.209302
| 56
| 0.630163
| false
| 4.034884
| false
| false
| false
|
RogerRueegg/lvw-young-talents
|
src/profiles/views.py
|
1
|
2796
|
from __future__ import unicode_literals
from django.views import generic
from django.shortcuts import get_object_or_404, redirect
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from . import forms
from . import models
import datetime
class ShowProfile(LoginRequiredMixin, generic.TemplateView):
template_name = "profiles/show_profile.html"
http_method_names = ['get']
def get(self, request, *args, **kwargs):
slug = self.kwargs.get('slug')
if slug:
profile = get_object_or_404(models.Profile, slug=slug)
user = profile.user
else:
user = self.request.user
if user == self.request.user:
kwargs["editable"] = True
kwargs["show_user"] = user
return super(ShowProfile, self).get(request, *args, **kwargs)
class EditProfile(LoginRequiredMixin, generic.TemplateView):
template_name = "profiles/edit_profile.html"
http_method_names = ['get', 'post']
def get(self, request, *args, **kwargs):
user = self.request.user
if "user_form" not in kwargs:
kwargs["user_form"] = forms.UserForm(instance=user)
if "profile_form" not in kwargs:
kwargs["profile_form"] = forms.ProfileForm(instance=user.profile)
return super(EditProfile, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
user = self.request.user
user_form = forms.UserForm(request.POST, instance=user)
profile_form = forms.ProfileForm(request.POST,
request.FILES,
instance=user.profile)
if not (user_form.is_valid() and profile_form.is_valid()):
message = ""
if profile_form.errors:
if 'phone_number' in profile_form.errors.keys():
message += "Bitte gibt Deine Natelnummer wie folgt ein: +41791234567. "
if 'bdate' in profile_form.errors.keys():
message += "Bitte gibt das Geburtsdatum wie folgt ein: 2002-01-15 für 15. Januar 2002"
messages.error(request, message)
user_form = forms.UserForm(instance=user)
profile_form = forms.ProfileForm(instance=user.profile)
return super(EditProfile, self).get(request,
user_form=user_form,
profile_form=profile_form)
# Both forms are fine. Time to save!
user_form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
messages.success(request, "Profile details saved!")
return redirect("profiles:show_self")
|
mit
| 7,446,359,648,485,783,000
| 41.348485
| 106
| 0.598927
| false
| 4.086257
| false
| false
| false
|
Erotemic/ibeis
|
super_setup.py
|
1
|
26677
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Requirements:
pip install gitpython click ubelt
"""
import re
from os.path import exists
from os.path import join
from os.path import dirname
from os.path import abspath
import ubelt as ub
import functools
class ShellException(Exception):
"""
Raised when shell returns a non-zero error code
"""
class DirtyRepoError(Exception):
"""
If the repo is in an unexpected state, its very easy to break things using
automated scripts. To be safe, we don't do anything. We ensure this by
raising this error.
"""
def parse_version(package):
"""
Statically parse the version number from __init__.py
CommandLine:
python -c "import setup; print(setup.parse_version('ovharn'))"
"""
from os.path import dirname, join
import ast
init_fpath = join(dirname(__file__), package, '__init__.py')
with open(init_fpath) as file_:
sourcecode = file_.read()
pt = ast.parse(sourcecode)
class VersionVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for target in node.targets:
if target.id == '__version__':
self.version = node.value.s
visitor = VersionVisitor()
visitor.visit(pt)
return visitor.version
class GitURL(object):
"""
Represent and transform git urls between protocols defined in [3]_.
The code in GitURL is largely derived from [1]_ and [2]_.
Credit to @coala and @FriendCode.
Note:
        while this code aims to support protocols defined in [3]_, it is only
tested for specific use cases and therefore might need to be improved.
References:
.. [1] https://github.com/coala/git-url-parse
.. [2] https://github.com/FriendCode/giturlparse.py
.. [3] https://git-scm.com/docs/git-clone#URLS
Example:
>>> self = GitURL('git@gitlab.kitware.com:computer-vision/netharn.git')
>>> print(ub.repr2(self.parts()))
>>> print(self.format('ssh'))
>>> print(self.format('https'))
>>> self = GitURL('https://gitlab.kitware.com/computer-vision/netharn.git')
>>> print(ub.repr2(self.parts()))
>>> print(self.format('ssh'))
>>> print(self.format('https'))
"""
SYNTAX_PATTERNS = {
# git allows for a url style syntax
'url': re.compile(r'(?P<transport>\w+://)'
r'((?P<user>\w+[^@]*@))?'
r'(?P<host>[a-z0-9_.-]+)'
r'((?P<port>:[0-9]+))?'
r'/(?P<path>.*\.git)'),
# git allows for ssh style syntax
'ssh': re.compile(r'(?P<user>\w+[^@]*@)'
r'(?P<host>[a-z0-9_.-]+)'
r':(?P<path>.*\.git)'),
}
r"""
Ignore:
# Helper to build the parse pattern regexes
def named(key, regex):
return '(?P<{}>{})'.format(key, regex)
def optional(pat):
return '({})?'.format(pat)
parse_patterns = {}
# Standard url format
transport = named('transport', r'\w+://')
user = named('user', r'\w+[^@]*@')
host = named('host', r'[a-z0-9_.-]+')
port = named('port', r':[0-9]+')
path = named('path', r'.*\.git')
pat = ''.join([transport, optional(user), host, optional(port), '/', path])
parse_patterns['url'] = pat
pat = ''.join([user, host, ':', path])
parse_patterns['ssh'] = pat
print(ub.repr2(parse_patterns))
"""
def __init__(self, url):
self._url = url
self._parts = None
def parts(self):
"""
Parses a GIT URL and returns an info dict.
Returns:
dict: info about the url
Raises:
Exception : if parsing fails
"""
info = {
'syntax': '',
'host': '',
'user': '',
'port': '',
'path': None,
'transport': '',
}
for syntax, regex in self.SYNTAX_PATTERNS.items():
match = regex.search(self._url)
if match:
info['syntax'] = syntax
info.update(match.groupdict())
break
else:
raise Exception('Invalid URL {!r}'.format(self._url))
# change none to empty string
for k, v in info.items():
if v is None:
info[k] = ''
return info
def format(self, protocol):
"""
Change the protocol of the git URL
"""
parts = self.parts()
if protocol == 'ssh':
parts['user'] = 'git@'
url = ''.join([
parts['user'], parts['host'], ':', parts['path']
])
else:
parts['transport'] = protocol + '://'
parts['port'] = ''
parts['user'] = ''
url = ''.join([
parts['transport'], parts['user'], parts['host'],
parts['port'], '/', parts['path']
])
return url
class Repo(ub.NiceRepr):
"""
Abstraction that references a git repository, and is able to manipulate it.
A common use case is to define a `remote` and a `code_dpath`, which lets
you check and ensure that the repo is cloned and on a particular branch.
You can also query its status, and pull, and perform custom git commands.
Args:
*args: name, dpath, code_dpath, remotes, remote, branch
Attributes:
        All names listed in args are attributes. In addition, the class also
exposes these derived attributes.
url (URI): where the primary location is
Example:
>>> # Here is a simple example referencing ubelt
>>> from super_setup import *
>>> import ubelt as ub
>>> repo = Repo(
>>> remote='https://github.com/Erotemic/ubelt.git',
>>> code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
>>> )
>>> print('repo = {}'.format(repo))
>>> repo.check()
>>> repo.ensure()
>>> repo.check()
>>> repo.status()
>>> repo._cmd('python setup.py build')
>>> repo._cmd('./run_doctests.sh')
repo = <Repo('ubelt')>
>>> # Here is a less simple example referencing ubelt
>>> from super_setup import *
>>> import ubelt as ub
>>> repo = Repo(
>>> name='ubelt-local',
>>> remote='github',
>>> branch='master',
>>> remotes={
>>> 'github': 'https://github.com/Erotemic/ubelt.git',
>>> 'fakemirror': 'https://gitlab.com/Erotemic/ubelt.git',
>>> },
>>> code_dpath=ub.ensuredir(ub.expandpath('~/tmp/demo-repos')),
>>> )
>>> print('repo = {}'.format(repo))
>>> repo.ensure()
>>> repo._cmd('python setup.py build')
>>> repo._cmd('./run_doctests.sh')
"""
def __init__(repo, **kwargs):
repo.name = kwargs.pop('name', None)
repo.dpath = kwargs.pop('dpath', None)
repo.code_dpath = kwargs.pop('code_dpath', None)
repo.remotes = kwargs.pop('remotes', None)
repo.remote = kwargs.pop('remote', None)
repo.branch = kwargs.pop('branch', 'master')
repo._logged_lines = []
repo._logged_cmds = []
if repo.remote is None:
if repo.remotes is None:
raise ValueError('must specify some remote')
else:
if len(repo.remotes) > 1:
raise ValueError('remotes are ambiguous, specify one')
else:
repo.remote = ub.peek(repo.remotes)
else:
if repo.remotes is None:
_default_remote = 'origin'
repo.remotes = {
_default_remote: repo.remote
}
repo.remote = _default_remote
repo.url = repo.remotes[repo.remote]
if repo.name is None:
suffix = repo.url.split('/')[-1]
repo.name = suffix.split('.git')[0]
if repo.dpath is None:
repo.dpath = join(repo.code_dpath, repo.name)
repo.pkg_dpath = join(repo.dpath, repo.name)
for path_attr in ['dpath', 'code_dpath']:
path = getattr(repo, path_attr)
if path is not None:
setattr(repo, path_attr, ub.expandpath(path))
repo.verbose = kwargs.pop('verbose', 3)
if kwargs:
raise ValueError('unknown kwargs = {}'.format(kwargs.keys()))
repo._pygit = None
def set_protocol(self, protocol):
"""
Changes the url protocol to either ssh or https
Args:
protocol (str): can be ssh or https
"""
gurl = GitURL(self.url)
self.url = gurl.format(protocol)
def info(repo, msg):
repo._logged_lines.append(('INFO', 'INFO: ' + msg))
if repo.verbose >= 1:
print(msg)
def debug(repo, msg):
repo._logged_lines.append(('DEBUG', 'DEBUG: ' + msg))
if repo.verbose >= 1:
print(msg)
def _getlogs(repo):
return '\n'.join([t[1] for t in repo._logged_lines])
def __nice__(repo):
return '{}, branch={}'.format(repo.name, repo.branch)
def _cmd(repo, command, cwd=ub.NoParam, verbose=ub.NoParam):
if verbose is ub.NoParam:
verbose = repo.verbose
if cwd is ub.NoParam:
cwd = repo.dpath
repo._logged_cmds.append((command, cwd))
repo.debug('Run {!r} in {!r}'.format(command, cwd))
info = ub.cmd(command, cwd=cwd, verbose=verbose)
if verbose:
if info['out'].strip():
repo.info(info['out'])
if info['err'].strip():
repo.debug(info['err'])
if info['ret'] != 0:
raise ShellException(ub.repr2(info))
return info
@property
# @ub.memoize_property
def pygit(repo):
""" pip install gitpython """
import git as gitpython
if repo._pygit is None:
repo._pygit = gitpython.Repo(repo.dpath)
return repo._pygit
def develop(repo):
devsetup_script_fpath = join(repo.dpath, 'run_developer_setup.sh')
if not exists(devsetup_script_fpath):
raise AssertionError('Assume we always have run_developer_setup.sh: repo={!r}'.format(repo))
repo._cmd(devsetup_script_fpath, cwd=repo.dpath)
def doctest(repo):
devsetup_script_fpath = join(repo.dpath, 'run_doctests.sh')
if not exists(devsetup_script_fpath):
raise AssertionError('Assume we always have run_doctests.sh: repo={!r}'.format(repo))
repo._cmd(devsetup_script_fpath, cwd=repo.dpath)
def clone(repo):
if exists(repo.dpath):
raise ValueError('cannot clone into non-empty directory')
args = '--recursive'
if repo.branch is not None:
args += ' -b {}'.format(repo.branch)
command = 'git clone {args} {url} {dpath}'.format(args=args, url=repo.url, dpath=repo.dpath)
repo._cmd(command, cwd=repo.code_dpath)
def _assert_clean(repo):
if repo.pygit.is_dirty():
raise DirtyRepoError('The repo={} is dirty'.format(repo))
def check(repo):
repo.ensure(dry=True)
def versions(repo):
"""
Print current version information
"""
fmtkw = {}
fmtkw['pkg'] = parse_version(repo.pkg_dpath) + ','
fmtkw['sha1'] = repo._cmd('git rev-parse HEAD', verbose=0)['out'].strip()
try:
fmtkw['tag'] = repo._cmd('git describe --tags', verbose=0)['out'].strip() + ','
except ShellException:
fmtkw['tag'] = '<None>,'
fmtkw['branch'] = repo.pygit.active_branch.name + ','
fmtkw['repo'] = repo.name + ','
repo.info('repo={repo:<14} pkg={pkg:<12} tag={tag:<18} branch={branch:<10} sha1={sha1}'.format(
**fmtkw))
def ensure_clone(repo):
if exists(repo.dpath):
repo.debug('No need to clone existing repo={}'.format(repo))
else:
repo.debug('Clone non-existing repo={}'.format(repo))
repo.clone()
def ensure(repo, dry=False):
"""
Ensure that the repo is checked out on your local machine, that the
correct branch is checked out, and the upstreams are targeting the
correct remotes.
"""
if repo.verbose > 0:
if dry:
repo.debug(ub.color_text('Checking {}'.format(repo), 'blue'))
else:
repo.debug(ub.color_text('Ensuring {}'.format(repo), 'blue'))
if not exists(repo.dpath):
repo.debug('NEED TO CLONE {}'.format(repo))
if dry:
return
repo.ensure_clone()
repo._assert_clean()
# Ensure all registered remotes exist
for remote_name, remote_url in repo.remotes.items():
try:
remote = repo.pygit.remotes[remote_name]
have_urls = list(remote.urls)
if remote_url not in have_urls:
                    print('WARNING: REMOTE NAME EXISTS BUT URL IS NOT {}. '
'INSTEAD GOT: {}'.format(remote_url, have_urls))
except (IndexError):
try:
print('NEED TO ADD REMOTE {}->{} FOR {}'.format(
remote_name, remote_url, repo))
if not dry:
repo._cmd('git remote add {} {}'.format(remote_name, remote_url))
except ShellException:
if remote_name == repo.remote:
# Only error if the main remote is not available
raise
# Ensure we have the right remote
try:
remote = repo.pygit.remotes[repo.remote]
except IndexError:
if not dry:
raise AssertionError('Something went wrong')
else:
remote = None
if remote is not None:
try:
if not remote.exists():
raise IndexError
else:
repo.debug('The requested remote={} name exists'.format(remote))
except IndexError:
repo.debug('WARNING: remote={} does not exist'.format(remote))
else:
if remote.exists():
repo.debug('Requested remote does exists')
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
repo.info('Branch name not found in local remote. Attempting to fetch')
if dry:
repo.info('dry run, not fetching')
else:
repo._cmd('git fetch {}'.format(remote.name))
repo.info('Fetch was successful')
else:
repo.debug('Requested remote does NOT exist')
# Ensure the remote points to the right place
if repo.url not in list(remote.urls):
repo.debug('WARNING: The requested url={} disagrees with remote urls={}'.format(repo.url, list(remote.urls)))
if dry:
repo.info('Dry run, not updating remote url')
else:
repo.info('Updating remote url')
repo._cmd('git remote set-url {} {}'.format(repo.remote, repo.url))
# Ensure we are on the right branch
if repo.branch != repo.pygit.active_branch.name:
repo.debug('NEED TO SET BRANCH TO {} for {}'.format(repo.branch, repo))
try:
repo._cmd('git checkout {}'.format(repo.branch))
except ShellException:
repo.debug('Checkout failed. Branch name might be ambiguous. Trying again')
try:
repo._cmd('git checkout -b {} {}/{}'.format(repo.branch, repo.remote, repo.branch))
except ShellException:
raise Exception('does the branch exist on the remote?')
tracking_branch = repo.pygit.active_branch.tracking_branch()
if tracking_branch is None or tracking_branch.remote_name != repo.remote:
            repo.debug('NEED TO SET UPSTREAM FOR {}'.format(repo))
try:
remote = repo.pygit.remotes[repo.remote]
if not remote.exists():
raise IndexError
except IndexError:
repo.debug('WARNING: remote={} does not exist'.format(remote))
else:
if remote.exists():
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
if dry:
repo.info('Branch name not found in local remote. Dry run, use ensure to attempt to fetch')
else:
repo.info('Branch name not found in local remote. Attempting to fetch')
repo._cmd('git fetch {}'.format(repo.remote))
remote_branchnames = [ref.remote_head for ref in remote.refs]
if repo.branch not in remote_branchnames:
raise Exception('Branch name still does not exist')
if not dry:
repo._cmd('git branch --set-upstream-to={remote}/{branch} {branch}'.format(
remote=repo.remote, branch=repo.branch
))
else:
repo.info('Would attempt to set upstream')
# Print some status
repo.debug(' * branch = {} -> {}'.format(
repo.pygit.active_branch.name,
repo.pygit.active_branch.tracking_branch(),
))
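# Usage sketch for ensure()/check(). The Repo constructor arguments mirror
# those used in make_netharn_registry() below; the repo named here is
# illustrative, not part of that registry:
#   repo = Repo(name='ubelt', branch='master', remote='Erotemic',
#               remotes={'Erotemic': 'git@github.com:Erotemic/ubelt.git'},
#               code_dpath=ub.expandpath('~/code'))
#   repo.ensure(dry=True)   # report what would change ("check")
#   repo.ensure(dry=False)  # clone / fetch / checkout / set upstream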
def pull(repo):
repo._assert_clean()
repo._cmd('git pull')
def status(repo):
repo._cmd('git status')
def worker(repo, funcname, kwargs):
repo.verbose = 0
func = getattr(repo, funcname)
func(**kwargs)
return repo
class RepoRegistry(ub.NiceRepr):
def __init__(registery, repos):
registery.repos = repos
def __nice__(registery):
return ub.repr2(registery.repos, si=1, nl=1)
def apply(registery, funcname, num_workers=0, **kwargs):
print(ub.color_text('--- APPLY {} ---'.format(funcname), 'white'))
print(' * num_workers = {!r}'.format(num_workers))
if num_workers == 0:
processed_repos = []
for repo in registery.repos:
print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
try:
getattr(repo, funcname)(**kwargs)
except DirtyRepoError:
print(ub.color_text('Ignoring dirty repo={}'.format(repo), 'red'))
processed_repos.append(repo)
else:
from concurrent import futures
# with futures.ThreadPoolExecutor(max_workers=num_workers) as pool:
with futures.ProcessPoolExecutor(max_workers=num_workers) as pool:
tasks = []
for i, repo in enumerate(registery.repos):
future = pool.submit(worker, repo, funcname, kwargs)
future.repo = repo
tasks.append(future)
processed_repos = []
for future in futures.as_completed(tasks):
repo = future.repo
print(ub.color_text('--- REPO = {} ---'.format(repo), 'blue'))
try:
repo = future.result()
except DirtyRepoError:
print(ub.color_text('Ignoring dirty repo={}'.format(repo), 'red'))
else:
print(repo._getlogs())
processed_repos.append(repo)
print(ub.color_text('--- FINISHED APPLY {} ---'.format(funcname), 'white'))
SHOW_CMDLOG = 1
if SHOW_CMDLOG:
print('LOGGED COMMANDS')
import os
ORIG_CWD = MY_CWD = os.getcwd()
for repo in processed_repos:
print('# --- For repo = {!r} --- '.format(repo))
for t in repo._logged_cmds:
cmd, cwd = t
if cwd is None:
cwd = os.getcwd()
if cwd != MY_CWD:
print('cd ' + ub.shrinkuser(cwd))
MY_CWD = cwd
print(cmd)
print('cd ' + ub.shrinkuser(ORIG_CWD))
def determine_code_dpath():
"""
Returns a good place to put the code for the internal dependencies.
Returns:
PathLike: the directory where you want to store your code
In order, the methods used for determining this are:
* the `--codedir` command line flag (may be undocumented in the CLI)
* the `--codedpath` command line flag (may be undocumented in the CLI)
* the CODE_DPATH environment variable
* the CODE_DIR environment variable
* the directory above this script (e.g. if this is in ~/code/repo/super_setup.py then code dir resolves to ~/code)
* the user's ~/code directory.
"""
import os
candidates = [
ub.argval('--codedir', default=''),
ub.argval('--codedpath', default=''),
os.environ.get('CODE_DPATH', ''),
os.environ.get('CODE_DIR', ''),
]
valid = [c for c in candidates if c != '']
if len(valid) > 0:
code_dpath = valid[0]
else:
try:
# This file should live in the top level of a repo, so the directory
# above that repo is taken to be the code directory.
this_fpath = abspath(__file__)
code_dpath = abspath(dirname(dirname(this_fpath)))
except NameError:
code_dpath = ub.expandpath('~/code')
if not exists(code_dpath):
code_dpath = ub.expandpath(code_dpath)
# if CODE_DIR and not exists(CODE_DIR):
# import warnings
# warnings.warn('environment variable CODE_DIR={!r} was defined, but does not exist'.format(CODE_DIR))
if not exists(code_dpath):
raise Exception(ub.codeblock(
'''
Please specify a correct code_dir using the CLI or ENV.
code_dpath={!r} does not exist.
'''.format(code_dpath)))
return code_dpath
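# Resolution sketch for determine_code_dpath() (shell lines are illustrative;
# the script filename is hypothetical since the dump does not show it):
#   CODE_DPATH=~/src python super_setup.py ensure    # env var beats ~/code
#   python super_setup.py ensure --codedir=~/src     # CLI flag beats env var
# With neither set, the directory above this repo (or ~/code) is used.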
def make_netharn_registry():
code_dpath = determine_code_dpath()
CommonRepo = functools.partial(Repo, code_dpath=code_dpath)
repos = [
# The util libs
CommonRepo(
name='utool', branch='master', remote='Erotemic',
remotes={'Erotemic': 'git@github.com:Erotemic/utool.git'},
),
CommonRepo(
name='vtool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': 'git@github.com:Erotemic/vtool_ibeis.git'},
),
CommonRepo(
name='dtool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': 'git@github.com:Erotemic/dtool_ibeis.git'},
),
CommonRepo(
name='plottool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': 'git@github.com:Erotemic/plottool_ibeis.git'},
),
CommonRepo(
name='guitool_ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': 'git@github.com:Erotemic/guitool_ibeis.git'},
),
CommonRepo(
name='ibeis', branch='master', remote='Erotemic',
remotes={'Erotemic': 'git@github.com:Erotemic/ibeis.git'},
),
]
registery = RepoRegistry(repos)
return registery
def main():
import click
registery = make_netharn_registry()
only = ub.argval('--only', default=None)
if only is not None:
only = only.split(',')
registery.repos = [repo for repo in registery.repos if repo.name in only]
num_workers = int(ub.argval('--workers', default=8))
if ub.argflag('--serial'):
num_workers = 0
protocol = ub.argval('--protocol', None)
if ub.argflag('--https'):
protocol = 'https'
if ub.argflag('--http'):
protocol = 'http'
if ub.argflag('--ssh'):
protocol = 'ssh'
if protocol is not None:
for repo in registery.repos:
repo.set_protocol(protocol)
default_context_settings = {
'help_option_names': ['-h', '--help'],
'allow_extra_args': True,
'ignore_unknown_options': True}
@click.group(context_settings=default_context_settings)
def cli_group():
pass
@cli_group.add_command
@click.command('pull', context_settings=default_context_settings)
def pull():
registery.apply('pull', num_workers=num_workers)
@cli_group.add_command
@click.command('ensure', context_settings=default_context_settings)
def ensure():
"""
Ensure is the live run of "check".
"""
registery.apply('ensure', num_workers=num_workers)
@cli_group.add_command
@click.command('ensure_clone', context_settings=default_context_settings)
def ensure_clone():
registery.apply('ensure_clone', num_workers=num_workers)
@cli_group.add_command
@click.command('check', context_settings=default_context_settings)
def check():
"""
Check is just a dry run of "ensure".
"""
registery.apply('check', num_workers=num_workers)
@cli_group.add_command
@click.command('status', context_settings=default_context_settings)
def status():
registery.apply('status', num_workers=num_workers)
@cli_group.add_command
@click.command('develop', context_settings=default_context_settings)
def develop():
registery.apply('develop', num_workers=0)
@cli_group.add_command
@click.command('doctest', context_settings=default_context_settings)
def doctest():
registery.apply('doctest')
@cli_group.add_command
@click.command('versions', context_settings=default_context_settings)
def versions():
registery.apply('versions')
cli_group()
if __name__ == '__main__':
main()
|
apache-2.0
| -6,734,773,674,303,073,000
| 34.009186
| 125
| 0.52922
| false
| 4.09722
| false
| false
| false
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/toon/GroupPanel.py
|
1
|
18189
|
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.nametag import NametagGlobals
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from toontown.toon import ToonAvatarPanel
from toontown.toontowngui import TTDialog
class GroupPanel(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('GroupPanel')
def __init__(self, boardingParty):
self.boardingParty = boardingParty
self.leaderId = self.boardingParty.getGroupLeader(localAvatar.doId)
self.elevatorIdList = self.boardingParty.getElevatorIdList()
self.frame = None
self.confirmQuitDialog = None
self.goButton = None
self.destScrollList = None
self.destFrame = None
self.goingToLabel = None
self.destIndexSelected = 0
self.__load()
self.ignore('stickerBookEntered')
self.accept('stickerBookEntered', self.__forceHide)
self.ignore('stickerBookExited')
self.accept('stickerBookExited', self.__forceShow)
return
def cleanup(self):
base.setCellsAvailable(base.leftCells, 1)
self.quitButton.destroy()
self.hideButton.destroy()
self.showButton.destroy()
self.scrollList.destroy()
if self.goButton:
self.goButton.destroy()
self.goButton = None
if self.destScrollList:
self.destScrollList.destroy()
self.destScrollList = None
if self.destFrame:
self.destFrame.destroy()
self.destFrame = None
if self.goingToLabel:
self.goingToLabel.destroy()
self.goingToLabel = None
if self.frame:
self.frame.destroy()
self.frame = None
self.leaveButton = None
self.boardingParty = None
self.ignoreAll()
return
def __load(self):
self.guiBg = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_groupListBg')
self.__defineConstants()
if self.boardingParty.maxSize == 4:
bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop_half')
bgImageZPos = 0.14
frameZPos = -0.121442
quitButtonZPos = -0.019958
else:
bgImage = self.guiBg.find('**/tt_t_gui_brd_memberListTop')
bgImageZPos = 0
frameZPos = 0.0278943
quitButtonZPos = -0.30366
guiButtons = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_status')
self.frame = DirectFrame(parent=base.a2dLeftCenter, relief=None, image=bgImage, image_scale=(0.5, 1, 0.5), image_pos=(0, 0, bgImageZPos), textMayChange=1, pos=(0.32, 0, 0))
self.frameBounds = self.frame.getBounds()
leaveButtonGui = loader.loadModel('phase_3.5/models/gui/tt_m_gui_brd_leaveBtn')
leaveImageList = (leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveDown'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveHover'),
leaveButtonGui.find('**/tt_t_gui_brd_leaveUp'))
self.leaderButtonImage = guiButtons.find('**/tt_t_gui_brd_statusLeader')
self.availableButtonImage = guiButtons.find('**/tt_t_gui_brd_statusOn')
self.battleButtonImage = guiButtons.find('**/tt_t_gui_brd_statusBattle')
if localAvatar.doId == self.leaderId:
quitText = TTLocalizer.QuitBoardingPartyLeader
else:
quitText = TTLocalizer.QuitBoardingPartyNonLeader
self.disabledOrangeColor = Vec4(1, 0.5, 0.25, 0.9)
self.quitButton = DirectButton(parent=self.frame, relief=None, image=leaveImageList, image_scale=0.065, command=self.__handleLeaveButton, text=('',
quitText,
quitText,
''), text_scale=0.06, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0.045, 0.0), text_align=TextNode.ALeft, pos=(0.223, 0, quitButtonZPos), image3_color=self.disabledOrangeColor)
arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_arrow')
hideImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover'))
showImageList = (arrowGui.find('**/tt_t_gui_brd_arrow_up'), arrowGui.find('**/tt_t_gui_brd_arrow_down'), arrowGui.find('**/tt_t_gui_brd_arrow_hover'))
self.hideButton = DirectButton(parent=base.a2dLeftCenter, relief=None, text_pos=(0, 0.15), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), text_shadow=Vec4(1, 1, 1, 1), image=hideImageList, image_scale=(-0.35, 1, 0.5), pos=(0.04, 0, 0.03), scale=1.05, command=self.hide)
self.showButton = DirectButton(parent=base.a2dLeftCenter, relief=None, text=('', TTLocalizer.BoardingGroupShow, TTLocalizer.BoardingGroupShow), text_pos=(0.03, 0), text_scale=0.06, text_align=TextNode.ALeft, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), image=showImageList, image_scale=(0.35, 1, 0.5), pos=(0.04, 0, 0.03), scale=1.05, command=self.show)
self.showButton.hide()
self.frame.show()
self.__makeAvatarNameScrolledList()
if localAvatar.doId == self.leaderId:
self.__makeDestinationScrolledList()
else:
self.__makeDestinationFrame()
self.__makeGoingToLabel()
self.accept('updateGroupStatus', self.__checkGroupStatus)
self.accept('ToonBattleIdUpdate', self.__possibleGroupUpdate)
base.setCellsAvailable([base.leftCells[1], base.leftCells[2]], 0)
if self.boardingParty.isGroupLeader(localAvatar.doId):
base.setCellsAvailable([base.leftCells[0]], 0)
self.__addTestNames(self.boardingParty.maxSize)
self.guiBg.removeNode()
guiButtons.removeNode()
leaveButtonGui.removeNode()
arrowGui.removeNode()
return
def __defineConstants(self):
self.forcedHidden = False
self.textFgcolor = Vec4(0.0, 0.6, 0.2, 1.0)
self.textBgRolloverColor = Vec4(1, 1, 0, 1)
self.textBgDownColor = Vec4(0.5, 0.9, 1, 1)
self.textBgDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
def __handleLeaveButton(self):
messenger.send('wakeup')
if not base.cr.playGame.getPlace().getState() == 'elevator':
self.confirmQuitDialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.QuitBoardingPartyConfirm, command=self.__confirmQuitCallback)
self.confirmQuitDialog.show()
def __confirmQuitCallback(self, value):
if self.confirmQuitDialog:
self.confirmQuitDialog.destroy()
self.confirmQuitDialog = None
if value > 0:
if self.boardingParty:
self.boardingParty.requestLeave()
return
def __handleGoButton(self):
offset = self.destScrollList.getSelectedIndex()
elevatorId = self.elevatorIdList[offset]
self.boardingParty.requestGoToFirstTime(elevatorId)
def __handleCancelGoButton(self):
self.boardingParty.cancelGoToElvatorDest()
def __checkGroupStatus(self):
if not self.boardingParty:
return
self.notify.debug('__checkGroupStatus %s' % self.boardingParty.getGroupMemberList(localAvatar.doId))
myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId)
self.scrollList.removeAndDestroyAllItems(refresh=0)
if myMemberList:
for avId in myMemberList:
avatarButton = self.__getAvatarButton(avId)
if avatarButton:
self.scrollList.addItem(avatarButton, refresh=0)
self.scrollList.refresh()
def __possibleGroupUpdate(self, avId):
self.notify.debug('GroupPanel __possibleGroupUpdate')
if not self.boardingParty:
return
myMemberList = self.boardingParty.getGroupMemberList(localAvatar.doId)
if avId in myMemberList:
self.__checkGroupStatus()
def __makeAvatarNameScrolledList(self):
friendsListGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
self.scrollList = DirectScrolledList(parent=self.frame, relief=None, incButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'),
friendsListGui.find('**/FndsLst_ScrollDN'),
friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'),
friendsListGui.find('**/FndsLst_ScrollUp')), incButton_pos=(0.0, 0.0, -0.35), incButton_image1_color=Vec4(1.0, 0.9, 0.4, 0), incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), incButton_scale=(1.0, 1.0, -1.0), incButton_relief=None, decButton_image=(friendsListGui.find('**/FndsLst_ScrollUp'),
friendsListGui.find('**/FndsLst_ScrollDN'),
friendsListGui.find('**/FndsLst_ScrollUp_Rllvr'),
friendsListGui.find('**/FndsLst_ScrollUp')), decButton_pos=(0.0, 0.0, 0.1), decButton_image1_color=Vec4(1.0, 1.0, 0.6, 0), decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0), decButton_relief=None, itemFrame_pos=(-0.195, 0.0, 0.185), itemFrame_borderWidth=(0.1, 0.1), numItemsVisible=8, itemFrame_scale=1.0, forceHeight=0.07, items=[], pos=(0, 0, 0.075))
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.235, 0, 0)))
clipNP = self.scrollList.attachNewNode(clipper)
self.scrollList.setClipPlane(clipNP)
friendsListGui.removeNode()
return
def __makeDestinationScrolledList(self):
arrowGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoArrow')
incrementImageList = (arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoDown'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoHover'),
arrowGui.find('**/tt_t_gui_brd_arrowL_gotoUp'))
if self.boardingParty.maxSize == 4:
zPos = -0.177083
else:
zPos = -0.463843
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_leader')
self.destScrollList = DirectScrolledList(
parent=self.frame,
relief=None,
image=bottomImage,
image_scale=(0.5, 1, 0.5),
incButton_image=incrementImageList,
incButton_pos=(0.217302, 0, 0.07),
incButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5),
incButton_scale=(-0.5, 1, 0.5),
incButton_relief=None,
incButtonCallback=self.__informDestChange,
decButton_image=incrementImageList,
decButton_pos=(-0.217302, 0, 0.07),
decButton_scale=(0.5, 1, 0.5),
decButton_image3_color=Vec4(1.0, 1.0, 0.6, 0.5),
decButton_relief=None,
decButtonCallback=self.__informDestChange,
itemFrame_pos=(0, 0, 0.06),
itemFrame_borderWidth=(0.1, 0.1),
numItemsVisible=1,
itemFrame_scale=TTLocalizer.GPdestScrollList,
forceHeight=0.07,
items=[],
pos=(0, 0, zPos),
scrollSpeed=0.1)
arrowGui.removeNode()
self.__addDestNames()
self.__makeGoButton()
return
def __addDestNames(self):
for i in xrange(len(self.elevatorIdList)):
destName = self.__getDestName(i)
self.destScrollList.addItem(destName, refresh=0)
self.destScrollList.refresh()
def __getDestName(self, offset):
elevatorId = self.elevatorIdList[offset]
elevator = base.cr.doId2do.get(elevatorId)
if elevator:
destName = elevator.getDestName()
return destName
def __makeDestinationFrame(self):
destName = self.__getDestName(self.destIndexSelected)
if self.boardingParty.maxSize == 4:
zPos = -0.12
else:
zPos = -0.404267
bottomImage = self.guiBg.find('**/tt_t_gui_brd_memberListBtm_nonLeader')
self.destFrame = DirectFrame(parent=self.frame, relief=None, image=bottomImage, image_scale=(0.5, 1, 0.5), text=destName, text_align=TextNode.ACenter, text_scale=TTLocalizer.GPdestFrame, pos=(0, 0, zPos))
return
def __makeGoButton(self):
goGui = loader.loadModel('phase_9/models/gui/tt_m_gui_brd_gotoBtn')
self.goImageList = (goGui.find('**/tt_t_gui_brd_gotoUp'),
goGui.find('**/tt_t_gui_brd_gotoDown'),
goGui.find('**/tt_t_gui_brd_gotoHover'),
goGui.find('**/tt_t_gui_brd_gotoUp'))
self.cancelGoImageList = (goGui.find('**/tt_t_gui_brd_cancelGotoUp'),
goGui.find('**/tt_t_gui_brd_cancelGotoDown'),
goGui.find('**/tt_t_gui_brd_cancelGotoHover'),
goGui.find('**/tt_t_gui_brd_cancelGotoUp'))
if self.boardingParty.maxSize == 4:
zPos = -0.0360483
else:
zPos = -0.0353787
self.goButton = DirectButton(parent=self.destScrollList, relief=None, image=self.goImageList, image_scale=(0.48, 1, 0.48), command=self.__handleGoButton, text=('',
TTLocalizer.BoardingGo,
TTLocalizer.BoardingGo,
''), text_scale=TTLocalizer.GPgoButton, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.12), pos=(-0.003, 0, zPos))
goGui.removeNode()
return
def __getAvatarButton(self, avId):
toon = base.cr.doId2do.get(avId)
if not toon:
return None
toonName = toon.getName()
inBattle = 0
buttonImage = self.availableButtonImage
if toon.battleId:
inBattle = 1
buttonImage = self.battleButtonImage
if avId == localAvatar.doId:
self.__forceHide()
else:
if avId == self.leaderId:
buttonImage = self.leaderButtonImage
if avId == localAvatar.doId:
self.__forceShow()
return DirectButton(parent=self.frame, relief=None, image=buttonImage, image_scale=(0.06, 1.0, 0.06), text=toonName, text_align=TextNode.ALeft, text_wordwrap=16, text_scale=0.04, text_pos=(0.05, -0.015), text_fg=self.textFgcolor, text1_bg=self.textBgDownColor, text2_bg=self.textBgRolloverColor, text3_fg=self.textBgDisabledColor, pos=(0, 0, 0.2), command=self.__openToonAvatarPanel, extraArgs=[toon, avId])
def __openToonAvatarPanel(self, avatar, avId):
if avId != localAvatar.doId and avatar:
messenger.send('clickedNametag', [avatar])
def __addTestNames(self, num):
for i in xrange(num):
avatarButton = self.__getAvatarButton(localAvatar.doId)
self.scrollList.addItem(avatarButton, refresh=0)
self.scrollList.refresh()
def __isForcedHidden(self):
if self.forcedHidden and self.frame.isHidden():
return True
else:
return False
def hide(self):
self.frame.hide()
self.hideButton.hide()
self.showButton.show()
def show(self):
self.frame.show()
self.forcedHidden = False
self.showButton.hide()
self.hideButton.show()
def __forceHide(self):
if not self.frame.isHidden():
self.forcedHidden = True
self.hide()
def __forceShow(self):
if self.__isForcedHidden():
self.show()
def __informDestChange(self):
self.boardingParty.informDestChange(self.destScrollList.getSelectedIndex())
def changeDestination(self, offset):
if localAvatar.doId != self.leaderId:
self.destIndexSelected = offset
if self.destFrame:
self.destFrame['text'] = self.__getDestName(self.destIndexSelected)
def scrollToDestination(self, offset):
if localAvatar.doId == self.leaderId:
if self.destScrollList:
self.destIndexSelected = offset
self.destScrollList.scrollTo(offset)
def __makeGoingToLabel(self):
if self.boardingParty.maxSize == 4:
zPos = -0.0466546
else:
zPos = -0.331731
self.goingToLabel = DirectLabel(parent=self.frame, relief=None, text=TTLocalizer.BoardingGoingTo, text_scale=0.045, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), pos=(-0.1966, 0, zPos))
return
def disableQuitButton(self):
if self.quitButton and not self.quitButton.isEmpty():
self.quitButton['state'] = DGG.DISABLED
def enableQuitButton(self):
if self.quitButton and not self.quitButton.isEmpty():
self.quitButton['state'] = DGG.NORMAL
def disableGoButton(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['state'] = DGG.DISABLED
self.goButton['image_color'] = Vec4(1, 1, 1, 0.4)
def enableGoButton(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['state'] = DGG.NORMAL
self.goButton['image_color'] = Vec4(1, 1, 1, 1)
def disableDestinationScrolledList(self):
if self.destScrollList and not self.destScrollList.isEmpty():
self.destScrollList.incButton['state'] = DGG.DISABLED
self.destScrollList.decButton['state'] = DGG.DISABLED
def enableDestinationScrolledList(self):
if self.destScrollList and not self.destScrollList.isEmpty():
self.destScrollList.incButton['state'] = DGG.NORMAL
self.destScrollList.decButton['state'] = DGG.NORMAL
def changeGoToCancel(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['image'] = self.cancelGoImageList
self.goButton['text'] = (TTLocalizer.BoardingCancelGo,
TTLocalizer.BoardingCancelGo,
TTLocalizer.BoardingCancelGo,
'')
self.goButton['command'] = self.__handleCancelGoButton
def changeCancelToGo(self):
if self.goButton and not self.goButton.isEmpty():
self.goButton['image'] = self.goImageList
self.goButton['text'] = ('',
TTLocalizer.BoardingGo,
TTLocalizer.BoardingGo,
'')
self.goButton['command'] = self.__handleGoButton
|
mit
| 558,389,024,408,593,340
| 45.164975
| 415
| 0.630601
| false
| 3.357143
| false
| false
| false
|
endlessm/chromium-browser
|
third_party/chromite/scripts/cros_oobe_autoconfig_unittest.py
|
1
|
6578
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for cros_oobe_autoconfig.py"""
from __future__ import print_function
import json
import os
import pwd
import sys
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import image_lib
from chromite.lib import osutils
from chromite.scripts import cros_oobe_autoconfig
pytestmark = [cros_test_lib.pytestmark_inside_only,
cros_test_lib.pytestmark_skip('https://crbug.com/1000761')]
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
_TEST_DOMAIN = 'test.com'
_TEST_CLI_PARAMETERS = (
'image.bin', '--x-demo-mode', '--x-network-onc', '{}',
'--x-network-auto-connect', '--x-eula-send-statistics',
'--x-eula-auto-accept', '--x-update-skip', '--x-wizard-auto-enroll',
'--enrollment-domain', _TEST_DOMAIN)
_TEST_CONFIG_JSON = {
'demo-mode': True,
'network-onc': '{}',
'network-auto-connect': True,
'eula-send-statistics': True,
'eula-auto-accept': True,
'update-skip': True,
'wizard-auto-enroll': True
}
_IMAGE_SIZE = 4 * 1024 * 1024
_BLOCK_SIZE = 4096
_SECTOR_SIZE = 512
_STATEFUL_SIZE = _IMAGE_SIZE // 2
_STATEFUL_OFFSET = 120 * _SECTOR_SIZE
class SanitizeDomainTests(cros_test_lib.TestCase):
"""Tests for SanitizeDomain()"""
def testASCII(self):
"""Tests that ASCII-only domains are not mangled."""
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain('FoO.cOm'), 'foo.com')
def testUnicodeCase(self):
"""Tests that ASCII-only domains are not mangled."""
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(u'föo.com'),
'xn--fo-fka.com')
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(u'fÖo.com'),
'xn--fo-fka.com')
def testHomographs(self):
"""Tests that a Unicode domain is punycoded."""
# "tеѕt.com" looks like "test.com" but isn't!
self.assertEqual(cros_oobe_autoconfig.SanitizeDomain(
u't\u0435\u0455t.com'), 'xn--tt-nlc2k.com')
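# For reference, the transform exercised above matches the stdlib 'idna'
# codec (a sketch for the reader, not used by the module under test):
#   u'f\xf6o.com'.encode('idna')  # -> b'xn--fo-fka.com'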
class PrepareImageTests(cros_test_lib.MockTempDirTestCase):
"""Tests for PrepareImage()"""
def setUp(self):
"""Create a small test disk image for testing."""
self.image = os.path.join(self.tempdir, 'image.bin')
state = os.path.join(self.tempdir, 'state.bin')
# Allocate space for the disk image and stateful partition.
osutils.AllocateFile(self.image, _IMAGE_SIZE)
osutils.AllocateFile(state, _STATEFUL_SIZE)
commands = (
# Format the stateful image as ext4.
['/sbin/mkfs.ext4', state],
# Create the GPT headers and entry for the stateful partition.
['cgpt', 'create', self.image],
['cgpt', 'boot', '-p', self.image],
['cgpt', 'add', self.image, '-t', 'data',
'-l', str(constants.CROS_PART_STATEFUL),
'-b', str(_STATEFUL_OFFSET // _SECTOR_SIZE),
'-s', str(_STATEFUL_SIZE // _SECTOR_SIZE), '-i', '1'],
# Copy the stateful partition into the GPT image.
['dd', 'if=%s' % state, 'of=%s' % self.image, 'conv=notrunc', 'bs=4K',
'seek=%d' % (_STATEFUL_OFFSET // _BLOCK_SIZE),
'count=%s' % (_STATEFUL_SIZE // _BLOCK_SIZE)],
['sync'])
for cmd in commands:
cros_build_lib.run(cmd, quiet=True)
# Run the preparation script on the image.
cros_oobe_autoconfig.main([self.image] + list(_TEST_CLI_PARAMETERS)[1:])
# Mount the image's stateful partition for inspection.
self.mount_tmp = os.path.join(self.tempdir, 'mount')
osutils.SafeMakedirs(self.mount_tmp)
self.mount_ctx = image_lib.LoopbackPartitions(self.image, self.mount_tmp)
self.mount = os.path.join(self.mount_tmp,
'dir-%s' % constants.CROS_PART_STATEFUL)
self.oobe_autoconf_path = os.path.join(self.mount, 'unencrypted',
'oobe_auto_config')
self.config_path = os.path.join(self.oobe_autoconf_path, 'config.json')
self.domain_path = os.path.join(self.oobe_autoconf_path,
'enrollment_domain')
def testChronosOwned(self):
"""Test that the OOBE autoconfig directory is owned by chronos."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
chronos_uid = pwd.getpwnam('chronos').pw_uid
self.assertExists(self.oobe_autoconf_path)
self.assertEqual(os.stat(self.config_path).st_uid, chronos_uid)
def testConfigContents(self):
"""Test that the config JSON matches the correct data."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
with open(self.config_path) as fp:
data = json.load(fp)
self.assertEqual(data, _TEST_CONFIG_JSON)
def testDomainContents(self):
"""Test that the domain file matches the correct data."""
with self.mount_ctx:
# TODO(mikenichols): Remove unneeded mount call once context
# handling is in place, http://crrev/c/1795578
_ = self.mount_ctx.Mount((constants.CROS_PART_STATEFUL,))[0]
self.assertEqual(osutils.ReadFile(self.domain_path), _TEST_DOMAIN)
class GetConfigContentTests(cros_test_lib.MockTestCase):
"""Tests for GetConfigContent()"""
def testBasic(self):
"""Test that config is generated correctly with all options."""
opts = cros_oobe_autoconfig.ParseArguments(_TEST_CLI_PARAMETERS)
conf = cros_oobe_autoconfig.GetConfigContent(opts)
self.assertEqual(json.loads(conf), _TEST_CONFIG_JSON)
def testUnspecified(self):
"""Test that config is generated correctly with some options missing."""
cli = list(_TEST_CLI_PARAMETERS)
cli.remove('--x-update-skip')
expected = dict(_TEST_CONFIG_JSON)
expected['update-skip'] = False
opts = cros_oobe_autoconfig.ParseArguments(cli)
conf = cros_oobe_autoconfig.GetConfigContent(opts)
self.assertEqual(json.loads(conf), expected)
class MainTests(cros_test_lib.MockTestCase):
"""Tests for main()"""
def setUp(self):
self.PatchObject(cros_oobe_autoconfig, 'PrepareImage')
def testBasic(self):
"""Simple smoke test"""
cros_oobe_autoconfig.main(_TEST_CLI_PARAMETERS)
|
bsd-3-clause
| 1,724,904,975,207,223,000
| 36.352273
| 79
| 0.65896
| false
| 3.291938
| true
| false
| false
|
jonberliner/keras
|
keras/optimizers.py
|
1
|
7022
|
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .utils.theano_utils import shared_zeros, shared_scalar
from six.moves import zip
def clip_norm(g, c, n):
if c > 0:
g = T.switch(T.ge(n, c), g * c / n, g)
return g
def kl_divergence(p, p_hat):
return p_hat - p + p * T.log(p / p_hat)
class Optimizer(object):
def get_updates(self, params, constraints, loss):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = T.grad(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
return grads
def get_config(self):
return {"name": self.__class__.__name__}
class SGD(Optimizer):
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
updates = [(self.iterations, self.iterations + 1.)]
for p, g, c in zip(params, grads, constraints):
m = shared_zeros(p.get_value().shape) # momentum
v = self.momentum * m - lr * g # velocity
updates.append((m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"momentum": self.momentum,
"decay": self.decay,
"nesterov": self.nesterov}
class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"rho": self.rho,
"epsilon": self.epsilon}
class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, c in zip(params, grads, accumulators, constraints):
new_a = a + g ** 2 # update accumulator
updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
updates.append((p, c(new_p))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"epsilon": self.epsilon}
class Adadelta(Optimizer):
'''
Reference: http://arxiv.org/abs/1212.5701
'''
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
updates = []
for p, g, a, d_a, c in zip(params, grads, accumulators, delta_accumulators, constraints):
new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator
updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon)
new_p = p - self.lr * update
updates.append((p, c(new_p))) # apply constraints
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2
updates.append((d_a, new_d_a))
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"rho": self.rho,
"epsilon": self.epsilon}
class Adam(Optimizer):
'''
Reference: http://arxiv.org/abs/1412.6980
Default parameters follow those provided in the original paper
lambda is renamed kappa.
'''
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, kappa=1-1e-8, *args, **kwargs):
self.__dict__.update(kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
updates = [(self.iterations, self.iterations+1.)]
i = self.iterations
beta_1_t = self.beta_1 * (self.kappa**i)
# the update below seems missing from the paper, but is obviously required
beta_2_t = self.beta_2 * (self.kappa**i)
for p, g, c in zip(params, grads, constraints):
m = theano.shared(p.get_value() * 0.) # zero init of moment
v = theano.shared(p.get_value() * 0.) # zero init of velocity
m_t = (beta_1_t * m) + (1 - beta_1_t) * g
v_t = (beta_2_t * v) + (1 - beta_2_t) * (g**2)
m_b_t = m_t / (1 - beta_1_t)
v_b_t = v_t / (1 - beta_2_t)
p_t = p - self.lr * m_b_t / (T.sqrt(v_b_t) + self.epsilon)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, c(p_t))) # apply constraints
return updates
def get_config(self):
return {"name": self.__class__.__name__,
"lr": self.lr,
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"kappa": self.kappa}
# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
from .utils.generic_utils import get_from_module
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'optimizer', instantiate=True, kwargs=kwargs)
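# Usage sketch (the `model` object is hypothetical; any object exposing the
# era-appropriate `compile(loss=..., optimizer=...)` signature would do):
# opt = get('sgd', kwargs={'lr': 0.1, 'momentum': 0.9, 'nesterov': True})
# model.compile(loss='mse', optimizer=opt)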
|
mit
| 6,855,293,725,898,082,000
| 32.122642
| 104
| 0.548277
| false
| 3.44047
| true
| false
| false
|
errikos/amtt
|
amtt/exporter/isograph/__init__.py
|
1
|
2915
|
"""Exporter module for Isograph Availability Workbench."""
import logging
import networkx as nx
from itertools import count
from amtt.translator.ir import component_basename
from amtt.exporter import Exporter
from amtt.exporter.isograph.emitter.xml import XmlEmitter
from amtt.exporter.isograph.rbd import Rbd
from amtt.exporter.isograph.failure_models import fm_export
_logger = logging.getLogger(__name__)
class IsographExporter(Exporter):
"""Exporter to export the model to Isograph."""
def __init__(self, translator):
"""Initialize IsographExporter."""
self._translator = translator
self._emitter = XmlEmitter(translator.output_basedir)
@staticmethod
def normalize_block_names(ir_container):
"""Normalize the component (block) names.
Isograph imposes a 40 character limit for the component names.
In case the model uses template components, there is a big chance that
the names will grow very big in length. Therefore, we store the
base name in the description field and assign a unique integer (ID)
as the components name.
"""
g = ir_container.component_graph
if ir_container.uses_templates:
_logger.info('Template usage detected:')
_logger.info(' * Normalizing component names for Isograph')
# Create relabeling mapping.
# Each component name will be replaced with a number (ID).
relabel_mapping = {n: c for n, c in zip(g.nodes_iter(), count(1))}
del relabel_mapping['ROOT'] # We don't want to relabel ROOT
# Relabel and rename components graph
# -- copy=False means "relabel in-place"
nx.relabel_nodes(g, relabel_mapping, copy=False)
for u, v in nx.bfs_edges(g, 'ROOT'):
# -- get a hold of the associated object
vo = g.node[v]['obj']
# -- set base name as description
vo.description = component_basename(vo.name)
# -- set ID number as name
vo.name = v
# Note: No need to relabel or rename failures graph
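# Relabeling sketch (component names are illustrative):
#   {'motor.pump.seal': 1, 'motor.pump.bearing': 2, ...}  # 'ROOT' is kept
# Isograph then sees short numeric block names, while each long base name
# survives in the block's description field.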
def export(self):
"""Export the model to Isograph importable format."""
# Normalize block names, if necessary
self.normalize_block_names(self._translator.ir_container)
# Export RBD (blocks, nodes, connections)
self._export_rbd()
# Export failure model definitions
self._export_failure_models()
# Write output file
self._emitter.commit()
def _export_rbd(self):
# Create block diagram from input
rbd = Rbd()
rbd.from_ir_container(self._translator.ir_container)
# Dump reliability block diagram to output
rbd.serialize(self._emitter)
def _export_failure_models(self):
fm_export(self._translator.ir_container, self._emitter)
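# Usage sketch (`translator` is a hypothetical amtt translator instance,
# as expected by __init__ above):
# exporter = IsographExporter(translator)
# exporter.export()  # emits blocks, connections and failure models as XML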
|
gpl-3.0
| 8,075,975,394,118,118,000
| 39.486111
| 78
| 0.64048
| false
| 4.236919
| false
| false
| false
|
corredD/upy
|
autodeskmaya/mayaHelper.py
|
1
|
118218
|
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/autodeskmaya/mayaHelper.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 5 23:30:44 2010
@author: Ludovic Autin - ludovic.autin@gmail.com
"""
import sys, os, os.path, struct, math, string
from math import *
#import numpy
from types import StringType, ListType
import maya
from maya import cmds,mel,utils
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import maya.OpenMayaFX as omfx
import pymel.core as pm
#base helper class
from upy import hostHelper
if hostHelper.usenumpy:
import numpy
from numpy import matrix
from upy.hostHelper import Helper
lefthand =[[ 1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]]
from upy.transformation import decompose_matrix
class MayaSynchro:
#period problem
def __init__(self,cb=None, period=0.1):
self.period = period
self.callback = None
self.timeControl = oma.MAnimControl()
if cb is not None :
self.doit = cb
def change_period(self,newP):
self.period = newP
self.remove_callback()
self.set_callback()
def set_callback(self):
self.callback = om.MTimerMessage.addTimerCallback(self.period,self.doit)
def remove_callback(self):
om.MMessage.removeCallback(self.callback)
def doit(self,*args,**kw):#period,time,userData=None):
pass
class mayaHelper(Helper):
"""
The maya helper abstract class
============================
This is the maya helper Object. The helper
give access to the basic function need for create and edit a host 3d object and scene.
"""
SPLINE = "kNurbsCurve"
INSTANCE = "kTransform"
MESH = "kTransform"
POLYGON = "kMesh"#"kTransform"
# MESH = "kMesh"
EMPTY = "kTransform"
BONES="kJoint"
PARTICULE = "kParticle"
SPHERE = "sphere"
CYLINDER = "cylinder"
CUBE = "cube"
IK="kIkHandle"
msutil = om.MScriptUtil()
pb = False
pbinited = False
host = "maya"
def __init__(self,master=None,**kw):
Helper.__init__(self)
self.updateAppli = self.update
self.Cube = self.box
self.Box = self.box
self.Geom = self.newEmpty
#self.getCurrentScene = c4d.documents.GetActiveDocument
self.IndexedPolygons = self.polygons
self.Points = self.PointCloudObject
self.pb = True
self.hext = "ma"
self.timeline_cb={}
self.LIGHT_OPTIONS = {"Area" : maya.cmds.ambientLight,
"Sun" : maya.cmds.directionalLight,
"Spot":maya.cmds.spotLight}
def fit_view3D(self):
pass#
def resetProgressBar(self,max=None):
"""reset the Progress Bar, using value"""
if self.pb :
gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar');
maya.cmds.progressBar(gMainProgressBar, edit=True, endProgress=True)
self.pbinited = False
# self.pb = False
# maya.cmds.progressBar(maya.pb, edit=True, maxValue=max,progress=0)
def progressBar(self,progress=None,label=None):
""" update the progress bar status by progress value and label string
@type progress: Int/Float
@param progress: the new progress
@type label: string
@param label: the new message to put in the progress status
"""
if self.pb :
gMainProgressBar = maya.mel.eval('$tmp = $gMainProgressBar');
if not self.pbinited :
cmds.progressBar( gMainProgressBar,
edit=True,
beginProgress=True,
isInterruptable=False,
status=label,
maxValue=100)
# if progress == 1 :
# prev = cmds.progressBar(gMainProgressBar,q=1,progress=1)
# progress = prev/100. + 0.1
# progress*=100.
if label is not None and progress is None :
cmds.progressBar(gMainProgressBar, edit=True, status = label)
elif label is not None and progress is not None:
cmds.progressBar(gMainProgressBar, edit=True, progress=progress*100.,status = label)
elif label is None and progress is not None:
cmds.progressBar(gMainProgressBar, edit=True, progress=progress*100.)
if progress == 1 or progress == 100.:
self.resetProgressBar()
#maxValue = 100
#did not work
#maya.cmds.progressBar(maya.pb, edit=True, progress=progress*100)
# cmds.progressBar(maya.pb, edit=True, step=1)
#maya.cmds.progressBar(maya.pb, edit=True, step=1)
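# Usage sketch for the progress-bar helpers above (values are illustrative):
#   helper.progressBar(progress=0.5, label="building mesh")  # 50%, new label
#   helper.progressBar(progress=1.0)  # reaching 1.0 resets the bar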
def synchronize(self,cb):
self.timeline_cb[cb] = MayaSynchro(cb=cb,period=0.05)
self.timeline_cb[cb].set_callback()
def unsynchronize(self,cb):
self.timeline_cb[cb].remove_callback()
def update(self,):
#how do I update the redraw
cmds.refresh()
def updateAppli(self,):
#how do I update the redraw
cmds.refresh()
def checkName(self,name):
invalid=[]
if name is None :
print ("None name or not a string",name)
return ""
#sometime the name is a list ie [u'name']
if type(name) is list or type(name) is tuple :
if len(name) == 1 :
name = name[0]
elif len(name) == 2 :
name = name[1]#transform node
else :
name = name[0] #?
if (type(name) is not str and type(name) is not unicode) :
print ("not a string",name,type(name))
return ""
if not len(name):
print ("empty name",name)
for i in range(9):
invalid.append(str(i))
if type(name) is list or type(name) is tuple:
name = name[0]
if type(name) is not str and type(name) is not unicode:
name = name.name()
if len(name) and name[0] in invalid:
name= name[1:]
#also remove some character and replace it by _
name=name.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
return name
def setCurrentSelection(self,obj):
if obj is None :
return
if type (obj) is list or type (obj) is tuple :
for o in obj :
cmds.select(self.getObject(o))
else :
cmds.select(self.getObject(obj))
def getCurrentSelection(self):
slist = om.MSelectionList()
if not slist :
return []
om.MGlobal.getActiveSelectionList(slist)
selection = []
slist.getSelectionStrings(selection)
return selection
def checkPrimitive(self,object):
try :
cmds.polySphere(object,q=1,r=1)
return "sphere"
except :
pass
try :
cmds.sphere(object,q=1,r=1)
return "sphere"
except :
pass
try :
cmds.polyCube(object,q=1,w=1)
return "cube"
except :
pass
try :
cmds.polyCylinder(object,q=1,r=1)
return "cylinder"
except :
pass
return None
def getType(self,object):
#first try to see if it is a primitive
prim = self.checkPrimitive(object)
if prim is not None :
return prim
object = self.getNode(object)
if hasattr(object,"apiTypeStr"):
# print (object.apiTypeStr())
return object.apiTypeStr()
else :
# print (type(object))
return type(object)
# return type(object)
def getMName(self,o):
return o.name()
def setName(self,o,name):
if o is None :
return
cmds.rename( self.checkName(o), name, ignoreShape=False)
def getName(self,o):
if o is None: return ""
if type(o) == str or type(o) == unicode :
name = o.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
elif type(o) is om.MFnMesh:
return o
elif hasattr(o,"name") :
if type(o.name) == str :
return o.name
else : return o.name()
elif type(o) is list or type(o) is tuple:
name=o[0]
else : name=o
return name
def getMObject(self,name):
# Create a selection list and get an MObject for the node whose name is given
selectionList = om.MSelectionList()
selectionList.add( name ) #should be unic..
node = om.MObject()
selectionList.getDependNode( 0, node )
#Create a function set and connect it to the node
fnDep = om.MFnDependencyNode(node)
#print fnDep.name() #object name
#print fnDep.typeName() #type name ie mesh, transform etc..
return node,fnDep
def getObject(self,name,doit=True):
if type(name) is list or type(name) is tuple :
if len(name) == 1 :
name = name[0]
elif len(name) == 2 :
name = name[1]#transform node
else :
name = name[0] #?
name=self.checkName(name)
if name.find(":") != -1 :
name=name.replace(":","_").replace(" ","_").replace("'","").replace("-","_")
if doit :
name=cmds.ls(name)
if len(name)==0:
return None
if len(name) == 1 :
return name[0]
return name
def checkIsMesh(self,poly):
if type(poly) is str or type(poly) is unicode :
mesh = self.getMShape(poly)#dagPath
else :
#have to a object shape node or dagpath
mesh = poly
try :
meshnode = om.MFnMesh(mesh)
return meshnode
except :
return mesh
def getMesh(self,name):
mesh = None
if type(name) != str:
return name
# path = om.MDagPath()
try :
name = self.checkName(name)
mesh = cmds.ls(name)#NMesh.GetRaw(name)
except:
mesh = None
return mesh
def getMeshFrom(self,obj):
if type(obj) is not str and type(obj) is not unicode:
obj = self.getMName(obj)
return self.getMShape(obj)
def getTransformNode(self,name):
if type(name) is list :
name = name[0]
if type(name) is str or type(name) is unicode :
name = self.checkName(name)
node = self.getNode(name)
else :
node = name
dag = om.MFnDagNode(node)
path = om.MDagPath()
dag.getPath(path)
return path.transform(),path
def getMShape(self,name,):
# print name,type(name)
if type(name) is list :
name = name[0]
if type(name) is str or type(name) is unicode :
name = self.checkName(name)
node = self.getNode(name)
else :
node = name
dag = om.MFnDagNode(node)
path = om.MDagPath()
dag.getPath(path)
# self.msutil.createFromInt(0)
# pInt = self.msutil.asUintPtr()
# path.numberOfShapesDirectlyBelow(pInt)
try :
path.extendToShape()
return path
except :
# if self.msutil.getUint(pInt) == 0 :
node = path.child(0)
return self.getMShape(node)
#problem with primitive
# try :
# path.extendToShape()
# except :
# path = None
# return path
def deleteObject(self,obj):
sc = self.getCurrentScene()
if type(obj) is str or type(obj) is unicode:
obj=self.checkName(obj)
else :
if type(obj) is list or type(obj) is tuple :
for o in obj :
self.deleteObject(o)
else :
obj = obj.name()
try :
#print "del",obj
cmds.delete(obj)
except:
print "problem deleting ", obj
#######Special for maya#######################
def getNode( self,name ):
# print "getNode",type(name)
# if type(name) != str :
# return name
name = self.checkName(name)
selectionList = om.MSelectionList()
selectionList.add( name )
node = om.MObject()
selectionList.getDependNode( 0, node )
return node
def getNodePlug(self, attrName, nodeObject ):
"""
example:
translatePlug = nameToNodePlug( "translateX", perspNode )
print "Plug name: %s" % translatePlug.name()
print "Plug value %g" % translatePlug.asDouble()
"""
depNodeFn = om.MFnDependencyNode( nodeObject )
attrObject = depNodeFn.attribute( attrName )
plug = om.MPlug( nodeObject, attrObject )
return plug
################################################
def newLocator(self,name,location=None,**kw):
name = self.checkName(name)
if name.find(":") != -1 : name=name.replace(":","_")
empty=cmds.spaceLocator( n=name, a=True)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.reParent(empty,parent)
return str(empty)
def newEmpty(self,name,location=None,**kw):
#return self.newLocator(name,location=location, **kw)
name = self.checkName(name)
if name.find(":") != -1 : name=name.replace(":","_")
empty=cmds.group( em=True, n=name)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.reParent(empty,parent)
return str(empty)
def updateMasterInstance(self,master, newobjects,instance=True, **kw):
"""
Update the reference of the passed instance by adding/removing-hiding objects
* overwrited by children class for each host
>>> sph = helper.Sphere("sph1")
>>> instance_sph = helper.newInstance("isph1",sph,location = [10.0,0.0,0.0])
@type instance: string/hostObj
@param instance: name of the instance
@type objects: list hostObject/string
@param objects: the list of object to remove/add to the instance reference
@type add: bool
@param add: if True add the objec else remove
@type hide: bool
@param hide: hide instead of remove
@type kw: dictionary
@param kw: you can add your own keyword, but it should be interpreted by all host
"""
#the instance should point to an empty that has the shape as a child
#what we should do is either duplicate or reParent the new object under this master parent
#or use the replace command ? use particles ?
#replace the mesh node of the master by the given ones....
#hide and remove all previous children....
chs = self.getChilds(master)
for o in chs :
r=cmds.duplicate(o, renameChildren=True)
print r
cmds.delete(chs)#or move or uninstance ?
if instance :
n=[]
for o in newobjects :
name = self.getName(master)+"Instance"
i1=self.getObject(name+"1")
if i1 is not None :
cmds.delete(i1)
i=self.newInstance(name,o,parent=master)
else :
self.reParent(newobjects,master)
def newMInstance(self,name,object,location=None,
hostmatrice=None,matrice=None,parent=None,**kw):
#first create a MObject?
#only works on meshes
name = self.checkName(name)
fnTrans = om.MFnTransform()
minstance = fnTrans.create()
fnTrans.setName(name)
#now add the child as an instance.
#print fnTrans.name()
#is this will work withany object ?
object=self.getNode(object)#or the shape ?
fnTrans.addChild(object,fnTrans.kNextPos,True)
#print name, object , fnTrans
if matrice is not None and isinstance(matrice,om.MTransformationMatrix):
hostmatrice=matrice
matrice = None
if hostmatrice is not None and not isinstance(hostmatrice,om.MTransformationMatrix):
matrice = hostmatrice
hostmatrice = None
if location is not None :
fnTrans.setTranslation(self.vec2m(location),om.MSpace.kPostTransform)
elif hostmatrice is not None :
fnTrans.set(hostmatrice)
elif matrice is not None :
#first convert
hmatrice = self.matrixp2m(matrice)
fnTrans.set(hmatrice)
if parent is not None:
mparent = self.getNode(parent)
# onode = om.MFnDagNode(mobj)
# print "name",fnTrans.name()
oparent = om.MFnDagNode(mparent)
oparent.addChild(self.getNode(fnTrans.name()),oparent.kNextPos,False)
return fnTrans.name()
def newInstance(self,name,object,location=None,hostmatrice=None,matrice=None,
parent=None,material=None,**kw):
#instance = None#
#instance parent = object
#instance name = name
# return self.newMInstance(name,object,location=location,
# hostmatrice=hostmatrice,matrice=matrice,parent=parent,**kw)
#
name = self.checkName(name)
instance = cmds.instance(object,name=name)
if location != None :
#set the position of instance with location
cmds.move(float(location[0]),float(location[1]),float(location[2]), name,
absolute=True )
if matrice is not None :
if self._usenumpy :
#matrice = numpy.array(matrice)#matrix(matrice)*matrix(lefthand)#numpy.array(matrice)
#transpose only rotation
matrice = numpy.array(matrice).transpose()#we do the transpose here
#m = matrice.copy()
# m[0,:3]=matrice[0,:3]#thi work with numpy
# m[1,:3]=matrice[1,:3]
# m[2,:3]=matrice[2,:3]
#matrice[:3,:3] = matrice[:3,:3].transpose()
hm = matrice.reshape(16,).tolist()
#should I apply some transformation first ?
cmds.xform(name, a=True, m=hm,roo="xyz")#a for absolute
else :
self.setTransformation(instance[0],mat=matrice)
#set the instance matrice
#self.setObjectMatrix(self,object,matrice=matrice,hostmatrice=hostmatrice)
if parent is not None:
self.reParent(instance,parent)
if material is not None:
self.assignMaterial(instance,material)
return instance
#alias
setInstance = newInstance
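# Usage sketch, mirroring the doctest in updateMasterInstance above:
#   sph = helper.Sphere("sph1")
#   inst = helper.newInstance("isph1", sph, location=[10.0, 0.0, 0.0])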
def matrixToParticles(self,name,matrices,vector=[0.,1.,0.],transpose=True,**kw):#edge size ?
#blender uses the vertex normal to rotate the instance
#the quad up vector should use the input vector
axe=self.rerieveAxis(vector)
#axe="+Y"
quad=numpy.array(self.quad[axe])#*10.0
print ("matrixToParticles",axe,vector,quad)
# f=[0,1,2,3]
v=[]
f=[]
e=[]
n=[]
vi=0
#for each 4x4 matrix m: m[3][:3] is the translation, m[:3,:3] the rotation
#create particle system
# obj = self.checkName(obj)
# partO=self.getMShape(obj) #shape..
# fnP = omfx.MFnParticleSystem(partO)
# oriPsType = fnP.renderType()
rot=om.MVectorArray()#fnP.count())
pos=om.MVectorArray()#fnP.count())
tr=[]
#set position and rotation
for i,m in enumerate(matrices):
mat = numpy.array(m)
if transpose :
mat = numpy.array(m).transpose()
# t = m[3][:3]
# rot = m[:3,:3]
scale, shear, euler, translate, perspective=decompose_matrix(mat)
tr.append(translate.tolist())
#need euler angle
# e=self.FromMat(rot).rotation().asEulerRotation()
p = om.MVector( float(translate[0]),float(translate[1]),float(translate[2]) )
pos.append(p)
r = om.MVector( float(euler[0]),float(euler[1]),float(euler[2]) )/(math.pi) *180
rot.append(r)
# fnP.setPerParticleAttribute("rotationPP",rot)
# fnP.setPerParticleAttribute("position",pos)
part,partShape= pm.nParticle(n=name+"_ps",position = tr)
# part,partShape=cmds.particle(n=name+"_ps",p=list(tr))
pm.setAttr('nucleus1.gravity', 0.0)#?
# cmds.setAttr(partShape+'.computeRotation',1)
partShape.computeRotation.set(True)
pm.addAttr(partShape, ln = 'rotationPP', dt = 'vectorArray')
pm.addAttr(partShape, ln = 'rotationPP0', dt = 'vectorArray')
particle_fn = omfx.MFnParticleSystem(partShape.__apimobject__())
particle_fn.setPerParticleAttribute('rotationPP', rot)
particle_fn.setPerParticleAttribute('rotationPP0', rot)
if 'parent' in kw and kw['parent'] is not None:
parent = self.getObject(kw['parent'])
self.reParent(name+"_ps",parent)
return part,partShape
#particleInstancer -addObject
#-object locator1 -cycle None -cycleStep 1 -cycleStepUnits Frames
#-levelOfDetail Geometry -rotationUnits Degrees
#-rotationOrder XYZ -position worldPosition -age age crn_A_clouddsShape;
def instancePolygon(self,name, matrices=None,hmatrices=None, mesh=None,parent=None,
transpose=False,globalT=True,**kw):
hm = False
if hmatrices is not None :
matrices = hmatrices
hm = True
if matrices == None : return None
if mesh == None : return None
instance = []
#print len(matrices)#4,4 mats
if self.instance_dupliFace:
v=[0.,1.,0.]
if "axis" in kw and kw["axis"] is not None:
v=kw["axis"]
print ("axis",v)
o = self.getObject(name+"_pis")
if o is None :
# o,m=self.matrixToVNMesh(name,matrices,vector=v)
particle,partShape=self.matrixToParticles(name,matrices,vector=v,
transpose=transpose,parent=parent)
p_instancer = pm.PyNode(pm.particleInstancer(
partShape, addObject=True, object=pm.ls(mesh),name=name+"_pis",
cycle='None', cycleStep=1, cycleStepUnits='Frames',
levelOfDetail='Geometry', rotationUnits='Degrees',
rotationOrder='XYZ', position='worldPosition', age='age'))
pm.particleInstancer(partShape, name = p_instancer, edit = True, rotation = "rotationPP")
if parent is not None :
self.reParent(name+"_pis",parent)
# cmds.particleInstancer(
# partShape, addObject=True, object=self.getMShape(mesh),
# cycle='None', cycleStep=1, cycleStepUnits='Frames',
# levelOfDetail='Geometry', rotationUnits='Degrees',
# rotationOrder='XYZ', position='worldPosition', age='age')
# cmds.particleInstancer(partShape, name = "p_instancer",
# edit = True, rotation = "rotationPP")
else :
#update
pass
return name+"_pis"
#rotation checkbox->use normal
else :
for i,mat in enumerate(matrices):
inst = self.getObject(name+str(i))
if inst is None :
#Minstance?
if hm :
inst=self.newInstance(name+str(i),mesh,hostmatrice=mat,
parent=parent,globalT=globalT)
else :
inst=self.newInstance(name+str(i),mesh,matrice=mat,
parent=parent,globalT=globalT)
instance.append(inst)
return instance
def resetTransformation(self,name):
m= [1.,0.,0.,0.,
0.,1.,0.,0.,
0.,0.,1.,0.,
0.,0.,0.,0.]
cmds.xform(name, a=True, m=m)
def setObjectMatrix(self,object,matrice,hostmatrice=None,**kw):
"""
set a matrix to an hostObject
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
#have to manipulate the DAG/upper transform node...
#let just take the owner Transofrm node of the shape
#we should be able to setAttr either 'matrix' or 'worldMatrix'
object = self.getObject(object)
if hostmatrice !=None :
#set the instance matrice
matrice=hostmatrice
if matrice != None:
#convert the matrice in host format
#set the instance matrice
pass
transpose = True
if "transpose" in kw :
transpose = kw["transpose"]
if matrice is not None :
if self._usenumpy :
#matrice = numpy.array(matrice)#matrix(matrice)*matrix(lefthand)#numpy.array(matrice)
#transpose only rotation
matrice = numpy.array(matrice)
if transpose :
matrice=matrice.transpose()#we do the transpose here
#m = matrice.copy()
# m[0,:3]=matrice[0,:3]#thi work with numpy
# m[1,:3]=matrice[1,:3]
# m[2,:3]=matrice[2,:3]
#matrice[:3,:3] = matrice[:3,:3].transpose()
hm = matrice.reshape(16,).tolist()
                #should I apply some transformation first ?
cmds.xform(object, a=True, m=hm,roo="xyz")#a for absolute
else :
self.setTransformation(object,mat=matrice)
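    # Usage sketch for setObjectMatrix (illustrative only; 'pCube1' is a
    # hypothetical transform name and `helper` an instance of this class).
    # The matrix is given in the epmv/numpy layout, so transpose=True
    # converts it to Maya's row-major convention before the xform call:
    #
    #   import numpy
    #   m = numpy.identity(4)
    #   m[:3, 3] = [1., 2., 3.]      # translation column in numpy layout
    #   helper.setObjectMatrix('pCube1', matrice=m, transpose=True)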
def concatObjectMatrix(self,object,matrice,hostmatrice=None):
"""
apply a matrix to an hostObject
@type object: hostObject
@param object: the object who receive the transformation
@type hostmatrice: list/Matrix
@param hostmatrice: transformation matrix in host format
@type matrice: list/Matrix
@param matrice: transformation matrix in epmv/numpy format
"""
#get current transformation
if hostmatrice !=None :
#compute the new matrix: matrice*current
#set the new matrice
pass
if matrice != None:
#convert the matrice in host format
#compute the new matrix: matrice*current
#set the new matrice
pass
def addObjectToScene(self,doc,obj,parent=None,**kw):
#its just namely put the object under a parent
#return
if obj == None : return
if parent is not None :
if type(obj) is list or type(obj) is tuple :
if len(obj) == 1 :
obj = obj[0]
elif len(obj) == 2 :
obj = obj[1]#transform node
else :
obj = obj[0] #?
obj=self.checkName(obj)
parent=self.checkName(parent)
#print obj,parent
# cmds.parent( obj, parent)
self.parent(obj, parent)
def parent(self,obj,parent,instance=False):
if type(parent) == unicode :
parent = str(parent)
if type(parent) != str :
print ("parent is not String ",type(parent))
return
# print ("parenting ", obj,parent, instance )
mobj = self.getNode(obj)
mparent = self.getNode(parent)
# onode = om.MFnDagNode(mobj)
oparent = om.MFnDagNode(mparent)
# print ("parenting dag node", obj,parent, mobj,oparent.kNextPos,instance )
oparent.addChild(mobj,oparent.kNextPos,instance)
def reParent(self,obj,parent,instance=False):
if parent == None :
print ("parent is None")
return
if type(obj) is not list and type(obj) is not tuple :
obj = [obj,]
try :
[self.parent(o,parent,instance=instance) for o in obj]
except :
print ("failure")
def getChilds(self,obj):
if type(obj) is str or type(obj) is unicode:
o = self.checkName(obj)
else :
o = self.getName(obj)
childs= cmds.listRelatives(o, c=True)
if childs is None :
return []
else :
return childs
def addCameraToScene(self,name,Type='persp',focal=30.0,center=[0.,0.,0.],sc=None):
# Create a camera and get the shape name.
cameraName = cmds.camera(n=name)
cameraShape = cameraName[1]
# Set the focal length of the camera.
cmds.camera(cameraShape, e=True, fl=focal)
#change the location
cmds.move(float(center[0]),float(center[1]),float(center[2]), cameraName[0], absolute=True )
#should I rotate it
cmds.rotate( 0, '0', '360deg',cameraName[0] )
# Change the film fit type.
#cmds.camera( cameraShape, e=True, ff='overscan' )
return cameraName
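    # Example call (a minimal sketch; 'myCam' is a hypothetical name and
    # `helper` a previously created instance of this class):
    #
    #   cam = helper.addCameraToScene('myCam', Type='persp', focal=35.0,
    #                                 center=[0., 0., 50.])
    #   # cam is the [transform, shape] pair returned by cmds.camera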
def addLampToScene(self,name,Type='Area',rgb=[1.,1.,1.],dist=25.0,energy=1.0,
soft=1.0,shadow=False,center=[0.,0.,0.],sc=None,**kw):
#print Type
        #each type has a different cmd
lcmd = self.LIGHT_OPTIONS[Type]
light = lcmd(n=name)
# light = cmds.pointLight(n=name)
#cmds.pointLight(light,e=1,i=energy,rgb=rgb,ss=soft,drs=dist)
lcmd(light,e=1,i=energy)
lcmd(light,e=1,ss=soft)
# cmds.pointLight(light,e=1,drs=dist)
lcmd(light,e=1,rgb=rgb)
cmds.move(float(center[0]),float(center[1]),float(center[2]), light, absolute=True )
return light
def toggleDisplay(self,ob,display,**kw):
# ob = self.getObject(ob)
# if ob is None :
# return
# ob=self.checkName(ob)
# if display :
# cmds.showHidden(ob)
# else :
# cmds.hide(ob)
if ob is None :
return
node = self.getNode(self.checkName(ob))
if node is None :
return
attrDis = self.getNodePlug("visibility",node)
attrDis.setBool(bool(display))
# def toggleXray(self,object,xray):
# o = self.getObject(object)
# cmds.select(o)
# cmds.displySurface(xRay = True)
def getVisibility(self,obj,editor=True, render=False, active=False):
        #0 off, 1 on, 2 undef
node = self.getNode(self.checkName(obj))
attrDis = self.getNodePlug("visibility",node)
if editor and not render and not active:
return attrDis.asBool()
elif not editor and render and not active:
return attrDis.asBool()
elif not editor and not render and active:
return attrDis.asBool()
        else :
            return attrDis.asBool(),attrDis.asBool(),attrDis.asBool()
def getTranslation(self,name,absolue=True):
name = self.checkName(name)
return self.FromVec(cmds.xform(name,q=1,ws=int(absolue),t=1))
def getTranslationOM(self,name):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
return fnTrans.getTranslation(om.MSpace.kWorld)#kPostTransform)
def setTranslation(self,name,pos):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
newT = self.vec2m(pos)
fnTrans.setTranslation(newT,om.MSpace.kPostTransform)
def translateObj(self,obj,position,use_parent=False):
#is om would be faster ?
if len(position) == 1 : c = position[0]
else : c = position
#print "upadteObj"
newPos=c#c=c4dv(c)
o=self.getObject(obj)
if use_parent :
parentPos = self.getPosUntilRoot(obj)#parent.get_pos()
c = newPos - parentPos
cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
else :
cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
def scaleObj(self,obj,sc):
obj = self.checkName(obj)
if type(sc) is float :
sc = [sc,sc,sc]
cmds.scale(float(sc[0]),float(sc[1]),float(sc[2]), obj,absolute=True )
def getScale(self,name,absolue=True,**kw):
node = self.getNode(name)
fnTrans = om.MFnTransform(node,)
# First create an array and a pointer to it
scaleDoubleArray = om.MScriptUtil()
scaleDoubleArray.createFromList( [0.0, 0.0, 0.0], 3 )
scaleDoubleArrayPtr = scaleDoubleArray.asDoublePtr()
# Now get the scale
fnTrans.getScale( scaleDoubleArrayPtr )
# Each of these is a decimal number reading from the pointer's reference
x_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 0 )
y_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 1 )
z_scale = om.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 2 )
return [x_scale,y_scale,z_scale]#kPostTransform) or om.MVector(v[0], v[1], v[2])?
def getSize(self,obj):
        #read the size from the shape's width/height/depth attributes
obj = self.checkName(obj)
meshnode = self.getMShape(obj)
try :
mesh = om.MFnMesh(meshnode)
except :
return [1,1,1]
obj = self.getMName(mesh)
x=cmds.getAttr(obj+'.width')
y=cmds.getAttr(obj+'.height')
z=cmds.getAttr(obj+'.depth')
return [x,y,z]
def rotateObj(self,obj,rot):
#take degree
obj = self.checkName(obj)
cmds.setAttr(obj+'.rx',degrees(float(rot[0])))
cmds.setAttr(obj+'.ry',degrees(float(rot[1])))
cmds.setAttr(obj+'.rz',degrees(float(rot[2])))
def getTransformation(self,name):
node = self.getNode(name)
fnTrans = om.MFnTransform(node)
mmat = fnTrans.transformation()
#maya matrix
return mmat
def setTransformation(self,name,mat=None,rot=None,scale=None,trans=None,order="str",**kw):
node = self.getNode(name)
fnTrans = om.MFnTransform(node)
if mat is not None :
if isinstance(mat,om.MTransformationMatrix):
fnTrans.set(mat)
else :
fnTrans.set(self.matrixp2m(mat))
if trans is not None :
fnTrans.setTranslation(self.vec2m(trans),om.MSpace.kPostTransform)
if rot is not None :
rotation = om.MEulerRotation (rot[0], rot[1], rot[2])
fnTrans.setRotation(rotation)
if scale is not None :
fnTrans.setScale(self.arr2marr(scale))
def ObjectsSelection(self,listeObjects,typeSel="new"):
"""
Modify the current object selection.
@type listeObjects: list
@param listeObjects: list of object to joins
@type typeSel: string
@param listeObjects: type of modification: new,add,...
"""
dic={"add":True,"new":False}
sc = self.getCurrentScene()
for obj in listeObjects:
cmds.select(self.getObject(obj),add=dic[typeSel])
#Put here the code to add/set an object to the current slection
#[sc.SetSelection(x,dic[typeSel]) for x in listeObjects]
def JoinsObjects(self,listeObjects):
"""
Merge the given liste of object in one unique geometry.
@type listeObjects: list
@param listeObjects: list of object to joins
"""
sc = self.getCurrentScene()
#put here the code to add the liste of object to the selection
cmds.select(self.getObject(listeObjects[0]))
for i in range(1,len(listeObjects)):
cmds.select(listeObjects[i],add=True)
cmds.polyUnite()
#no need to joins? but maybe better
#then call the command/function that joins the object selected
# c4d.CallCommand(CONNECT)
#need face indice
def color_mesh_perVertex(self,mesh,colors,faces=None,perVertex=True,
facesSelection=None,faceMaterial=False):
        if type(colors[0]) is not list and len(colors) == 3 :
            colors = [colors,]
if not isinstance(mesh,maya.OpenMaya.MFnMesh):
if self.getType(mesh) != self.POLYGON and self.getType(mesh) != self.MESH:
return False
mcolors=om.MColorArray()
iv=om.MIntArray()
meshnode = mesh
# print mesh
if type(mesh) is str or type(mesh) is unicode :
meshnode = self.getMShape(mesh)
try :
mesh = om.MFnMesh(meshnode)
except:
return False
mesh.findPlug('displayColors').setBool(True)
if not isinstance(mesh,maya.OpenMaya.MFnMesh):
return
nv=mesh.numVertices()
nf=mesh.numPolygons()
mfaces = self.getMeshFaces(meshnode)
if facesSelection is not None :
if type(facesSelection) is bool :
fsel,face_sel_indice = self.getMeshFaces(mesh,selected=True)
else :
face_sel_indice = facesSelection
fsel=[]
for i in face_sel_indice:
fsel.append(mfaces[i])
vsel=[]
for f in fsel:
for v in f:
if v not in vsel:
vsel.append(v)
mfaces = fsel
nf = len(fsel)
nv = len(vsel)
# print "selected ",face_sel_indice
#check if its ok
if len(colors) == nv:
perVertex = True
elif len(colors) == nf:
perVertex = False
if perVertex:
N=range(nv)
else :
N=range(nf)
if facesSelection is not None :
N = face_sel_indice
perVertex = False
for k,i in enumerate(N) :
if len(colors) == 1 : ncolor = colors[0]
else :
if k >= len(colors) :
ncolor = [0.,0.,0.] #problem
else :
ncolor = colors[i]
#print ncolor
#if max(ncolor) < 1 : ncolor = map( lambda x: x*255, ncolor)
col=om.MColor(float(ncolor[0]),float(ncolor[1]),float(ncolor[2]))
#print ncolor
mcolors.append(col)
iv.append(int(i))
# print "i",i,ncolor
#mesh.setVertexColor(col,int(i))
if perVertex:
mesh.setVertexColors(mcolors,iv)
else :
# print iv#should be the fdace index
mesh.setFaceColors(mcolors,iv)
return True
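    # Usage sketch for color_mesh_perVertex (hypothetical names), assuming a
    # triangle mesh 'myMesh' with three vertices; colors are r,g,b floats in
    # 0..1. One color per vertex selects the per-vertex path:
    #
    #   cols = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]
    #   helper.color_mesh_perVertex('myMesh', cols, perVertex=True)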
###################MATERIAL CODE FROM Rodrigo Araujo#####################################################################################
#see http://linil.wordpress.com/2008/01/31/python-maya-part-2/
def createMaterial(self, name, color, type ):
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
        if len(mat)==0: #create only if mat didn't exist already
#shading group
shaderSG = cmds.sets(renderable=True, noSurfaceShader=True, empty=True,
name=name+"SG" )
#material
cmds.shadingNode( type, asShader=True, name=name )
#phong ?
#cmds.setAttr((shader+ '.reflectivity'), 0)# no rayTrace
#cmds.setAttr((shader+ '.cosinePower'), 3)
cmds.setAttr( name+".color", color[0], color[1], color[2],
type="double3")
cmds.connectAttr(name+".outColor", shaderSG+".surfaceShader")
def createTexturedMaterial(self,name,filename):
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
        if len(mat)==0: #create only if mat didn't exist already
#shading group
shaderSG = cmds.sets(renderable=True, noSurfaceShader=True, empty=True,
name=name+"SG" )
#material
cmds.shadingNode("lambert", asShader=True, name=name )
cmds.connectAttr(name+".outColor", shaderSG+".surfaceShader")
#create the texture and connect it
texture = cmds.shadingNode('file', asTexture=True,name=name+"Texture")
cmds.connectAttr(name+"Texture"+'.outColor', name+".color")
cmds.setAttr(name+"Texture"+'.fileTextureName', filename, type='string')
return name
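    # Example (sketch): build a textured lambert and assign it to a mesh.
    # The material name, mesh name and file path are placeholders:
    #
    #   matname = helper.createTexturedMaterial('woodMat', '/tmp/wood.jpg')
    #   helper.assignMaterial('myMesh', matname)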
def create_mMayaMaterials(self):
existingSGs = cmds.ls(type = 'shadingEngine')
shaderHits = 0;
shaderSG, shaderSGAmbOcc, ambOcc, ramp = '', '', '', ''
for existingSG in existingSGs:
if mel.eval('attributeExists mMaya_atomShaderSG ' +existingSG):
shaderSG = existingSG
shaderHits += 1
if mel.eval('attributeExists mMaya_atomShaderSGAmbOcc ' +existingSG):
shaderSGAmbOcc = existingSG
shaderHits += 1
existingAmbOccs = cmds.ls(type = 'mib_amb_occlusion')
for existingAmbOcc in existingAmbOccs:
if mel.eval('attributeExists mMaya_atomShaderAmbOcc ' +existingAmbOcc):
ambOcc = existingAmbOcc
shaderHits += 1
existingRamps = cmds.ls(type = 'ramp')
for existingRamp in existingRamps:
if mel.eval('attributeExists mMaya_atomShaderRGBRamp ' +existingRamp):
ramp = existingRamp
shaderHits += 1
if shaderHits == 4:
return shaderSG, shaderSGAmbOcc, ambOcc, ramp
elif shaderHits == 0:
shader = cmds.shadingNode('phong', asShader = 1, name = ("atomShader"))
cmds.setAttr((shader+ '.reflectivity'), 0)# no rayTrace
cmds.setAttr((shader+ '.cosinePower'), 3)
shaderSG = cmds.sets(renderable = 1, noSurfaceShader = 1, empty = 1)
cmds.addAttr(shaderSG, ln = 'mMaya_atomShaderSG', at = 'bool', h = 1)
cmds.connectAttr((shader+ '.outColor'), (shaderSG+ '.surfaceShader'))
shaderAmbOcc = cmds.shadingNode('phong', asShader = 1, name = ("atomShaderAmbOcc"))
cmds.setAttr((shaderAmbOcc+ '.reflectivity'), 0)
cmds.setAttr((shaderAmbOcc+ '.cosinePower'), 3)
cmds.setAttr((shaderAmbOcc+ '.ambientColor'), 0.7, 0.7, 0.7)
cmds.setAttr((shaderAmbOcc+ '.diffuse'), 0.2)
ambOcc = cmds.createNode('mib_amb_occlusion')
cmds.addAttr(ambOcc, ln = 'mMaya_atomShaderAmbOcc', at = 'bool', h = 1)
cmds.connectAttr((ambOcc+ '.outValue'), (shaderAmbOcc+ '.color'))
cmds.connectAttr((shaderAmbOcc+ '.color'), (shaderAmbOcc+ '.specularColor'))
partySampler = cmds.createNode('particleSamplerInfo')
cmds.connectAttr((partySampler+ '.outTransparency'), (shader+ '.transparency'))
cmds.connectAttr((partySampler+ '.outIncandescence'), (shader+ '.incandescence'))
cmds.connectAttr((partySampler+ '.outColor'), (shader+ '.color'))
cmds.connectAttr((partySampler+ '.outTransparency'), (shaderAmbOcc+ '.transparency'))
cmds.connectAttr((partySampler+ '.outIncandescence'), (shaderAmbOcc+ '.incandescence'))
cmds.connectAttr((partySampler+ '.outColor'), (ambOcc+ '.bright'))
shaderSGAmbOcc = cmds.sets(renderable = 1, noSurfaceShader = 1, empty = 1)
cmds.addAttr(shaderSGAmbOcc, ln = 'mMaya_atomShaderSGAmbOcc', at = 'bool', h = 1)
cmds.connectAttr((shaderAmbOcc+ '.outColor'), (shaderSGAmbOcc+ '.surfaceShader'))
ramp = cmds.createNode('ramp')
cmds.setAttr((ramp + '.interpolation'), 0)
cmds.addAttr(ramp, ln = 'mMaya_atomShaderRGBRamp', at = 'bool', h = 1)
valChangePMA = cmds.createNode('plusMinusAverage')
cmds.addAttr(valChangePMA, ln = 'mMaya_atomShaderRGBRampPMA', at = 'bool', h = 1)
cmds.connectAttr((ramp+ '.mMaya_atomShaderRGBRamp'), (valChangePMA+ '.mMaya_atomShaderRGBRampPMA'))
indexDivFactor = 1000.0;
for elem in elems:
indexElem = vanRad_CPK[elem][4]
col = vanRad_CPK[elem][1:-1]
cmds.setAttr((ramp + '.colorEntryList[' +str(indexElem)+ '].position'), (indexElem/indexDivFactor))
#cmds.setAttr((ramp + '.colorEntryList[' +str(indexElem)+ '].color'), col[0], col[1], col[2], type = 'double3')
shade = cmds.shadingNode('surfaceShader', asTexture = 1)
cmds.setAttr((shade + '.outColor'), col[0], col[1], col[2], type = 'double3')
cmds.connectAttr((shade+ '.outColor'), (ramp+ '.colorEntryList[' +str(indexElem)+ '].color'))
cmds.connectAttr((shade+ '.outColor'), (valChangePMA+ '.input3D[' +str(indexElem)+ ']'))
cmds.rename(shade, elems[elem])
return shaderSG, shaderSGAmbOcc, ambOcc, ramp
else:
mel.eval('error "a mMaya default shader has been deleted"')
def addMaterial(self, name, color ):
if color is None :
color = (1.,0.,0.)
name = self.checkName(name)
mat=cmds.ls(name, mat=True)
        if len(mat)==0: #create only if mat didn't exist already
#shading group
cmds.sets( renderable=True, noSurfaceShader=True, empty=True, name=name+"SG" )
#material
# = name[1:]
cmds.shadingNode( 'lambert', asShader=True, name=name )
cmds.setAttr( name+".color", color[0], color[1], color[2], type="double3")
cmds.connectAttr(name+".outColor", name+"SG.surfaceShader")
mat = cmds.ls(name, mat=True)
return mat
def assignMaterial(self,object,matname,texture = True,**kw):
object = self.getObject(object,doit=True)
#print "assign " , matname
#print matname
if type(matname) != list :
# name = name.replace(":","_")
matname = self.checkName(matname)
mat=cmds.ls(matname, mat=True)
else :
if type(matname[0]) is list :
mat = matname[0]
matname = str(matname[0][0])
else :
mat = matname
matname = str(matname[0])
#print "find " ,mat
matname = self.checkName(matname)
# if not mat:
# self.createMaterial (matname, (1.,1.,1.), 'lambert')
# conn = cmds.listConnections(cmds.listHistory(object))
## if len(conn) >= 2:
# shade = cmds.listHistory(object)[0].split('|')[1]
# cmds.hyperShade( matname,o=shade,assign=True )
#print 'assign ',object,matname
# print mat,matname
try :
cmds.sets(object, edit=True, forceElement=matname+"SG")
except :
print "problem assigning mat" + matname + " to object "+object
def assignNewMaterial(self, matname, color, type, object):
print matname, color, type, object
self.createMaterial (matname, color, type)
self.assignMaterial (object,matname)
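    # Example (sketch, hypothetical object name): create a red lambert and
    # bind it to an existing mesh in one call.
    #
    #   helper.assignNewMaterial('mat_red', (1., 0., 0.), 'lambert', 'myMesh')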
def colorMaterial(self,matname, color):
matname=self.getMaterial(matname)
if len(matname)==1:
matname=matname[0]
cmds.setAttr( str(matname)+".color", color[0], color[1], color[2], type="double3")
def getMaterial(self,matname):
if type(matname) != str :
return matname
matname = self.checkName(matname)
mat=cmds.ls(matname, mat=True)
if len(mat)==0:
return None
else :
return mat
def getMaterialName(self,mat):
return str(mat)
def getAllMaterials(self):
#return unicode list of material
#mat=getMaterials()
matlist=cmds.ls(mat=True)#[]
return matlist
def getMaterialObject(self,obj):
obj = self.getObject(obj)
matnames = cmds.listConnections(cmds.listHistory(obj,f=1),type='lambert')
return matnames
def changeObjColorMat(self,obj,color):
#obj should be the object name, in case of mesh
#in case of spher/cylinder etc...atom name give the mat name
#thus matname should be 'mat_'+obj
obj = self.checkName(obj)
matname = "mat_"+str(obj)
self.colorMaterial(matname,color)
def changeColor(self,mesh,colors,perVertex=True,perObjectmat=None,pb=False,
facesSelection=None,faceMaterial=False):
#if hasattr(geom,'obj'):obj=geom.obj
#else : obj=geom
#mesh = self.getMesh(mesh)
        if type(colors[0]) is not list and len(colors) == 3 :
            colors = [colors,]
print "change color",type(mesh),mesh
res = self.color_mesh_perVertex(mesh,colors,perVertex=perVertex,
facesSelection=facesSelection,
faceMaterial=faceMaterial)
if not res or len(colors) == 1:
#simply apply the color/material to mesh
#get object material, if none create one
# print "material assign"
mats = self.getMaterialObject(mesh)
# print mats
if not mats :
self.assignNewMaterial("mat"+self.getName(mesh), colors[0],
'lambert', mesh)
else :
self.colorMaterial(mats[0],colors[0])
def getMaterialProperty(self,material, **kw):
"""
Change a material properties.
* overwrited by children class for each host
@type material: string/Material
@param material: the material to modify
- color
- specular
- ...
"""
mat =self.getMaterial(material)
if len(mat)==1:
mat=mat[0]
res = {}
if mat is None :
return
if "specular" in kw :
res["specular"] = True#mat[c4d.MATERIAL_USE_SPECULAR]
if "specular_color" in kw :
res["specular_color"] = [0,0,0]#self.ToVec(mat[c4d.MATERIAL_SPECULAR_COLOR],pos=False)
if "specular_width" in kw :
res["specular_width"] = 0#mat[c4d.MATERIAL_SPECULAR_WIDTH]
if "color" in kw :
res["color"] = cmds.getAttr( str(mat)+".color")[0]
if "diffuse" in kw :
res["diffuse"] = cmds.getAttr( str(mat)+".diffuse")[0]
return res
###################Meshs and Objects#####################################################################################
def Sphere(self,name,res=16.,radius=1.0,pos=None,color=None,
mat=None,parent=None,type="nurb"):
# iMe[atn],node=cmds.sphere(name=name+"Atom_"+atn,r=rad)
name = self.checkName(name)
t=res/100.
if type == "nurb" :
transform_node,shape = cmds.sphere(name=name,r=radius,sections=int(res),
spans=int(res)) #NurbSphere
elif type == "poly":
transform_node,shape = cmds.polySphere( n=name, r=radius,sx=int(res), sy=int(res))
#shape is name+"Shape"
if pos is not None :
cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),
transform_node,absolute=True )
if mat is not None :
mat = self.getMaterial(mat)
if mat is not None :
self.assignMaterial(transform_node,mat)
else :
if color is not None :
mat = self.addMaterial("mat"+name,color)
else :
mat = self.addMaterial("mat"+name,[1.,1.,0.])
# mat = self.getMaterial(name)
self.assignMaterial(transform_node,mat)
if parent is not None :
self.reParent(transform_node,parent)
return transform_node,shape
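    # Example (sketch): a polygonal sphere of radius 2 at the origin; a
    # yellow material is auto-created since no mat/color is given
    # ('sph1' is a hypothetical name):
    #
    #   tr, shape = helper.Sphere('sph1', res=16, radius=2.0,
    #                             pos=[0., 0., 0.], type='poly')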
def updateSphereMesh(self,mesh,verts=None,faces=None,basemesh=None,
scale=None,typ=True,**kw):
#scale or directly the radius..Try the radius
#scale is actualy the radius
# name = self.getObject(mesh)
#would it be faster with openMaya
mesh = self.checkName(mesh)
if typ:
cmds.sphere(mesh,e=1,r=scale)
else :
cmds.polySphere(mesh,e=1,r=scale)
def updateSphereObj(self,obj,coords=None):
if obj is None or coords is None: return
obj = self.getObject(obj)
#would it be faster we transform action
self.setTranslation(obj,coords)
# cmds.move(float(coords[0]),float(coords[1]),float(coords[2]), obj, absolute=True )
# def updateSphereObjs(self,g,coords=None):
# if not hasattr(g,'obj') : return
# if coords == None :
# newcoords=g.getVertices()
# else :
# newcoords=coords
# #print "upadteObjSpheres"
# #again map function ?
# for i,nameo in enumerate(g.obj):
# c=newcoords[i]
# o=getObject(nameo)
# cmds.move(float(c[0]),float(c[1]),float(c[2]), o, absolute=True )
def instancesCylinder(self,name,points,faces,radii,
mesh,colors,scene,parent=None):
cyls=[]
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
cyl = self.oneCylinder(name+str(i),points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
instance=mesh,material=mat,parent = parent)
cyls.append(cyl)
return cyls
def updateInstancesCylinder(self,name,cyls,points,faces,radii,
mesh,colors,scene,parent=None,delete = True):
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
col=None
if i < len(colors):
col = colors[i]
if i < len(cyls):
self.updateOneCylinder(cyls[i],points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
material=mat,color=col)
self.toggleDisplay(cyls[i],True)
else :
cyl = self.oneCylinder(name+str(i),points[faces[i][0]],
points[faces[i][1]],radius=radii[i],
instance=mesh,material=mat,parent = parent)
cyls.append(cyl)
        if len(faces) < len(cyls) :
            #remove or hide the extra cylinders
            if delete :
                for obj in cyls[len(faces):]:
                    self.deleteObject(obj)
                del cyls[len(faces):]
            else :
                for i in range(len(faces),len(cyls)):
                    self.toggleDisplay(cyls[i],False)
return cyls
def instancesSphere(self,name,centers,radii,meshsphere,colors,scene,parent=None):
name = self.checkName(name)
sphs=[]
mat = None
if len(colors) == 1:
print (colors)
mat = self.retrieveColorMat(colors[0])
if mat == None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(centers)):
sphs.append(cmds.instance(meshsphere,name=name+str(i)))
#local transformation ?
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),name+str(i))
cmds.scale(float(radii[i]),float(radii[i]),float(radii[i]), name+str(i),absolute=True )
if mat == None : mat = self.addMaterial("matsp"+str(i),colors[i])
self.assignMaterial(name+str(i),mat)#mat[bl.retrieveColorName(sphColors[i])]
self.addObjectToScene(scene,sphs[i],parent=parent)
return sphs
def updateInstancesSphere(self,name,sphs,centers,radii,meshsphere,
colors,scene,parent=None,delete=True):
mat = None
if len(colors) == 1:
mat = self.retrieveColorMat(colors[0])
if mat == None and colors[0] is not None:
mat = self.addMaterial('mat_'+name,colors[0])
for i in range(len(centers)):
if len(radii) == 1 :
rad = radii[0]
elif i >= len(radii) :
rad = radii[0]
else :
rad = radii[i]
if i < len(sphs):
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),sphs[i])#name+str(i))
cmds.scale(float(rad),float(rad),float(rad), sphs[i],absolute=True )
# sphs[i].SetAbsPos(self.FromVec(centers[i]))
# sphs[i][905]=c4d.Vector(float(rad),float(rad),float(rad))
if mat == None :
if colors is not None and i < len(colors) and colors[i] is not None :
mat = self.addMaterial("matsp"+str(i),colors[i])
if colors is not None and i < len(colors) and colors[i] is not None :
self.colorMaterial(mat,colors[i])
self.toggleDisplay(sphs[i],True)
else :
sphs.append(cmds.instance(meshsphere,name=name+str(i)))
#local transformation ?
cmds.move(float(centers[i][0]),float(centers[i][1]),float(centers[i][2]),name+str(i))
cmds.scale(float(rad),float(rad),float(rad), name+str(i),absolute=True )
if mat == None : mat = self.addMaterial("matsp"+str(i),colors[i])
self.assignMaterial(name+str(i),mat)#mat[bl.retrieveColorName(sphColors[i])]
self.addObjectToScene(scene,sphs[i],parent=parent)
if mat == None :
if colors is not None and i < len(colors) and colors[i] is not None :
mat = self.addMaterial("matsp"+str(i),colors[i])
self.addObjectToScene(scene,sphs[i],parent=parent)
        if len(centers) < len(sphs) :
            #remove or hide the extra spheres
            if delete :
                for obj in sphs[len(centers):]:
                    print "delete",obj
                    self.deleteObject(obj)
                del sphs[len(centers):]
            else :
                for i in range(len(centers),len(sphs)):
                    self.toggleDisplay(sphs[i],False)
return sphs
def constraintLookAt(self,object):
"""
Cosntraint an hostobject to llok at the camera
@type object: Hostobject
@param object: object to constraint
"""
self.getObject(object)
cmds.orientConstraint( 'persp', object )
def updateText(self,text,string="",parent=None,size=None,pos=None,font=None):
text = self.checkName(text)
if string : cmds.textCurves(text, e=1, t=string )
# if size is not None : text[c4d.PRIM_TEXT_HEIGHT]= size
# if pos is not None : self.setTranslation(text,pos)
# if parent is not None : self.reParent(text,parent)
def extrudeText(self,text,**kw):
tr,parent = self.getTransformNode(text)
nChild = parent.childCount()
print nChild
#dag = om.MFnDagNode(node)
dnode = om.MFnDependencyNode(parent.transform())
child_path = om.MDagPath()
cmd ="constructionHistory=True,normalsOutwards=True,range=False,polygon=1,\
tolerance=0.01,numberOfSides=4 ,js=True,width=0 ,depth=0 ,extrudeDepth=0.5,\
capSides=4 ,bevelInside=0 ,outerStyle=0 ,innerStyle=0 ,\
polyOutMethod=0,polyOutCount=200,polyOutExtrusionType=2 ,\
polyOutExtrusionSamples=3,polyOutCurveType=2 ,\
polyOutCurveSamples=3,polyOutUseChordHeightRatio=0)"
for i in range(nChild):
#get all curve
node_child = parent.child(i)
child_tr,child_path = self.getTransformNode(node_child)
dnode = om.MFnDependencyNode(node_child)
nChildChild = child_path.childCount()
for j in range(nChildChild):
cmdchilds="cmds.bevelPlus("
node_child_child = child_path.child(j)
dnode = om.MFnDependencyNode(node_child_child)
cmdchilds+='"'+dnode.name()+'",'
cmdchilds+="n='bevel_"+dnode.name()+str(j)+"',"+cmd
cmdbis = 'cmds.bevel("'+dnode.name()+'",n="bevel_'+dnode.name()+str(j)+'", ed=0.5)'
eval(cmdbis)
cmds.bevel(e=1,w=0,d=0)
def Text(self,name="",string="",parent=None,size=5.,pos=None,font='Courier',
lookAt=False,**kw):
return_extruder = False
name = self.checkName(name)
if "extrude" in kw :
extruder = None
if type(kw["extrude"]) is bool and kw["extrude"]:
pass
text = cmds.textCurves( n= name, f=font, t=string )
## Result: [u'testShape', u'makeTextCurves2'] #
if pos is not None :
#should add -14
pos[0] = pos[0]-14.0#not center
self.setTranslation(name+'Shape',pos)
# if parent is not None:
self.addObjectToScene(self.getCurrentScene(),name+'Shape',parent=parent)
if lookAt:
self.constraintLookAt(name)
self.scaleObj(text[0],[size,size,size])
if "extrude" in kw :
extruder = None
#create an extruder
if type(kw["extrude"]) is bool and kw["extrude"]:
self.extrudeText(text)
# extruder = cmds.bevelPlus( text[1], ed=0.5)
# extruder = cmds.bevel( text, ed=0.5,w=0.0,d=0.0)
#reparent the extruder ?
# self.reParent(extruder,parent)
#po=1, cap=4,
# extruded=cmds.extrude( extrude_obj,self.checkName(name)+"_spline",
# et = 2, ucp = 1,n=name, fpt=1,upn=1)
return_extruder = True
            else :
                self.extrudeText(text)
#                extruder = cmds.bevel( text, ed=0.5,w=0.0,d=0.0)
                if extruder is not None :
                    self.reParent(extruder,parent)
# if extruder is not None :
# pass
self.addObjectToScene(self.getCurrentScene(),name+'Shape',parent=parent)
if return_extruder :
return text,None
return text
def getBoxSize(self,name):
#kPolyCube
# cmds.select(name)
# print(name)
sx = cmds.polyCube(name, q=True,w=True)
sy = cmds.polyCube(name, q=True,h=True)
sz = cmds.polyCube(name, q=True,d=True)
return [sx,sy,sz]
def box(self,name,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,visible=1,
mat=None,**kw):
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
res = 15.
name = self.checkName(name)
box,shape = cmds.polyCube(name=name,w=float(size[0]),h=float(size[1]),
d=float(size[2]), sx=res, sy=res, sz=res )
mat = self.addMaterial("mat"+name,[1.,1.,0.])
self.assignMaterial(box,mat)
cmds.move(float(center[0]),float(center[1]),float(center[2]),box)
parent = None
if "parent" in kw :
parent = kw["parent"]
self.addObjectToScene(self.getCurrentScene(),box,parent=parent)
return box,shape
def updateBox(self,box,center=[0.,0.,0.],size=[1.,1.,1.],cornerPoints=None,
visible=1, mat = None):
box=self.getObject(box)
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
cmds.move(float(center[0]),float(center[1]),float(center[2]),box)
cmds.polyCube(box,e=1,w=float(size[0]),h=float(size[1]),
d=float(size[2]))
def Cone(self,name,radius=1.0,length=1.,res=16,pos = None,parent=None):
name = self.checkName(name)
diameter = 2*radius
cone,mesh=cmds.cone(name=name,axis=[0.0,1.0,0.0],hr=length,
r=radius,s=res,nsp=res)
if pos != None : cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),cone)
if parent is not None:
self.reParent(cone,parent)
# self.addObjectToScene(self.getCurrentScene(),instance)
return str(cone),mesh
def Cylinder(self,name,radius=1.,length=1.,res=16,pos = None,parent=None,**kw):
#import numpy
name = self.checkName(name)
diameter = 2*radius
axis = [0.0,0.0,1.0]
if "axis" in kw : #orientation
dic = {"+X":[1.,0.,0.],"-X":[-1.,0.,0.],"+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
"+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
if type(kw["axis"]) is str :
axis = dic[kw["axis"]]
else :
axis = kw["axis"]
cyl,mesh=cmds.polyCylinder(name=name,axis=axis,
r=radius, sx=res, sy=res, sz=5, h=length)
if pos != None : cmds.move(float(pos[0]),float(pos[1]),float(pos[2]),cyl)
if parent is not None:
self.reParent(cyl,parent)
# self.addObjectToScene(self.getCurrentScene(),instance)
return str(cyl),mesh#,mesh
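    # Example (sketch): a cylinder aligned on +Y via the 'axis' keyword
    # (string keys come from the dic mapping above; 'cyl1' is hypothetical):
    #
    #   cyl, mesh = helper.Cylinder('cyl1', radius=0.5, length=4.0,
    #                               res=16, pos=[0., 1., 0.], axis='+Y')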
def oneCylinder(self,name,head,tail,radius=None,instance=None,material=None,
parent = None,color=None):
name = self.checkName(name)
laenge,wsz,wz,coord=self.getTubeProperties(head,tail)
# print "oneCylinder instance",instance
if instance == None :
obj = self.Cylinder(name)
else :
obj = self.newMInstance(name,instance,parent=parent)
# obj = name
# self.translateObj(name,coord)
# self.setTranslation(name,coord)
# #obj.setLocation(float(coord[0]),float(coord[1]),float(coord[2]))
# cmds.setAttr(name+'.ry',float(degrees(wz)))
# cmds.setAttr(name+'.rz',float(degrees(wsz)))
# cmds.scale( 1, 1, laenge, name,absolute=True )
if radius is None :
radius= 1.0
self.setTransformation(obj,trans=coord,scale=[radius, radius, laenge],
rot=[0.,wz,wsz])
if material is not None :
self.assignMaterial(obj,material)
elif color is not None :
mats = self.getMaterialObject(obj)
if not mats :
mat = self.addMaterial("mat_"+name,color)
self.assignMaterial(obj,mat)
else :
self.colorMaterial(mats[0],color)
return obj
def updateOneCylinder(self,name,head,tail,radius=None,material=None,color=None):
name = self.checkName(name)
laenge,wsz,wz,coord=self.getTubeProperties(head,tail)
obj = self.getObject(name)
if radius is None :
radius= 1.0
self.setTransformation(obj,trans=coord,scale=[radius, radius, laenge],
rot=[0.,wz,wsz])
if material is not None :
self.assignMaterial(obj,material)
elif color is not None :
mats = self.getMaterialObject(obj)
if not mats :
mat = self.addMaterial("mat_"+name,color)
self.assignMaterial(obj,mat)
else :
self.colorMaterial(mats[0],color)
return obj
def updateTubeObj(self,o,coord1,coord2):
laenge,wsz,wz,pos=self.getTubeProperties(coord1,coord2)
self.setTransformation(o,trans=pos,scale=[1., 1., laenge],
rot=[0.,wz,wsz])
# cmds.scale( 1., 1., laenge, o,absolute=True )
# self.setTranslation(o,pos)
## cmds.move(float(pos[0]),float(pos[1]),float(pos[2]), o, absolute=True )
# cmds.setAttr(o+'.ry',float(degrees(wz)))
# cmds.setAttr(o+'.rz',float(degrees(wsz)))
def updateTubeMeshold(self,atm1,atm2,bicyl=False,cradius=1.0,quality=0):
self.updateTubeObj(atm1,atm2,bicyl=bicyl,cradius=cradius)
def updateTubeMesh(self,mesh,basemesh=None,cradius=1.0,quality=0):
# print mesh
# print cradius, mesh
mesh = self.getObject(str(mesh))
# print mesh
maya.cmds.polyCylinder(mesh,e=True,r=cradius)
# def updateTubeObjs(self,g):
# if not hasattr(g,'obj') : return
# newpoints=g.getVertices()
# newfaces=g.getFaces()
# #print "upadteObjTubes"
# for i,o in enumerate(g.obj):
# laenge,wsz,wz,pos=self.getTubeProperties(points[f[0]],points[f[1]])
# cmds.scale( 1, 1, laenge, o,absolute=True )
# cmds.move(float(pos[0]),float(pos[1]),float(pos[2]), o, absolute=True )
# cmds.setAttr(o+'.ry',float(degrees(wz)))
# cmds.setAttr(o+'.rz',float(degrees(wsz)))
def plane(self,name,center=[0.,0.,0.],size=[1.,1.],cornerPoints=None,visible=1,**kw):
#polyPlane([axis=[linear, linear, linear]], [
# constructionHistory=boolean], [createUVs=int], [height=linear],
# [name=string], [object=boolean], [subdivisionsX=int],
# [subdivisionsY=int], [texture=int], [width=linear])
plane,shape = cmds.polyPlane(name=name,w=float(size[0]),h=float(size[1]),
ax=[0.,0.,1.])
if cornerPoints != None :
for i in range(3):
size[i] = cornerPoints[1][i]-cornerPoints[0][i]
for i in range(3):
center[i]=(cornerPoints[0][i]+cornerPoints[1][i])/2.
cmds.move(float(center[0]),float(center[1]),float(center[2]),plane)
if "subdivision" in kw :
cmds.polyPlane(plane,e=1,
sx=kw["subdivision"][0],sy=kw["subdivision"][1])
if "axis" in kw : #orientation
dic = { "+X":[1.,0.,0.],"-X":[-1.,0.,0.],
"+Y":[0.,1.,0.],"-Y":[0.,-1.,0.],
"+Z":[0.,0.,1.],"-Z":[0.,0.,-1.]}
idic = { 0:[1.,0.,0.],1:[-1.,0.,0.],
2:[0.,1.,0.],3:[0.,-1.,0.],
4:[0.,0.,1.],5:[0.,0.,-1.]}
if type(kw["axis"]) is str :
axis = dic[kw["axis"]]
else : #int
axis = idic[kw["axis"]]
cmds.polyPlane(plane,e=1,ax=axis)
# if "material" in kw :
# texture = plane.MakeTag(c4d.Ttexture)
# if type(kw["material"]) is c4d.BaseMaterial :
# texture[1010] = kw["material"]
# else :
# texture[1010] = self.addMaterial("plane",[1.,1.,0.])
parent = None
if "parent" in kw :
parent = kw["parent"]
self.addObjectToScene(self.getCurrentScene(),plane,parent=parent)
return plane,shape
def PointCloudObject(self,name,**kw):
#print "cloud", len(coords)
name = self.checkName(name)
coords=kw['vertices']
# nface = 0
# if kw.has_key("faces"):
# nface = len(kw['faces'])
# obj = self.createsNmesh(name+'ds',coords,None,[])
# return obj[0]
partShape,part = self.particule(name+"ds", coords)
return part,partShape
def getJointPosition(self,jointname):
return self.getTranslation(jointname)
#return self.getTranslationOM(jointname)
# fnJt=oma.MFnIkJoint()
# mobj = self.getNode(jointname)
# if not fnJt.hasObj(mobj ) :
# print "no joint provided!"
# return None
# fnJt.setObject(mobj)
# cvs = om.MPointArray()
# ncurve.getCVs(cvs,om.MSpace.kPostTransform)
# return cvs
def updateArmature(self,basename,coords,listeName=None,scn=None,root=None,**kw):
for j in range(len(coords)):
atC=coords[j]
name = basename+'bone'+str(j)
if listeName is not None:
name = listeName[j]
relativePos=[atC[0],atC[1],atC[2]]
cmds.joint(self.checkName(name),e=1, p=relativePos)
def armature(self,basename,coords,listeName=None,scn=None,root=None,**kw):
#bones are called joint in maya
#they can be position relatively or globally
basename = self.checkName(basename)
bones=[]
# center = self.getCenter(coords)
parent = self.newEmpty(basename)
self.addObjectToScene(scn,parent,parent=root)
for j in range(len(coords)):
atC=coords[j]
#bones.append(c4d.BaseObject(BONE))
relativePos=[atC[0],atC[1],atC[2]]
name = basename+'bone'+str(j)
if listeName is not None:
name = listeName[j]
joint=cmds.joint(n=self.checkName(name), p=relativePos) #named "joint1"
bones.append(joint)
if scn != None :
if j==0 : self.addObjectToScene(scn,bones[j],parent=parent)
else : self.addObjectToScene(scn,bones[j],parent=bones[j-1])
return parent,bones
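    # Example (sketch): build a three-joint chain from world coordinates.
    # Each cmds.joint call above positions a joint at the given point:
    #
    #   coords = [[0., 0., 0.], [0., 2., 0.], [0., 4., 0.]]
    #   root, bones = helper.armature('arm', coords,
    #                                 scn=helper.getCurrentScene())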
def bindGeom2Bones(self,listeObject,bones):
"""
Make a skinning. Namely bind the given bones to the given list of geometry.
This function will joins the list of geomtry in one geometry
@type listeObjects: list
@param listeObjects: list of object to joins
@type bones: list
@param bones: list of joins
"""
if len(listeObject) >1:
self.JoinsObjects(listeObject)
else :
self.ObjectsSelection(listeObject,"new")
#2- add the joins to the selection
self.ObjectsSelection(bones,"add")
#3- bind the bones / geoms
cmds.bindSkin()
#IK:cmds.ikHandle( sj='joint1', ee='joint5', p=2, w=.5 )
def getParticulesPosition(self,name):
name = self.checkName(name)
partO=self.getMShape(name) #shape..
fnP = omfx.MFnParticleSystem(partO)
pos=om.MVectorArray(fnP.count())
oriPsType = fnP.renderType()
if(oriPsType == omfx.MFnParticleSystem.kTube):
fnP.position0(pos);
else:
fnP.position(pos);
return pos
def setParticulesPosition(self,newPos,PS=None):
if PS == None :
return
obj = self.checkName(PS)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in newPos:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
# pts.append(p)
#fnP.emit(pts)
fnP.setPerParticleAttribute("position",pos)
def getParticles(self,name,**kw):
PS = self.getObject(name)
return PS
def updateParticles(self,newPos,PS=None,**kw):
if PS == None :
return
obj = self.checkName(PS)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
currentN = fnP.count()
N = len(newPos)
fnP.setCount(N)
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in newPos:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
fnP.setPerParticleAttribute("position",pos)
        #this updates the particle positions, not the particle count
def updateParticleRotation(self,obj,rotation):
obj = self.checkName(obj)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
        rot=om.MVectorArray(fnP.count())
        #euler angles?
        for v in rotation:
            p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
            rot.append(p)
        fnP.setPerParticleAttribute("rotationPP",rot)
        #this updates the particle rotations, not the particle count
def updateParticle(self,obj,vertices,faces):
obj = self.checkName(obj)
partO=self.getMShape(obj) #shape..
fnP = omfx.MFnParticleSystem(partO)
oriPsType = fnP.renderType()
if(oriPsType == omfx.MFnParticleSystem.kTube):
if faces is None :
return
position0 = om.MVectorArray()
position1 = om.MVectorArray()
            for i,f in enumerate(faces):
coord1 = c = vertices[f[0]]
coord2 = vertices[f[1]]
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position0.append(p)
c= coord2
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position1.append(p)
fnP.setPerParticleAttribute("position0",position0)
fnP.setPerParticleAttribute("position1",position1)
else :
pos=om.MVectorArray(fnP.count())
#pts = om.MPointArray(fnP.count())
for v in vertices:
p = om.MVector( float(v[0]),float(v[1]),float(v[2]) )
pos.append(p)
# pts.append(p)
#fnP.emit(pts)
fnP.setPerParticleAttribute("position",pos)
#fnP.setPerParticleAttribute? position
#stat = resultPs.emit(finalPos);
def particule(self,name, coord,**kw):
name = self.checkName(name)
if coord is not None :
try :
coord = numpy.array(coord).tolist()
except :
pass
part,partShape=cmds.particle(n=name,p=list(coord))
else :
part,partShape=cmds.particle(n=name)
# instant = cmds.particleInstancer(part, a = 1, object = cyl[0],
# position = 'bondPos', aimDirection = 'velocity',
# scale = 'bondScaler',
# name = (chainName+ '_geoBondsInstances'))
return partShape,part
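    # Example (sketch): a small particle cloud from a list of xyz points.
    # The method returns (shape, transform), matching the order expected by
    # PointCloudObject above:
    #
    #   pts = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
    #   partShape, part = helper.particule('cloud1', pts)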
def updateMetaball(self,name,vertices=None):
if vertices is None :
return
self.updateParticle(name,vertices=vertices,faces=None)
def metaballs(self,name,coords,radius,scn=None,root=None,**kw):
# atoms=selection.findType(Atom)
        #no metaball native in maya, need to use a particle set to blobby surface
        #use of the point cloud polygon object as the emitter
# name is on the form 'metaballs'+mol.name
# if scn == None:
# scn = self.getCurrentScene()
#molname = name.split("balls")[1]
#emiter = molname+"_cloud"
name = self.checkName(name)
partShape,part = self.particule(name, coords)
#need to change the rep
node = self.getNode(partShape)
plug = self.getNodePlug("particleRenderType",node)
        plug.setInt(7) #Blobby surface s/w
return part,partShape
def splinecmds(self,name,coords,type="",extrude_obj=None,scene=None,parent=None):
#Type : "sBezier", "tBezier" or ""
name = self.checkName(name)
if scene is None :
scene = self.getCurrentScene()
#parent=newEmpty(name)
curve = cmds.curve(n=name,p=coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
objName=cmds.ls("curveShape1")
cmds.rename(objName,name+"Shape")
cmds.setAttr(name+"Shape"+".dispEP",1)
if parent is not None :
cmds.parent( name, parent)
return name,None
def extrudeSpline(self,spline,**kw):
extruder = None
shape = None
spline_clone = None
if "shape" in kw:
if type(kw["shape"]) == str :
shape = self.build_2dshape("sh_"+kw["shape"]+"_"+str(spline),
kw["shape"])[0]
else :
shape = kw["shape"]
        if shape is None :
            shape = self.build_2dshape("sh_circle"+str(spline))[0]
if "extruder" in kw:
extruder = kw["extruder"]
# if extruder is None :
# extruder=self.sweepnurbs("ex_"+spline.GetName())
if "clone" in kw and kw["clone"] :
spline_clone = cmds.duplicate(spline,n="exd"+str(spline))
self.resetTransformation(spline_clone)
extruder=cmds.extrude( shape[0],spline_clone,
et = 2, ucp = 1,n="ex_"+str(spline), fpt=1,upn=1)
self.toggleDisplay(spline_clone,False)
return extruder,shape,spline_clone
else :
extruder=cmds.extrude( shape[0],spline,
et = 2, ucp = 1,n="ex_"+str(spline), fpt=1,upn=1)
return extruder,shape
#setAttr "extrudedSurfaceShape1.simplifyMode" 1;
def build_2dshape(self,name,type="circle",**kw):
shapedic = {"circle":{"obj":cmds.circle,"size":["r",]},
# "rectangle":{"obj":None,"size":[0,0]}
}
shape = shapedic[type]["obj"](n=name, nr=(1, 0, 0), c=(0, 0, 0),r=0.3)
dopts = [1.,1.]
if "opts" in kw :
dopts = kw["opts"]
if len(shapedic[type]["size"]) == 1 :
pass
# shape[shapedic[type]["size"][0]] = dopts[0]
else :
for i in range(len(shapedic[type]["size"])) :
pass
# shape[shapedic[type]["size"][i]] = dopts[i]
self.addObjectToScene(None,shape)
return shape,name+"Shape"
def spline(self,name,coords,type="",extrude_obj=None,scene=None,
parent=None,**kw):
#Type :
name = self.checkName(name)
if scene is None :
scene = self.getCurrentScene()
#parent=newEmpty(name)
if extrude_obj is not None:
shape,curve = self.omCurve(name+"_spline",coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
if parent is not None :
cmds.parent( self.checkName(name)+"_spline", parent)
# extrude profile curve along path curve using "flat" method
# The extrude type can be distance-0, flat-1, or tube-2
extruded=cmds.extrude( extrude_obj,self.checkName(name)+"_spline",
et = 2, ucp = 1,n=name, fpt=1,upn=1)
#setAttr "extrudedSurfaceShape1.simplifyMode" 1;
return name,shape,extruded
shape,curve = self.omCurve(name,coords)
#return the name only, but create a transform node with name : name
#and create a curveShape named curveShape1
if parent is not None :
cmds.parent( self.checkName(name), parent)
return name,shape
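    # Example (sketch): a plain NURBS curve through four points; with an
    # extrude_obj (a profile curve) the same call would also return the
    # extruded surface ('path1' is a hypothetical name):
    #
    #   pts = [[0., 0., 0.], [1., 1., 0.], [2., 0., 0.], [3., 1., 0.]]
    #   name, shape = helper.spline('path1', pts)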
def getSplinePoints(self,name,convert=False):
name = self.checkName(name)
ncurve = om.MFnNurbsCurve()
mobj = self.getNode(self.checkName(name))
if not ncurve.hasObj(mobj ) :
mobj = self.getNode(self.checkName(name)+"Shape")
if not ncurve.hasObj(mobj) :
print "no curve shape provided!"
return None
ncurve.setObject(mobj)
cvs = om.MPointArray()
ncurve.getCVs(cvs,om.MSpace.kPostTransform)
return cvs
def update_spline(self,name,coords):
#need to provide the object shape name
name = self.checkName(name)
ncurve = om.MFnNurbsCurve()
mobj = self.getNode(self.checkName(name))
if not ncurve.hasObj(mobj ) :
mobj = self.getNode(self.checkName(name)+"Shape")
if not ncurve.hasObj(mobj) :
print "no curve shape provided!"
return None
ncurve.setObject(mobj)
deg = 3; #Curve Degree
ncvs = len(coords); #Number of CVs
spans = ncvs - deg # Number of spans
nknots = spans+2*deg-1 # Number of knots
controlVertices = om.MPointArray()
knotSequences = om.MDoubleArray()
# point array of plane vertex local positions
for c in coords:
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
controlVertices.append(p)
# for i in range(nknots):
# knotSequences.append(i)
# create(controlVertices,knotSequences, deg,
# om.MFnNurbsCurve.kOpen, False, False
ncurve.setCVs(controlVertices,om.MSpace.kPostTransform)
# ncurve.setKnots(knotSequences)
ncurve.updateCurve()
def omCurve(self,name,coords,**kw):
#default value
name = self.checkName(name)
deg = 3; #Curve Degree
ncvs = len(coords); #Number of CVs
if kw.has_key("deg"):
deg = kw['deg']
spans = ncvs - deg # Number of spans
nknots = spans+2*deg-1 # Number of knots
controlVertices = om.MPointArray()
knotSequences = om.MDoubleArray()
# point array of plane vertex local positions
for c in coords:
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
controlVertices.append(p)
for i in range(nknots):
knotSequences.append(i)
curveFn=om.MFnNurbsCurve()
curve = curveFn.create(controlVertices,knotSequences, deg,
om.MFnNurbsCurve.kOpen, False, False)
# curveFn.setName(name)
print (curveFn.partialPathName())
print (curveFn.name())
shapename = curveFn.name()
objName = shapename.split("Shape")[0]
n = shapename.split("Shape")[1]
# objName=cmds.ls("curve1")[0]
cmds.rename(objName+n,name)
nodeName = curveFn.name() #curveShape
cmds.rename(nodeName, name+"Shape")
return curveFn, curve
def createLines(self,name,coords,normal,faces):
partShape,part = self.linesAsParticles(name,coords,faces)
return part
def linesAsParticles(self,name,coords,face):
#what about omfx to create the system...
name = self.checkName(name)
partShape,part = self.particule(name, None)
path = self.getMShape(part)
node = path.node()
depNodeFn = om.MFnDependencyNode( node )
plug = self.getNodePlug("particleRenderType", node )
plug.setInt(9); #Tube s/w
fnP = omfx.MFnParticleSystem(path)
pts = om.MPointArray()
position0 = om.MVectorArray()
position1 = om.MVectorArray()
for i,f in enumerate(face):
coord1 = c = coords[f[0]]
coord2 = coords[f[1]]
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position0.append(p)
c= coord2
p = om.MVector( float(c[0]),float(c[1]),float(c[2]) )
#print 'point:: %f, %f, %f' % (p.x, p.y, p.z)
position1.append(p)
laenge,wsz,wz,c=self.getTubeProperties(coord1,coord2)
p = om.MPoint(om.MFloatPoint( float(c[0]),float(c[1]),float(c[2]) ))
pts.append(p)
# fnP.emit(pts)
fnP.setPerParticleAttribute("position0",position0)
fnP.setPerParticleAttribute("position1",position1)
fnP.emit(pts)
return partShape,part
def mayaVec(self,v):
return om.MFloatPoint( float(v[0]),float(v[1]),float(v[2]) )
def getFaces(self,obj,**kw):
# import numpy
node = self.getNode('mesh_'+obj)
meshnode = om.MFnMesh(node)
triangleCounts =om.MIntArray()
triangleVertices= om.MIntArray()
meshnode.getTriangles(triangleCounts,triangleVertices)
if self._usenumpy :
return numpy.array(triangleVertices).reshape((len(triangleVertices)/3,3))
else :
return triangleVertices
def polygons(self,name,proxyCol=False,smooth=False,color=[[1,0,0],], material=None, **kw):
normals = kw["normals"]
name,meshFS = self.createsNmesh(name,kw['vertices'],normals,kw['faces'],color=color,
smooth=smooth,material=material)
return name
def createsNmesh(self,name,vertices,normal,faces,color=[[1,0,0],],smooth=False,
material=None,proxyCol=False,**kw):
"""
This is the main function that create a polygonal mesh.
@type name: string
@param name: name of the pointCloud
@type vertices: array
@param vertices: list of x,y,z vertices points
        @type normal: array
        @param normal: list of x,y,z vertex normal vectors
@type faces: array
@param faces: list of i,j,k indice of vertex by face
@type smooth: boolean
@param smooth: smooth the mesh
@type material: hostApp obj
@param material: material to apply to the mesh
        @type proxyCol: boolean
        @param proxyCol: do we need a special object for color by vertex (ie C4D)
@type color: array
@param color: r,g,b value to color the mesh
@rtype: hostApp obj
@return: the polygon object
"""
if len(color) == 3 :
if type(color[0]) is not list :
color = [color,]
outputMesh = om.MObject()
#print outputMesh.name()
#cmds.rename(outputMesh.name(), name)
#test=cmds.createNode( 'transform', n='transform1' )
name=name.replace(":","_")
name=name.replace("-","_")
name=name.replace("'","")
name=name.replace('"',"")
name=self.checkName(name)
#print "NMesh ",name
numFaces = 0
if faces is not None :
numFaces = len(faces)
numVertices = len(vertices)
# point array of plane vertex local positions
points = om.MFloatPointArray()
for v in vertices:
points.append(self.mayaVec(v))
#mayaVertices=map(mayaVec,vertices)
#map(points.append,mayaVertices)
# vertex connections per poly face in one array of indexs into point array given above
faceConnects = om.MIntArray()
for f in faces:
for i in f :
faceConnects.append(int(i))
        # an array to hold the number of vertices that each face has
        faceCounts = om.MIntArray()
        for f in faces:
            faceCounts.append(int(len(f)))
#create mesh object using arrays above and get name of new mesh
meshFS = om.MFnMesh()
newMesh = meshFS.create(numVertices, numFaces, points, faceCounts,
faceConnects, outputMesh)
# meshFS.updateSurface()
nodeName = meshFS.name()
cmds.rename(nodeName, "mesh_"+name)
#print 'Mesh node name is: %s' % nodeName
objName=cmds.ls("polySurface1")[0]
cmds.rename(objName,name)
#newName should bydefault polySurface something
# assign new mesh to default shading group
if color is not None and len(color) > 1:
self.color_mesh_perVertex(meshFS,color)
doMaterial = True
if type(material) is bool :
doMaterial = material
if doMaterial:
if material == None :
if len(name.split("_")) == 1 : splitname = name
else :
splitname = name.split("_")[1]
#print name,name[:4],splitname,splitname[:4]
self.assignNewMaterial( "mat_"+name, color[0],'lambert' ,"mesh_"+name)
else :
self.assignMaterial("mesh_"+name,material)
if "parent" in kw :
parent = kw["parent"]
# print "reparent ", name,parent
self.reParent(name,parent)
return name,meshFS#,outputMesh
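    # Example (sketch): a single red triangle. The normal argument is not
    # used by this Maya implementation, so None is passed:
    #
    #   verts = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
    #   faces = [[0, 1, 2]]
    #   name, meshFS = helper.createsNmesh('tri1', verts, None, faces,
    #                                      color=[[1., 0., 0.]])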
def updatePoly(self,obj,vertices=None,faces=None):
if type(obj) is str:
obj = self.getObject(obj)
if obj is None : return
node = self.getMShape(self.checkName(obj))
if node.hasFn(om.MFn.kMesh):
self.updateMesh(obj,vertices=vertices,faces=faces)
elif node.hasFn(om.MFn.kParticle):
self.updateParticle(obj,vertices=vertices,faces=faces)
def updateMesh(self,meshnode,vertices=None,faces=None, smooth=False,**kw):#chains.residues.atoms.coords,indices
# print meshnode,type(meshnode)
if type(meshnode) is str or type(meshnode) is unicode:
node = self.getMShape(self.checkName(meshnode))#self.getNode(self.checkName(meshnode))
meshnode = om.MFnMesh(node)
# meshnode = self.getObject(meshnode,doit=True)
if meshnode is None:
return
nv = meshnode.numVertices()
nf = meshnode.numPolygons()
if vertices is not None :
numVertices = len(vertices)
# point array of plane vertex local positions
points = om.MFloatPointArray()
for v in vertices:
points.append(self.mayaVec(v))
else :
return
#numVertices = nv
        if faces is None :
            #vertices-only update: keep the existing topology
            meshnode.setPoints(points)
            meshnode.updateSurface()
            return
        numFaces = len(faces)
        faceConnects = om.MIntArray()
        for f in faces:
            for i in f :
                faceConnects.append(int(i))
        # an array to hold the number of vertices that each face has
        faceCounts = om.MIntArray()
        for f in faces:
            faceCounts.append(int(len(f)))
#newMesh = meshFS.create(numVertices, numFaces, points, faceCounts, faceConnects, outputMesh)
result = meshnode.createInPlace(numVertices, numFaces, points, faceCounts, faceConnects)
meshnode.updateSurface()
def ToVec(self,v,**kw):
if hasattr(v,"x") :
return [v.x,v.y,v.z]
else :
return v
def arr2marr(self,v):
#from http://www.rtrowbridge.com/blog/2009/02/maya-api-docs-demystified-for-python-users/
self.msutil.createFromList( v, len(v) )
doubleArrayPtr = self.msutil.asDoublePtr()
return doubleArrayPtr
# def vecp2m(self,v):
# #from http://www.rtrowbridge.com/blog/2009/02/maya-api-docs-demystified-for-python-users/
# doubleArrayPtr = self.arr2marr(v)
# vec = om.MVector( doubleArrayPtr )
# return vec
def FromVec(self,v,pos=True):
if isinstance(v,om.MVector):
return v
else :
return om.MVector(v[0], v[1], v[2])
def vec2m(self,v):
if isinstance(v,om.MVector):
return v
else :
return om.MVector(float(v[0]), float(v[1]), float(v[2]))
def ToMat(self,mat,**kw):
#maya - > python
return self.m2matrix(mat)
def FromMat(self,mat,**kw):
#pythn->maya
return self.matrixp2m(mat)
def matrixp2m(self,mat):
#from http://www.rtrowbridge.com/blog/2009/02/python-api-mtransformationmatrixgetrotation-bug/
if isinstance(mat,om.MTransformationMatrix) :
return mat
getMatrix = om.MMatrix()
matrixList = mat#mat.transpose().reshape(16,)
om.MScriptUtil().createMatrixFromList(matrixList, getMatrix)
mTM = om.MTransformationMatrix( getMatrix )
rotOrder = om.MTransformationMatrix().kXYZ
return mTM
def m2matrix(self,mMat):
#return mMat
#do we use numpy
if isinstance(mMat,om.MTransformationMatrix) :
matrix = mMat.asMatrix()
elif isinstance(mMat,om.MMatrix):
matrix = mMat
else :
return mMat
us=om.MScriptUtil()
out_mat = [0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0]
us.createFromList( out_mat, len(out_mat) )
ptr1 = us.asFloat4Ptr()
matrix.get(ptr1)
res_mat = [[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0],
[0.0, 0.0, 0.0,0.0]]
for i in range(4):
for j in range(4):
val = us.getFloat4ArrayItem(ptr1, i,j)
res_mat[i][j]=val
return res_mat
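    # Round-trip sketch between the python and Maya matrix representations
    # (matrixp2m expects a flat row-major list of 16 floats; `helper` is an
    # instance of this class):
    #
    #   flat = [1., 0., 0., 0.,
    #           0., 1., 0., 0.,
    #           0., 0., 1., 0.,
    #           0., 0., 0., 1.]
    #   mTM = helper.matrixp2m(flat)      # om.MTransformationMatrix
    #   pyMat = helper.m2matrix(mTM)      # back to a 4x4 nested list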
def alignNormal(self,poly):
pass
def triangulate(self,poly):
#select poly
doc = self.getCurrentScene()
mesh = self.getMShape(poly)
meshname= mesh.partialPathName()
#checkType
if self.getType(meshname) != self.MESH :
return
cmds.polyTriangulate(meshname)
def getMeshVertices(self,poly,transform=False,selected = False):
meshnode = self.checkIsMesh(poly)
if selected :
mverts_indice = []
verts =[]
v = om.MIntArray()
vertsComponent = om.MObject()
meshDagPath = om.MDagPath()
activeList = om.MSelectionList()
om.MGlobal.getActiveSelectionList(activeList)
selIter = om.MItSelectionList(activeList,om.MFn.kMeshVertComponent)
            while not selIter.isDone():
                selIter.getDagPath(meshDagPath, vertsComponent)
                if not vertsComponent.isNull():
                    # ITERATE THROUGH EACH VERTEX IN THE CURRENT COMPONENT:
                    vertIter = om.MItMeshVertex(meshDagPath,vertsComponent)
                    while not vertIter.isDone():
                        mverts_indice.append(vertIter.index()) #index of the vertex
                        pts = vertIter.position(om.MSpace.kWorld)
                        verts.append(self.ToVec(pts))
                        vertIter.next()
                selIter.next()
            return verts,mverts_indice
else :
nv = meshnode.numVertices()
points = om.MFloatPointArray()
meshnode.getPoints(points)
vertices = [self.ToVec(points[i]) for i in range(nv)]
return vertices
def getMeshNormales(self,poly,selected = False):
meshnode = self.checkIsMesh(poly)
nv = meshnode.numNormals()
normals = om.MFloatVectorArray()
meshnode.getVertexNormals(False,normals)
vnormals = [self.ToVec(normals[i]) for i in range(nv)]
if selected :
v,indice = self.getMeshVertices(poly,selected = selected)
vn=[]
for i in indice:
vn.append(vnormals[i])
return vn,indice
return vnormals
def getMeshEdges(self,poly,selected = False):
#to be tested
meshnode = self.checkIsMesh(poly)
ne= meshnode.numEdges()
edges = []
edgeConnects = om.MIntArray()
for i in range(ne):
meshnode.getEdgeVertices(i,edgeConnects)
edges.append(edgeConnects)
return edges
def getMeshFaces(self,poly,selected = False):
meshnode = self.checkIsMesh(poly)
faceConnects = om.MIntArray()
faceCounts = om.MIntArray()
meshnode.getTriangles(faceCounts,faceConnects)
if selected :
mfaces_indice = []
faces =[]
v = om.MIntArray()
faceComponent = om.MObject()
meshDagPath = om.MDagPath()
activeList = om.MSelectionList()
om.MGlobal.getActiveSelectionList(activeList)
selIter = om.MItSelectionList(activeList,om.MFn.kMeshPolygonComponent)
# print "itersel",selIter.isDone()
            while not selIter.isDone():
                selIter.getDagPath(meshDagPath, faceComponent)
                # print "faces ?",faceComponent.isNull()
                if not faceComponent.isNull():
                    # print ' ITERATE THROUGH EACH "FACE" IN THE CURRENT FACE COMPONENT:'
                    faceIter = om.MItMeshPolygon(meshDagPath,faceComponent)
                    while not faceIter.isDone():
                        mfaces_indice.append(faceIter.index()) #indice of the faces
                        faceIter.getVertices(v)
                        faces.append([v[0],v[1],v[2]])
                        faceIter.next()
                selIter.next()
            return faces,mfaces_indice
if self._usenumpy :
return numpy.array(faceConnects).reshape((len(faceConnects)/3,3))
else :
return faceConnects
def DecomposeMesh(self,poly,edit=True,copy=True,tri=True,transform=True,**kw):
# import numpy
if tri:
self.triangulate(poly)
if type(poly) is str or type(poly) is unicode or type(poly) is list:
mesh = self.getMShape(poly)#dagPath
else :
#have to a object shape node or dagpath
mesh = poly
print ("mesh ", mesh)
if self.getType(mesh.partialPathName()) != self.POLYGON :
if self.getType(mesh.partialPathName()) == self.PARTICULE:
v = self.getParticulesPosition(mesh.partialPathName())
return None,v,None
return None,None,None
#again problem with instance.....
meshnode = om.MFnMesh(mesh)
print ("meshnode",meshnode)
fnTrans = om.MFnTransform(self.getTransformNode(poly)[0])
print ("fnTrans",fnTrans)
# fnTrans = om.MFnTransform(mesh.transform())
#get infos
nv = meshnode.numVertices()
nf = meshnode.numPolygons()
# m = om.MFloatMatrix()
points = om.MFloatPointArray()
normals = om.MFloatVectorArray()
faceConnects = om.MIntArray()
faceCounts = om.MIntArray()
meshnode.getPoints(points)
#meshnode.getNormals(normals)
meshnode.getVertexNormals(False,normals)
meshnode.getTriangles(faceCounts,faceConnects)
fnormals=[]
if self._usenumpy :
faces = numpy.array(faceConnects).reshape((len(faceConnects)/3,3))
else :
faces = faceConnects
vertices = [self.ToVec(points[i]) for i in range(nv)]
vnormals = [self.ToVec(normals[i]) for i in range(nv)]
#remove the copy if its exist? or keep it ?
#need to apply the transformation
if transform :
#node = self.getNode(mesh)
#fnTrans = om.MFnTransform(mesh)
mmat = fnTrans.transformation()
if self._usenumpy :
mat = self.m2matrix(mmat)
vertices = self.ApplyMatrix(vertices,numpy.array(mat).transpose())
vnormals = self.ApplyMatrix(vnormals,numpy.array(mat).transpose())#??
else :
out_mat = [0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0,
0.0, 0.0, 0.0,0.0]
self.msutil.createFromList( out_mat, len(out_mat) )
ptr1 = self.msutil.asFloat4Ptr()
mmat.asMatrix().get(ptr1)
m = om.MFloatMatrix(ptr1)
vertices = []
for i in range(nv) :
v = points[i]*m
vertices.append(self.ToVec(v))
# vertices = [self.ToVec(p*m) for p in points]
# if edit and copy :
# self.getCurrentScene().SetActiveObject(poly)
# c4d.CallCommand(100004787) #delete the obj
print ("ok",len(faces),len(vertices),len(vnormals))
if "fn" in kw and kw["fn"] :
fnormals = []
p = om.MVector( 0.,0.,0. )
for i in range(len(faces)) :
meshnode.getPolygonNormal(i,p,om.MSpace.kWorld)#kPostTransform
fnormals.append(self.ToVec(p))
return faces,vertices,vnormals,fnormals
else :
return faces,vertices,vnormals
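    # Typical call (hypothetical object name, assuming a helper instance `h`):
    #   faces, vertices, vnormals = h.DecomposeMesh("myMesh", transform=True)
    #   f, v, vn, fn = h.DecomposeMesh("myMesh", fn=True)  # also face normals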
def connectAttr(self,shape,i=0,mat=None):
if mat is not None :
#print shape
#print mat+"SG"
            # i is an int, so cast it before concatenating into the attribute name
            cmds.isConnected( shape+'.instObjGroups['+str(i)+']', mat+'SG.dagSetMembers')
#need to get the shape : name+"Shape"
def rotation_matrix(self,angle, direction, point=None,trans=None):
"""
Return matrix to rotate about axis defined by point and direction.
"""
if self._usenumpy:
return Helper.rotation_matrix(angle, direction, point=point,trans=trans)
else :
direction = self.FromVec(direction)
direction.normalize()
out_mat = [1.0, 0.0, 0.0,0.0,
0.0, 1.0, 0.0,0.0,
0.0, 0.0, 1.0,0.0,
0.0, 0.0, 0.0,1.0]
m = self.matrixp2m(out_mat)
# m = om.MTransformationMatrix()
m.setToRotationAxis (direction,angle)
if point is not None:
point = self.FromVec(point)
                m.setTranslation(point,om.MSpace.kPostTransform) # translation = point - (point * m)
if trans is not None :
trans = self.FromVec(trans)
m.setTranslation(trans,om.MSpace.kPostTransform)
# M = m2matrix(m)
return m
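    # Sketch of a 90-degree rotation about Y through the origin (assuming a
    # helper instance `h`; math is the standard library module):
    #   import math
    #   m = h.rotation_matrix(math.pi/2.0, [0.,1.,0.])
    #   # returns an MTransformationMatrix unless self._usenumpy is set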
#==============================================================================
    # object properties
#==============================================================================
def getPropertyObject(self, obj, key=["radius"]):
"""
Return the property "key" of the object obj
* overwrited by children class for each host
@type obj: host Obj
@param obj: the object that contains the property
@type key: string
@param key: name of the property
@rtype : int, float, str, dict, list
@return : the property value
"""
res = []
if "pos" in key :
res.append(self.ToVec(self.getTranslation(obj)))
if "scale" in key :
res.append(self.ToVec(self.getScale(obj)))
if "rotation" in key :
mo = self.getTransformation(obj)
m = self.ToMat(mo)#.transpose()
mws = m.transpose()
rotMatj = mws[:]
rotMatj[3][:3]*=0.0
res.append(rotMatj)
if self.getType(obj) == self.SPHERE :
for k in key :
if k == "radius" :
try :
r=cmds.polySphere(obj,q=1,r=1)
except :
r=cmds.sphere(obj,q=1,r=1)
res.append(r)
if self.getType(obj) == self.CYLINDER :
for k in key :
if k == "radius" :
r=cmds.polyCylinder(obj,q=1,r=1)
res.append(r)
elif k == "length" :
h=cmds.polyCylinder(obj,q=1,h=1)
res.append(h)
elif k == "axis" :
ax = cmds.polyCylinder(obj,q=1,axis=1)
res.append(ax)
if self.getType(obj) == self.CUBE :
for k in key :
if k == "length" :
l = self.getBoxSize(obj)#cmds.polyCube(obj, q=True,h=True)
res.append(l)
return res
#===============================================================================
# Texture Mapping / UV
#===============================================================================
def getUV(self,object,faceIndex,vertexIndex,perVertice=True):
mesh = self.getMShape(object)
meshnode = om.MFnMesh(mesh)
#uv=[]
u_util = maya.OpenMaya.MScriptUtil()
u_util.createFromDouble(0.0)
u_ptr = u_util.asFloatPtr()
v_util = maya.OpenMaya.MScriptUtil()
v_util.createFromDouble(0.0)
v_ptr = v_util.asFloatPtr()
if perVertice :
meshnode.getUV(vertexIndex, u_ptr, v_ptr)
u = u_util.getFloat(u_ptr)
v = v_util.getFloat(v_ptr)
return [u,v]
else :
def getuv(faceIndex,iv,u_ptr,v_ptr):
meshnode.getPolygonUV(faceIndex,iv,u_ptr,v_ptr)
u = u_util.getFloat(u_ptr)
v = v_util.getFloat(v_ptr)
return [u,v]
#uv of the face
return [getuv(faceIndex,iv,u_ptr,v_ptr) for iv in range(3)]
    #bulk assignment below should be faster than per-vertex setUV ?
def setUVs(self,object,uvs):
        #uvs is a dictionary: keys are face indices, values are the uv pairs of that face's 3-4 vertices
ob = self.getObject(object)
node = self.getNode('mesh_'+ob)
meshnode = om.MFnMesh(node)
meshnode.clearUVs()
u = om.MFloatArray()
v = om.MFloatArray()
uvCounts = om.MIntArray()
uvIds = om.MIntArray()
i = 0
for f in uvs:
for k,uv in enumerate(uvs[f]):
uvIds.append(i)
uvCounts.append(len(uvs[f]))
u.append(uv[0])
v.append(uv[1])
#meshnode.setUV(i,uv[0],uv[1])
#meshnode.assignUV(f,k,i)
i = i +1
meshnode.setUVs(u,v)
meshnode.assignUVs(uvCounts,uvIds)
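    # The expected `uvs` layout, illustrated with hypothetical values: one
    # entry per face index, each holding the uv pair of its 3-4 vertices.
    #   uvs = {0: [(0.0,0.0), (1.0,0.0), (1.0,1.0)],
    #          1: [(0.0,0.0), (1.0,1.0), (0.0,1.0)]}
    #   h.setUVs("myMesh", uvs)   # assuming a helper instance `h`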
def setUV(self,object,faceIndex,vertexIndex,uv,perVertice=True,uvid=0):
ob = self.getObject(object)
node = self.getNode('mesh_'+ob)
meshnode = om.MFnMesh(node)
for k in range(3):
luv = uv[k]
meshnode.setUV(uvid,luv[0],luv[1])
meshnode.assignUV(faceIndex,k,uvid)
uvid = uvid +1
return uvid
def hyperShade_meVertCol(self):
#mel command : nodeReleaseCallback graph1HyperShadeEd mentalrayVertexColors1 none;
# nodeOutlinerInputsCmd connectWindow|tl|cwForm|connectWindowPane|leftSideCW connectWindow|tl|cwForm|connectWindowPane|rightSideCW; nodeOutliner -e -r connectWindow|tl|cwForm|connectWindowPane|rightSideCW;
# connectAttr -f mesh_MSMS_MOL1crn.colorSet[0].colorName mentalrayVertexColors1.cpvSets[0];
# // Result: Connected mesh_MSMS_MOL1crn.colorSet.colorName to mentalrayVertexColors1.cpvSets. //
# // Result: connectWindow|tl|cwForm|connectWindowPane|rightSideCW //
pass
#==============================================================================
# import / expor / read load / save
#==============================================================================
def readFile(self,filename,**kw):
fileName, fileExtension = os.path.splitext(filename)
fileExtension=fileExtension.replace(".","")
fileExtension=fileExtension.upper()
if fileExtension == "MA":
fileExtension = "mayaAscii"
elif fileExtension == "DAE":
fileExtension = "DAE_FBX"
elif fileExtension == "FBX":
pass
else :
print ("not supported by uPy, contact us!")
return
# doc = self.getCurrentScene()
        cmds.file(filename ,type=fileExtension,loadReferenceDepth="all", i=True ) #merge the documents
# c4d.documents.MergeDocument(doc,filename,c4d.SCENEFILTER_OBJECTS|c4d.SCENEFILTER_MATERIALS)
def read(self,filename,**kw):
fileName, fileExtension = os.path.splitext(filename)
fileExtension=fileExtension.replace(".","")
fileExtension=fileExtension.upper()
if fileExtension == "MA":
fileExtension = "mayaAscii"
cmds.file(filename ,type=fileExtension,loadReferenceDepth="all", i=True )
elif fileExtension == "DAE" or fileExtension == "FBX":
import maya.mel as mel
#mel.eval('FBXImportMode -v exmerge;')
filename = filename.replace("\\","\\\\")
mel.eval('FBXImport -f "%s" -t 0;' % filename)#FBXGetTakeName ?
else :
print ("not supported by uPy, contact us!")
return
def write(self,listObj,**kw):
pass
#==============================================================================
# raycasting
#==============================================================================
def raycast(self,obj,start, end, length, **kw ):
#posted on cgtalk.com
#part of http://code.google.com/p/dynamica/
mo = self.getTransformation(obj)
mi = mo.asMatrixInverse()
mat = self.ToMat(mi)#.transpose()
point = self.ApplyMatrix([start],numpy.array(mat).transpose())[0]
direction = self.ApplyMatrix([end],numpy.array(mat).transpose())[0]
#om.MGlobal.clearSelectionList()
om.MGlobal.selectByName(obj)
sList = om.MSelectionList()
#Assign current selection to the selection list object
om.MGlobal.getActiveSelectionList(sList)
item = om.MDagPath()
sList.getDagPath(0, item)
item.extendToShape()
fnMesh = om.MFnMesh(item)
raySource = om.MFloatPoint(float(point[0]), float(point[1]), float(point[2]), 1.0)
rayDir = om.MFloatVector(float(direction[0]-point[0]), float(direction[1]-point[1]), float(direction[2]-point[2]))
faceIds = None
triIds = None
idsSorted = False
testBothDirections = False
worldSpace = om.MSpace.kWorld
maxParam = length#999999
accelParams = None
sortHits = True
hitPoints = om.MFloatPointArray()
#hitRayParams = om.MScriptUtil().asFloatPtr()
hitRayParams = om.MFloatArray()
hitFaces = om.MIntArray()
hitTris = None
hitBarys1 = None
hitBarys2 = None
tolerance = 0.0001
#http://download.autodesk.com/us/maya/2010help/API/class_m_fn_mesh.html#114943af4e75410b0172c58b2818398f
hit = fnMesh.allIntersections(raySource, rayDir, faceIds, triIds, idsSorted, worldSpace,
maxParam, testBothDirections, accelParams, sortHits,
hitPoints, hitRayParams, hitFaces, hitTris, hitBarys1,
hitBarys2, tolerance)
om.MGlobal.clearSelectionList()
#print hit, len(hitFaces)
if "count" in kw :
#result = int(fmod(len(hitFaces), 2))
return hit, len(hitFaces)
        #selection was cleared above as it may cause problems if the function
        #is called multiple times in succession
        return hit
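    # Inside/outside test sketch using hit-count parity (hypothetical mesh
    # name, assuming a helper instance `h`): an odd number of intersections
    # along a long ray means the start point lies inside the mesh.
    #   hit, n = h.raycast("myMesh", [0.,0.,0.], [0.,0.,1000.], 1000., count=True)
    #   inside = (n % 2) == 1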
|
gpl-3.0
| -7,838,186,313,154,218,000
| 39.361557
| 212
| 0.542971
| false
| 3.67479
| false
| false
| false
|
phenoxim/nova
|
nova/tests/json_ref.py
|
1
|
2271
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_serialization import jsonutils
def _resolve_ref(ref, base_path):
file_path, _, json_path = ref.partition('#')
if json_path:
        raise NotImplementedError('JSON refs with a JSON path after the "#" '
                                  'are not yet supported')
path = os.path.join(base_path, file_path)
# binary mode is needed due to bug/1515231
with open(path, 'r+b') as f:
ref_value = jsonutils.load(f)
base_path = os.path.dirname(path)
res = resolve_refs(ref_value, base_path)
return res
def resolve_refs(obj_with_refs, base_path):
if isinstance(obj_with_refs, list):
for i, item in enumerate(obj_with_refs):
obj_with_refs[i] = resolve_refs(item, base_path)
elif isinstance(obj_with_refs, dict):
if '$ref' in obj_with_refs.keys():
ref = obj_with_refs.pop('$ref')
resolved_ref = _resolve_ref(ref, base_path)
# the rest of the ref dict contains overrides for the ref. Apply
# those overrides recursively here.
_update_dict_recursively(resolved_ref, obj_with_refs)
return resolved_ref
else:
for key, value in obj_with_refs.items():
obj_with_refs[key] = resolve_refs(value, base_path)
else:
# scalar, nothing to do
pass
return obj_with_refs
def _update_dict_recursively(d, update):
"""Update dict d recursively with data from dict update"""
for k, v in update.items():
if k in d and isinstance(d[k], dict) and isinstance(v, dict):
_update_dict_recursively(d[k], v)
else:
d[k] = v
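# A small illustration of the resolution above (hypothetical files): given
# child.json = {"name": "base"} and a document
#   {"$ref": "child.json", "name": "override"}
# resolve_refs() loads child.json, then _update_dict_recursively() applies the
# sibling keys as overrides, yielding {"name": "override"}.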
|
apache-2.0
| 7,998,802,605,201,372,000
| 35.047619
| 78
| 0.625716
| false
| 3.772425
| false
| false
| false
|
claytantor/coinbase4py
|
webapp/settings.py
|
1
|
4533
|
import os
from ConfigParser import RawConfigParser
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.dirname(__file__)
CONF_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
here = lambda x: os.path.join(os.path.abspath(os.path.dirname(__file__)), x)
# you will need to copy the example and make custom
# settings for the environment
config = RawConfigParser()
#place in a dir that is not managed in the code base
# print 'config dir: {0}/conf/gitpatron_settings.ini'.format(CONF_DIR)
config.read('{0}/conf/coinbase4py_settings.ini'.format(CONF_DIR))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.get('secrets','DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE: RawConfigParser.get() returns strings, not booleans; use
# config.getboolean() if a real bool is required here.
DEBUG = config.get('debug','DEBUG')
TEMPLATE_DEBUG = config.get('debug','TEMPLATE_DEBUG')
ENVIRONMENT = config.get('base','ENVIRONMENT')
ALLOWED_HOSTS = []
#the database for the app
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'coinbase4py.db'),
}
}
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'coinbase4py',
    'webapp',
    'webapp.templatetags',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages")
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
WSGI_APPLICATION = 'webapp.wsgi.application'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, '../', 'static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
PROJECT_DIR + '/../webapp/templates/',
)
USER_ONE=config.get('coinbase4py','USER_ONE')
USER_TWO=config.get('coinbase4py','USER_TWO')
TEST_STATE_DIR=config.get('coinbase4py','TEST_STATE_DIR')
COINBASE4PY_PW_SECRET_KEY=config.get('coinbase4py','COINBASE4PY_PW_SECRET_KEY')
COINBASE_OAUTH_CLIENT_APP=config.get('coinbase','COINBASE_OAUTH_CLIENT_APP')
COINBASE_OAUTH_CLIENT_ID=config.get('coinbase','COINBASE_OAUTH_CLIENT_ID')
COINBASE_OAUTH_CLIENT_SECRET=config.get('coinbase','COINBASE_OAUTH_CLIENT_SECRET')
COINBASE_OAUTH_CLIENT_CALLBACK=config.get('coinbase','COINBASE_OAUTH_CLIENT_CALLBACK')
COINBASE4PY_APP_URL=config.get('coinbase','COINBASE4PY_APP_URL')
COINBASE_ORDER_CALLBACK='{0}/{1}'.format(
config.get('coinbase','COINBASE4PY_APP_URL'),
config.get('coinbase','COINBASE_ORDER_CALLBACK'))
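# For reference, a minimal coinbase4py_settings.ini sketch matching the
# config.get() calls above (section names from the code, values illustrative):
#   [secrets]
#   DJANGO_SECRET_KEY = change-me
#   [debug]
#   DEBUG = True
#   TEMPLATE_DEBUG = True
#   [base]
#   ENVIRONMENT = dev
#   [coinbase4py]
#   USER_ONE = alice
#   USER_TWO = bob
#   TEST_STATE_DIR = /tmp
#   COINBASE4PY_PW_SECRET_KEY = change-me-too
#   [coinbase]
#   COINBASE_OAUTH_CLIENT_APP = myapp
#   ... (remaining COINBASE_* keys as read above)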
|
apache-2.0
| 6,435,432,027,036,842,000
| 33.869231
| 86
| 0.701302
| false
| 3.591918
| true
| false
| false
|
rddim/Notepad-plus-plus
|
scintilla/qt/ScintillaEdit/WidgetGen.py
|
5
|
8222
|
#!/usr/bin/env python3
# WidgetGen.py - regenerate the ScintillaWidgetCpp.cpp and ScintillaWidgetCpp.h files
# Check that API includes all gtkscintilla2 functions
import sys
import os
import getopt
scintillaDirectory = "../.."
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
import Face
from FileGenerator import GenerateFile
def underscoreName(s):
# Name conversion fixes to match gtkscintilla2
irregular = ['WS', 'EOL', 'AutoC', 'KeyWords', 'BackSpace', 'UnIndents', 'RE', 'RGBA']
for word in irregular:
replacement = word[0] + word[1:].lower()
s = s.replace(word, replacement)
out = ""
for c in s:
if c.isupper():
if out:
out += "_"
out += c.lower()
else:
out += c
return out
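# Illustration of the conversion (example inputs, not from the API file):
#   underscoreName("GetSelText") -> "get_sel_text"
#   underscoreName("AutoCShow")  -> "autoc_show"   (irregular word "AutoC")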
def normalisedName(s, options, role=None):
if options["qtStyle"]:
if role == "get":
s = s.replace("Get", "")
return s[0].lower() + s[1:]
else:
return underscoreName(s)
typeAliases = {
"position": "int",
"line": "int",
"pointer": "int",
"colour": "int",
"keymod": "int",
"string": "const char *",
"stringresult": "const char *",
"cells": "const char *",
}
def cppAlias(s):
if s in typeAliases:
return typeAliases[s]
elif Face.IsEnumeration(s):
return "int"
else:
return s
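# For instance (illustrative): cppAlias("position") -> "int",
# cppAlias("stringresult") -> "const char *", any iface enumeration
# collapses to "int", and unknown types pass through unchanged.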
understoodTypes = ["", "void", "int", "bool", "position", "line", "pointer",
"colour", "keymod", "string", "stringresult", "cells"]
def understoodType(t):
return t in understoodTypes or Face.IsEnumeration(t)
def checkTypes(name, v):
understandAllTypes = True
if not understoodType(v["ReturnType"]):
#~ print("Do not understand", v["ReturnType"], "for", name)
understandAllTypes = False
if not understoodType(v["Param1Type"]):
#~ print("Do not understand", v["Param1Type"], "for", name)
understandAllTypes = False
if not understoodType(v["Param2Type"]):
#~ print("Do not understand", v["Param2Type"], "for", name)
understandAllTypes = False
return understandAllTypes
def arguments(v, stringResult, options):
ret = ""
p1Type = cppAlias(v["Param1Type"])
if p1Type == "int":
p1Type = "sptr_t"
if p1Type:
ret = ret + p1Type + " " + normalisedName(v["Param1Name"], options)
p2Type = cppAlias(v["Param2Type"])
if p2Type == "int":
p2Type = "sptr_t"
if p2Type and not stringResult:
if p1Type:
ret = ret + ", "
ret = ret + p2Type + " " + normalisedName(v["Param2Name"], options)
return ret
def printPyFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["val"]:
out.append(name + "=" + v["Value"])
if feat in ["evt"]:
out.append("SCN_" + name.upper() + "=" + v["Value"])
if feat in ["fun"]:
out.append("SCI_" + name.upper() + "=" + v["Value"])
return out
def printHFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
returnType = cppAlias(v["ReturnType"])
if returnType == "int":
returnType = "sptr_t"
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
out.append("\t" + returnType + " " + normalisedName(name, options, feat) + "(" +
arguments(v, stringResult, options)+
")" + constDeclarator + ";")
return out
def methodNames(f, options):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
yield normalisedName(name, options)
def printCPPFile(f, options):
out = []
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
feat = v["FeatureType"]
if feat in ["fun", "get", "set"]:
if checkTypes(name, v):
constDeclarator = " const" if feat == "get" else ""
featureDefineName = "SCI_" + name.upper()
returnType = cppAlias(v["ReturnType"])
if returnType == "int":
returnType = "sptr_t"
stringResult = v["Param2Type"] == "stringresult"
if stringResult:
returnType = "QByteArray"
returnStatement = ""
if returnType != "void":
returnStatement = "return "
out.append(returnType + " ScintillaEdit::" + normalisedName(name, options, feat) + "(" +
arguments(v, stringResult, options) +
")" + constDeclarator + " {")
returns = ""
if stringResult:
returns += " " + returnStatement + "TextReturner(" + featureDefineName + ", "
if "*" in cppAlias(v["Param1Type"]):
returns += "(sptr_t)"
if v["Param1Name"]:
returns += normalisedName(v["Param1Name"], options)
else:
returns += "0"
returns += ");"
else:
returns += " " + returnStatement + "send(" + featureDefineName + ", "
if "*" in cppAlias(v["Param1Type"]):
returns += "(sptr_t)"
if v["Param1Name"]:
returns += normalisedName(v["Param1Name"], options)
else:
returns += "0"
returns += ", "
if "*" in cppAlias(v["Param2Type"]):
returns += "(sptr_t)"
if v["Param2Name"]:
returns += normalisedName(v["Param2Name"], options)
else:
returns += "0"
returns += ");"
out.append(returns)
out.append("}")
out.append("")
return out
def gtkNames():
# The full path on my machine: should be altered for anyone else
p = "C:/Users/Neil/Downloads/wingide-source-4.0.1-1/wingide-source-4.0.1-1/external/gtkscintilla2/gtkscintilla.c"
with open(p) as f:
for l in f.readlines():
if "gtk_scintilla_" in l:
name = l.split()[1][14:]
if '(' in name:
name = name.split('(')[0]
yield name
def usage():
print("WidgetGen.py [-c|--clean][-h|--help][-u|--underscore-names]")
print("")
print("Generate full APIs for ScintillaEdit class and ScintillaConstants.py.")
print("")
print("options:")
print("")
print("-c --clean remove all generated code from files")
print("-h --help display this text")
print("-u --underscore-names use method_names consistent with GTK+ standards")
def readInterface(cleanGenerated):
f = Face.Face()
if not cleanGenerated:
f.ReadFromFile("../../include/Scintilla.iface")
return f
def main(argv):
# Using local path for gtkscintilla2 so don't default to checking
checkGTK = False
cleanGenerated = False
qtStyleInterface = True
# The --gtk-check option checks for full coverage of the gtkscintilla2 API but
# depends on a particular directory so is not mentioned in --help.
opts, args = getopt.getopt(argv, "hcgu", ["help", "clean", "gtk-check", "underscore-names"])
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-c", "--clean"):
cleanGenerated = True
elif opt in ("-g", "--gtk-check"):
checkGTK = True
elif opt in ("-u", "--underscore-names"):
qtStyleInterface = False
options = {"qtStyle": qtStyleInterface}
f = readInterface(cleanGenerated)
try:
GenerateFile("ScintillaEdit.cpp.template", "ScintillaEdit.cpp",
"/* ", True, printCPPFile(f, options))
GenerateFile("ScintillaEdit.h.template", "ScintillaEdit.h",
"/* ", True, printHFile(f, options))
GenerateFile("../ScintillaEditPy/ScintillaConstants.py.template",
"../ScintillaEditPy/ScintillaConstants.py",
"# ", True, printPyFile(f, options))
if checkGTK:
			names = set(methodNames(f, options))
#~ print("\n".join(names))
namesGtk = set(gtkNames())
for name in namesGtk:
if name not in names:
print(name, "not found in Qt version")
for name in names:
if name not in namesGtk:
print(name, "not found in GTK+ version")
except:
raise
if cleanGenerated:
for file in ["ScintillaEdit.cpp", "ScintillaEdit.h", "../ScintillaEditPy/ScintillaConstants.py"]:
try:
os.remove(file)
except OSError:
pass
if __name__ == "__main__":
main(sys.argv[1:])
|
gpl-3.0
| -2,808,446,952,764,096,500
| 28.679104
| 114
| 0.610071
| false
| 3.096798
| false
| false
| false
|