| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wji/plenarnavi_backend
|
data/UUID.py
|
1
|
1035
|
from sqlalchemy.types import TypeDecorator, CHAR
from sqlalchemy.dialects.postgresql import UUID
import uuid
class GUID(TypeDecorator):
"""Platform-independent GUID type.
Uses PostgreSQL's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value).int
else:
# hexstring
return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(value)
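# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Shows how the GUID type above could back a UUID primary-key column. The table and
# column names are hypothetical assumptions, not taken from this project.
from sqlalchemy import Column, MetaData, String, Table

example_metadata = MetaData()
example_users = Table(
    'example_users', example_metadata,
    Column('id', GUID(), primary_key=True, default=uuid.uuid4),
    Column('name', String(80)),
)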
|
gpl-3.0
| 666,983,228,280,939,600
| 27.777778
| 53
| 0.590338
| false
| 4.539474
| false
| false
| false
|
HPCGISLab/NAWS
|
workflow.py
|
1
|
7938
|
#!/usr/bin/python
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook (eshook@kent.edu)
"""
import os
import datetime
import time
import re
import subprocess
from Queue import Queue
#from threading import Thread
import threading
import sys,getopt
'''
The workflow script accepts a tasklist file, which contains a list of taskfiles.
A task may represent a simulation of an ABM or climate model. Tasks can be run
simultaneously if there are no dependencies, or ordered when dependencies exist.
Tasks may also include pre-processing or post-processing tasks.
'''
# TODO: Logging may be useful if the workflow becomes long
# TODO: Currently num_threads is user-defined, which controls the number of threads used to launch tasks.
#       However, it would be better to include in the taskfile the number of cores needed
#       and to define the number of cores available, enabling the workflow system to manage core allocation
# Global variables
# The number of threads used to handle tasks is passed as a parameter
num_threads=0
# Array of threads (so they can be killed if needed)
threads=[]
# Array of task workflow numbers (one per thread/worker)
threadtasknums=[]
# Task queue
taskqueue=Queue()
# This function handles executing a task defined by a taskfile
def runtask(taskfile):
# Read and parse the taskfile with the following format
# Note additional parameters will likely be added based on need (e.g., CWD, data-dir)
'''
program: /path/to/executable_with_a_name
parameters: param1 -Optionalconfiguration param2 -AnotherParameter
'''
with open(taskfile,'r') as f:
# Set the required parameters as None for error checking at the end
program=None
parameters=None
for line in f:
if line.startswith("program:"):
# Extract the program location from after the colon (split(":",1)[1]) with whitespace removed (strip())
program=line.split(":",1)[1].strip()
#print "Program="+program
if line.startswith("parameters:"):
# Extract the parameter string from after the colon (split(":",1)[1]) with whitespace removed (strip())
parameters=line.split(":",1)[1].strip()
#print "Parameters="+parameters
# Error checking for required parameters
if program==None:
raise Exception("program missing in taskfile",taskfile)
if parameters==None:
raise Exception("parameters missing in taskfile",taskfile)
print "Calling program="+program,parameters
'''
In future versions that have defined input,output,stdout,etc.
there could be more logic here to:
- run each model in a defined directory
- output stdout,stderr in the directory
- package up output files for easier transfer
- ...
'''
returncode=subprocess.check_call(program+" "+parameters,shell=True)
# A task worker loops while there are tasks left in the taskqueue
# Input parameter is a thread id (tid)
def taskworker(tid):
while not taskqueue.empty():
taskfile=taskqueue.get()
tasknum=taskfile.split("/",1)[1].split(".",1)[0].strip()
tasknum=re.sub("\D", "", tasknum)
#print "tid=",tid
threadtasknums[tid]=int(tasknum)
# While there is a dependency problem (lower order task numbers are still being processed)
# then spintwait
mintasknum=min(threadtasknums)
while threadtasknums[tid]>mintasknum:
#print "min=",minthreadtasknum,"min(array)=",min(*threadtasknums),"nums[",i,"]=",threadtasknums[i]
#if(threadtasknums[tid]<=min(*threadtasknums)): # If this task number is less than or equal to the minimum
# break # then there are no dependencies, so you can break out of this infinite loop
time.sleep(1) # this is a spin-wait loop
mintasknum=min(*threadtasknums)
print "Thread",tid,"running",taskfile,"at",str(datetime.datetime.now())
try:
runtask(taskfile)
except:
exit(1)
taskqueue.task_done()
threadtasknums[tid]=999999 # Set the tasknum for tid to 999999 so it doesn't influence dependencies
print "Thread",tid,"quitting, because taskqueue is empty"
# Main program code
def main():
print "Starting node workflow"
try:
opts,args=getopt.getopt(sys.argv[1:],"n:t:",["numthreads=","tasklist="])
except getopt.GetoptError:
print "workflow.py -n <number of threads to launch> -t <tasklistfile>"
sys.exit(1)
# Set model filename and experiment name based on command-line parameter
num_threads=0
tasklistfile=""
for opt, arg in opts:
if opt in ("-n", "--numthreads"):
num_threads=int(arg)
if opt in ("-t", "--tasklist"):
tasklistfile=arg
err=0
if num_threads<=0:
print " [ ERROR ] Number of threads must be greater than 0"
err=1
if tasklistfile=="":
print " [ ERROR ] Must provide tasklistfile"
err=1
if err==1:
print "workflow.py -n <number of threads to launch> -t <tasklistfile>"
sys.exit(1)
print "Executing in current directory :",os.getcwd()
print "Reading tasklist file"
with open(tasklistfile,'r') as f:
taskfiles = f.readlines()
f.close()
# tasksdir = 'tasks/'
# taskfiles = os.listdir(tasksdir) # Contains a list of task files to process
taskfiles.sort()
print "Starting task queue"
for taskfile in taskfiles:
taskqueue.put(taskfile.strip())
print "Task queue contains ",taskqueue.qsize()," tasks"
# Start the workflow engine
# Currently the logic is simple -> one task==one thread==one core but that will need
# to be modified to account for multithreaded models (agent-based and climate)
# so eventually this will need to parse the task to determine the number of cores
# needed by the task and dynamically manage the number of tasks running simultaneously
print "Starting ",num_threads," threads"
for i in range(num_threads):
threadtasknums.append(-1)
t=threading.Thread(target=taskworker,args=(i,))
t.daemon=True
t.setDaemon(True)
t.start()
threads.append(t)
# Now we wait until all of the tasks are finished.
print "Waiting for threads to finish"
# Normally you can use a blocking .join, but then you cannot kill the process
# So instead we spin-wait and catch ^C so a user can kill this process.
# while threading.activeCount() > 0:
# time.sleep(20)
while taskqueue.qsize()>0:
time.sleep(1)
print "taskqueue size",taskqueue.qsize()
''' # FIXME: Need to clean up this code, which was used for testing ^C
try:
time.sleep(5) # Wait 5 seconds before checking again
# FIXME: In production this should be changed to 30
# If Ctrl+C or other error, kill all of the threads
except:
while not taskqueue.empty(): # Empty the queue
taskqueue.get()
for i in threads:
i.kill_received=True
i.kill()
exit(1)
'''
print "Joining taskqueue"
# At this point all of the tasks should be finished so we join them
notfinished=1
while notfinished==1:
notfinished=0
for i in range(num_threads):
if threadtasknums[i]<999999:
notfinished=1
time.sleep(1)
#while not taskqueue.join(1):
# time.sleep(1)
print "Finished node workflow"
# Run main
if __name__=="__main__":
main()
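# --- Illustrative input sketch (editor's addition, not part of the original script) ---
# Based on the parsing in runtask() and the getopt handling in main(); the file names
# below are hypothetical assumptions.
#
# tasklist.txt (one taskfile path per line; the digits in each name give the
# dependency order enforced by taskworker()):
#     tasks/task1.txt
#     tasks/task2.txt
#
# tasks/task1.txt (fields read by runtask()):
#     program: /bin/echo
#     parameters: hello world
#
# Invocation with two worker threads:
#     python workflow.py -n 2 -t tasklist.txt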
|
bsd-3-clause
| 6,810,888,699,355,148,000
| 35.75
| 120
| 0.644621
| false
| 4.11722
| false
| false
| false
|
ecolell/aquire
|
setup.py
|
1
|
2553
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import os
import subprocess
from setuptools.command import easy_install
def parse_requirements(filename):
return list(filter(lambda line: (line.strip())[0] != '#',
[line.strip() for line in open(filename).readlines()]))
def calculate_version():
# Fetch version from git tags, and write to version.py.
# Also, when git is not available (PyPi package), use stored version.py.
version_py = os.path.join(os.path.dirname(__file__), 'version.py')
try:
version_git = subprocess.check_output(["git", "describe"]).rstrip()
except Exception:
with open(version_py, 'r') as fh:
version_git = (open(version_py).read()
.strip().split('=')[-1].replace('"', ''))
version_msg = ('# Do not edit this file, pipeline versioning is '
'governed by git tags')
with open(version_py, 'w') as fh:
fh.write(version_msg + os.linesep + "__version__=" + version_git)
return version_git
requirements = parse_requirements('requirements.txt')
version_git = calculate_version()
def get_long_description():
readme_file = 'README.md'
if not os.path.isfile(readme_file):
return ''
# Try to transform the README from Markdown to reStructuredText.
try:
easy_install.main(['-U', 'pyandoc==0.0.1'])
import pandoc
pandoc.core.PANDOC_PATH = 'pandoc'
doc = pandoc.Document()
doc.markdown = open(readme_file).read()
description = doc.rst
except Exception:
description = open(readme_file).read()
return description
setup(
name='aquire',
version=version_git,
author=u'Eloy Adonis Colell',
author_email='eloy.colell@gmail.com',
packages=['aquire'],
url='https://github.com/ecolell/aquire',
license='MIT',
description=('A Python library that allows downloading files '
'from the internet and showing progress in the console.'),
long_description=get_long_description(),
zip_safe=True,
install_requires=requirements,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: File Transfer Protocol (FTP)",
],
)
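# --- Illustrative note (editor's addition, not part of the original setup.py) ---
# When git is available, calculate_version() rewrites version.py with content of the
# form below; the tag value "v0.0.1" is a hypothetical assumption.
#
#     # Do not edit this file, pipeline versioning is governed by git tags
#     __version__=v0.0.1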
|
mit
| 4,310,986,713,263,461,400
| 33.04
| 78
| 0.614179
| false
| 3.915644
| false
| false
| false
|
crispycret/crispys_webkit
|
tests/urls.py
|
1
|
3789
|
import unittest
from crispys_webkit.urls import LazyUrl
stackoverflow_url = 'http://stackoverflow.com/'
def create_stackoverflow_lazyurl():
return LazyUrl(stackoverflow_url)
class LazyUrlMixin(object):
def check_stackoverflow_url(self, url):
self.assertEqual(url.scheme, 'http')
self.assertEqual(url.host, 'stackoverflow.com')
self.assertEqual(url.path, '/')
self.assertEqual(str(url), 'http://stackoverflow.com/')
class LazyUrlCreationTests(LazyUrlMixin, unittest.TestCase):
#### Object Instantiation ################################
def test_create_lazy_url(self):
""" Create a normal LazyUrl """
url = LazyUrl('http://stackoverflow.com/')
self.check_stackoverflow_url(url)
def test_create_lazy_url_with_bad_scheme(self):
""" use a scheme that is not allowed """
url = LazyUrl('ftp://stackoverflow.com')
self.check_stackoverflow_url(url)
def test_create_lazy_url_with_no_scheme(self):
""" don't use a scheme """
url = LazyUrl('stackoverflow.com')
self.check_stackoverflow_url(url)
##########################################################
class LazyUrlGetSetTests(LazyUrlMixin, unittest.TestCase):
#### Set Methods #########################################
def test_set_scheme_with_bad_scheme(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_scheme('ssh')
self.assertEqual(url.scheme, 'http')
self.assertEqual(str(url), 'http://stackoverflow.com/')
def test_set_scheme_with_good_scheme(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_scheme('https')
self.assertEqual(url.scheme, 'https')
self.assertEqual(str(url), 'https://stackoverflow.com/')
def test_set_host(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_host('news.ycombinator.com')
self.assertEqual(str(url), 'http://news.ycombinator.com/')
def test_set_path(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_path('/user/1234/crispycret')
self.assertIn(stackoverflow_url, str(url))
self.assertEqual(url.path, '/user/1234/crispycret')
self.assertEqual(str(url), 'http://stackoverflow.com/user/1234/crispycret')
def test_set_params(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_params('price')
self.assertEqual(str(url), 'http://stackoverflow.com/;price')
def test_set_query(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_query('id=123')
self.assertEqual(str(url), 'http://stackoverflow.com/?id=123')
def test_set_fragment(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_fragment('someLabel')
self.assertIn(stackoverflow_url, str(url))
self.assertEqual(url.fragment, 'someLabel')
self.assertEqual(str(url), 'http://stackoverflow.com/#someLabel')
##########################################################
class LazyUrlMethodTests(LazyUrlMixin, unittest.TestCase):
def test_get_full_path(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_path('question/55555/SomeQuestion')
url.set_fragment('bookmark')
self.assertEqual(url.get_full_path(), '/question/55555/SomeQuestion#bookmark')
def test_clear_full_path(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_scheme('https')
url.set_path('question/55555/SomeQuestion')
url.set_params('details')
url.set_query('id=1')
url.set_fragment('bookmark')
self.assertEqual(str(url), 'https://stackoverflow.com/question/55555/SomeQuestion;details?id=1#bookmark')
url.clear_full_path()
self.assertEqual(str(url), 'https://stackoverflow.com/')
if __name__ == '__main__':
unittest.main()
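# --- Illustrative usage sketch (editor's addition, inferred from the assertions above) ---
# The behaviour shown is an assumption pieced together from the tests, not from
# LazyUrl's own documentation.
#
#     url = LazyUrl('stackoverflow.com')       # scheme defaults to 'http'
#     url.set_scheme('https')
#     url.set_path('/user/1234/crispycret')
#     str(url)  # -> 'https://stackoverflow.com/user/1234/crispycret'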
|
mit
| 4,662,887,234,070,625,000
| 31.393162
| 107
| 0.691475
| false
| 3.235696
| true
| false
| false
|
Pointedstick/ReplicatorG
|
skein_engines/skeinforge-44/fabmetheus_utilities/xml_simple_reader.py
|
1
|
25544
|
"""
The xml_simple_reader.py script is an xml parser that can parse a line separated xml text.
This xml parser will read a line separated xml text and produce a tree of the xml with a document element. Each element can have an attribute table, childNodes, a class name, parentNode, text and a link to the document element.
This example gets an xml tree for the xml file boolean.xml. This example is run in a terminal in the folder which contains boolean.xml and xml_simple_reader.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> fileName = 'boolean.xml'
>>> file = open(fileName, 'r')
>>> xmlText = file.read()
>>> file.close()
>>> from xml_simple_reader import DocumentNode
>>> xmlParser = DocumentNode(fileName, xmlText)
>>> print( xmlParser )
?xml, {'version': '1.0'}
ArtOfIllusion, {'xmlns:bf': '//babelfiche/codec', 'version': '2.0', 'fileversion': '3'}
Scene, {'bf:id': 'theScene'}
materials, {'bf:elem-type': 'java.lang.Object', 'bf:list': 'collection', 'bf:id': '1', 'bf:type': 'java.util.Vector'}
..
many more lines of the xml tree
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import xml_simple_writer
import cStringIO
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalGetAccessibleAttributeSet = set('getPaths getPreviousVertex getPreviousElementNode getVertexes parentNode'.split())
def createAppendByText(parentNode, xmlText):
'Create and append the child nodes from the xmlText.'
monad = OpenMonad(parentNode)
for character in xmlText:
monad = monad.getNextMonad(character)
def createAppendByTextb(parentNode, xmlText):
'Create and append the child nodes from the xmlText.'
monad = OpenMonad(parentNode)
for character in xmlText:
monad = monad.getNextMonad(character)
def getDocumentNode(fileName):
'Get the document from the file name.'
xmlText = getFileText(fileName)
return DocumentNode(fileName, xmlText)
def getFileText(fileName, printWarning=True, readMode='r'):
'Get the entire text of a file.'
try:
file = open(fileName, readMode)
fileText = file.read()
file.close()
return fileText
except IOError:
if printWarning:
print('The file ' + fileName + ' does not exist.')
return ''
class CDATASectionMonad:
'A monad to handle a CDATASection node.'
def __init__(self, input, parentNode):
'Initialize.'
self.input = input
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
self.input.write(character)
if character == '>':
inputString = self.input.getvalue()
if inputString.endswith(']]>'):
textContent = '<%s\n' % inputString
self.parentNode.childNodes.append(CDATASectionNode(self.parentNode, textContent))
return OpenMonad(self.parentNode)
return self
class CDATASectionNode:
'A CDATASection node.'
def __init__(self, parentNode, textContent=''):
'Initialize.'
self.parentNode = parentNode
self.textContent = textContent
def __repr__(self):
'Get the string representation of this CDATASection node.'
return self.textContent
def addToIdentifierDictionaries(self):
'Add the element to the owner document identifier dictionaries.'
pass
def addXML(self, depth, output):
'Add xml for this CDATASection node.'
output.write(self.textContent)
def appendSelfToParent(self):
'Append self to the parentNode.'
self.parentNode.appendChild(self)
def copyXMLChildNodes(self, idSuffix, parentNode):
'Copy the xml childNodes.'
pass
def getAttributes(self):
'Get the attributes.'
return {}
def getChildNodes(self):
'Get the empty set.'
return []
def getCopy(self, idSuffix, parentNode):
'Copy the xml element, set its dictionary and add it to the parentNode.'
copy = self.getCopyShallow()
copy.parentNode = parentNode
copy.appendSelfToParent()
return copy
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return CDATASectionNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#cdata-section'
def getNodeType(self):
'Get the node type.'
return 4
def getOwnerDocument(self):
'Get the owner document.'
return self.parentNode.getOwnerDocument()
def getTextContent(self):
'Get the text content.'
return self.textContent
def removeChildNodesFromIDNameParent(self):
'Remove the childNodes from the id and name dictionaries and the childNodes.'
pass
def removeFromIDNameParent(self):
'Remove this from the id and name dictionaries and the childNodes of the parentNode.'
if self.parentNode != None:
self.parentNode.childNodes.remove(self)
def setParentAddToChildNodes(self, parentNode):
'Set the parentNode and add this to its childNodes.'
self.parentNode = parentNode
if self.parentNode != None:
self.parentNode.childNodes.append(self)
attributes = property(getAttributes)
childNodes = property(getChildNodes)
nodeName = property(getNodeName)
nodeType = property(getNodeType)
ownerDocument = property(getOwnerDocument)
class CommentMonad(CDATASectionMonad):
'A monad to handle a comment node.'
def getNextMonad(self, character):
'Get the next monad.'
self.input.write(character)
if character == '>':
inputString = self.input.getvalue()
if inputString.endswith('-->'):
textContent = '<%s\n' % inputString
self.parentNode.childNodes.append(CommentNode(self.parentNode, textContent))
return OpenMonad(self.parentNode)
return self
class CommentNode(CDATASectionNode):
'A comment node.'
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return CommentNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#comment'
def getNodeType(self):
'Get the node type.'
return 8
nodeName = property(getNodeName)
nodeType = property(getNodeType)
class DocumentNode:
'A class to parse an xml text and store the elements.'
def __init__(self, fileName, xmlText):
'Initialize.'
self.childNodes = []
self.fileName = fileName
self.idDictionary = {}
self.nameDictionary = {}
self.parentNode = None
self.tagDictionary = {}
self.xmlText = xmlText
createAppendByText(self, xmlText)
def __repr__(self):
'Get the string representation of this xml document.'
output = cStringIO.StringIO()
for childNode in self.childNodes:
childNode.addXML(0, output)
return output.getvalue()
def appendChild(self, elementNode):
'Append child elementNode to the child nodes.'
self.childNodes.append(elementNode)
elementNode.addToIdentifierDictionaries()
return elementNode
def getAttributes(self):
'Get the attributes.'
return {}
def getCascadeBoolean(self, defaultBoolean, key):
'Get the cascade boolean.'
return defaultBoolean
def getCascadeFloat(self, defaultFloat, key):
'Get the cascade float.'
return defaultFloat
def getDocumentElement(self):
'Get the document element.'
if len(self.childNodes) == 0:
return None
return self.childNodes[-1]
def getImportNameChain(self, suffix=''):
'Get the import name chain with the suffix at the end.'
return suffix
def getNodeName(self):
'Get the node name.'
return '#document'
def getNodeType(self):
'Get the node type.'
return 9
def getOriginalRoot(self):
'Get the original reparsed document element.'
if evaluate.getEvaluatedBoolean(True, self.documentElement, 'getOriginalRoot'):
return DocumentNode(self.fileName, self.xmlText).documentElement
return None
def getOwnerDocument(self):
'Get the owner document.'
return self
attributes = property(getAttributes)
documentElement = property(getDocumentElement)
nodeName = property(getNodeName)
nodeType = property(getNodeType)
ownerDocument = property(getOwnerDocument)
class DocumentTypeMonad(CDATASectionMonad):
'A monad to handle a document type node.'
def getNextMonad(self, character):
'Get the next monad.'
self.input.write(character)
if character == '>':
inputString = self.input.getvalue()
if inputString.endswith('?>'):
textContent = '%s\n' % inputString
self.parentNode.childNodes.append(DocumentTypeNode(self.parentNode, textContent))
return OpenMonad(self.parentNode)
return self
class DocumentTypeNode(CDATASectionNode):
'A document type node.'
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return DocumentTypeNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#forNowDocumentType'
def getNodeType(self):
'Get the node type.'
return 10
nodeName = property(getNodeName)
nodeType = property(getNodeType)
class ElementEndMonad:
'A monad to look for the end of an ElementNode tag.'
def __init__(self, parentNode):
'Initialize.'
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '>':
return TextMonad(self.parentNode)
return self
class ElementLocalNameMonad:
'A monad to set the local name of an ElementNode.'
def __init__(self, character, parentNode):
'Initialize.'
self.input = cStringIO.StringIO()
self.input.write(character)
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '[':
if (self.input.getvalue() + character).startswith('![CDATA['):
self.input.write(character)
return CDATASectionMonad(self.input, self.parentNode)
if character == '-':
if (self.input.getvalue() + character).startswith('!--'):
self.input.write(character)
return CommentMonad(self.input, self.parentNode)
if character.isspace():
self.setLocalName()
return ElementReadMonad(self.elementNode)
if character == '/':
self.setLocalName()
self.elementNode.appendSelfToParent()
return ElementEndMonad(self.elementNode.parentNode)
if character == '>':
self.setLocalName()
self.elementNode.appendSelfToParent()
return TextMonad(self.elementNode)
self.input.write(character)
return self
def setLocalName(self):
'Set the class name.'
self.elementNode = ElementNode(self.parentNode)
self.elementNode.localName = self.input.getvalue().lower().strip()
class ElementNode:
'An xml element.'
def __init__(self, parentNode=None):
'Initialize.'
self.attributes = {}
self.childNodes = []
self.localName = ''
self.parentNode = parentNode
self.xmlObject = None
def __repr__(self):
'Get the string representation of this xml document.'
return '%s\n%s\n%s' % (self.localName, self.attributes, self.getTextContent())
def _getAccessibleAttribute(self, attributeName):
'Get the accessible attribute.'
global globalGetAccessibleAttributeSet
if attributeName in globalGetAccessibleAttributeSet:
return getattr(self, attributeName, None)
return None
def addSuffixToID(self, idSuffix):
'Add the suffix to the id.'
if 'id' in self.attributes:
self.attributes['id'] += idSuffix
def addToIdentifierDictionaries(self):
'Add the element to the owner document identifier dictionaries.'
ownerDocument = self.getOwnerDocument()
importNameChain = self.getImportNameChain()
idKey = self.getStrippedAttributesValue('id')
if idKey != None:
ownerDocument.idDictionary[importNameChain + idKey] = self
nameKey = self.getStrippedAttributesValue('name')
if nameKey != None:
euclidean.addElementToListDictionaryIfNotThere(self, importNameChain + nameKey, ownerDocument.nameDictionary)
for tagKey in self.getTagKeys():
euclidean.addElementToListDictionaryIfNotThere(self, tagKey, ownerDocument.tagDictionary)
def addXML(self, depth, output):
'Add xml for this elementNode.'
innerOutput = cStringIO.StringIO()
xml_simple_writer.addXMLFromObjects(depth + 1, self.childNodes, innerOutput)
innerText = innerOutput.getvalue()
xml_simple_writer.addBeginEndInnerXMLTag(self.attributes, depth, innerText, self.localName, output, self.getTextContent())
def appendChild(self, elementNode):
'Append child elementNode to the child nodes.'
self.childNodes.append(elementNode)
elementNode.addToIdentifierDictionaries()
return elementNode
def appendSelfToParent(self):
'Append self to the parentNode.'
self.parentNode.appendChild(self)
def copyXMLChildNodes(self, idSuffix, parentNode):
'Copy the xml childNodes.'
for childNode in self.childNodes:
childNode.getCopy(idSuffix, parentNode)
def getCascadeBoolean(self, defaultBoolean, key):
'Get the cascade boolean.'
if key in self.attributes:
value = evaluate.getEvaluatedBoolean(None, self, key)
if value != None:
return value
return self.parentNode.getCascadeBoolean(defaultBoolean, key)
def getCascadeFloat(self, defaultFloat, key):
'Get the cascade float.'
if key in self.attributes:
value = evaluate.getEvaluatedFloat(None, self, key)
if value != None:
return value
return self.parentNode.getCascadeFloat(defaultFloat, key)
def getChildNodesByLocalName(self, localName):
'Get the childNodes which have the given class name.'
childNodesByLocalName = []
for childNode in self.childNodes:
if localName.lower() == childNode.getNodeName():
childNodesByLocalName.append(childNode)
return childNodesByLocalName
def getChildNodesByLocalNameRecursively(self, localName):
'Get the childNodes which have the given class name recursively.'
childNodesByLocalName = self.getChildNodesByLocalName(localName)
for childNode in self.childNodes:
childNodesByLocalName += childNode.getChildNodesByLocalNameRecursively(localName)
return childNodesByLocalName
def getCopy(self, idSuffix, parentNode):
'Copy the xml element, set its dictionary and add it to the parentNode.'
matrix4X4 = matrix.getBranchMatrixSetElementNode(self)
attributesCopy = self.attributes.copy()
attributesCopy.update(matrix4X4.getAttributes('matrix.'))
copy = self.getCopyShallow(attributesCopy)
copy.setParentAddToChildNodes(parentNode)
copy.addSuffixToID(idSuffix)
copy.addToIdentifierDictionaries()
self.copyXMLChildNodes(idSuffix, copy)
return copy
def getCopyShallow(self, attributes=None):
'Copy the xml element and set its dictionary and parentNode.'
if attributes == None: # guard against the mutable default argument pitfall, where the same dictionary would be shared across calls
attributes = {}
copyShallow = ElementNode(self.parentNode)
copyShallow.attributes = attributes
copyShallow.localName = self.localName
return copyShallow
def getDocumentElement(self):
'Get the document element.'
return self.getOwnerDocument().getDocumentElement()
def getElementNodeByID(self, idKey):
'Get the xml element by id.'
idDictionary = self.getOwnerDocument().idDictionary
idKey = self.getImportNameChain() + idKey
if idKey in idDictionary:
return idDictionary[idKey]
return None
def getElementNodesByName(self, nameKey):
'Get the xml elements by name.'
nameDictionary = self.getOwnerDocument().nameDictionary
nameKey = self.getImportNameChain() + nameKey
if nameKey in nameDictionary:
return nameDictionary[nameKey]
return None
def getElementNodesByTag(self, tagKey):
'Get the xml elements by tag.'
tagDictionary = self.getOwnerDocument().tagDictionary
if tagKey in tagDictionary:
return tagDictionary[tagKey]
return None
def getFirstChildByLocalName(self, localName):
'Get the first childNode which has the given class name.'
for childNode in self.childNodes:
if localName.lower() == childNode.getNodeName():
return childNode
return None
def getIDSuffix(self, elementIndex=None):
'Get the id suffix from the dictionary.'
suffix = self.localName
if 'id' in self.attributes:
suffix = self.attributes['id']
if elementIndex == None:
return '_%s' % suffix
return '_%s_%s' % (suffix, elementIndex)
def getImportNameChain(self, suffix=''):
'Get the import name chain with the suffix at the end.'
importName = self.getStrippedAttributesValue('_importName')
if importName != None:
suffix = '%s.%s' % (importName, suffix)
return self.parentNode.getImportNameChain(suffix)
def getNodeName(self):
'Get the node name.'
return self.localName
def getNodeType(self):
'Get the node type.'
return 1
def getOwnerDocument(self):
'Get the owner document.'
return self.parentNode.getOwnerDocument()
def getParser(self):
'Get the parser.'
return self.getOwnerDocument()
def getPaths(self):
'Get all paths.'
if self.xmlObject == None:
return []
return self.xmlObject.getPaths()
def getPreviousElementNode(self):
'Get previous ElementNode if it exists.'
if self.parentNode == None:
return None
previousElementNodeIndex = self.parentNode.childNodes.index(self) - 1
if previousElementNodeIndex < 0:
return None
return self.parentNode.childNodes[previousElementNodeIndex]
def getPreviousVertex(self, defaultVector3=None):
'Get previous vertex if it exists.'
if self.parentNode == None:
return defaultVector3
if self.parentNode.xmlObject == None:
return defaultVector3
if len(self.parentNode.xmlObject.vertexes) < 1:
return defaultVector3
return self.parentNode.xmlObject.vertexes[-1]
def getStrippedAttributesValue(self, keyString):
'Get the stripped attribute value if the length is at least one, otherwise return None.'
if keyString in self.attributes:
strippedAttributesValue = self.attributes[keyString].strip()
if len(strippedAttributesValue) > 0:
return strippedAttributesValue
return None
def getSubChildWithID( self, idReference ):
'Get the childNode which has the idReference.'
for childNode in self.childNodes:
if 'bf:id' in childNode.attributes:
if childNode.attributes['bf:id'] == idReference:
return childNode
subChildWithID = childNode.getSubChildWithID( idReference )
if subChildWithID != None:
return subChildWithID
return None
def getTagKeys(self):
'Get stripped tag keys.'
if 'tags' not in self.attributes:
return []
tagKeys = []
tagString = self.attributes['tags']
if tagString.startswith('='):
tagString = tagString[1 :]
if tagString.startswith('['):
tagString = tagString[1 :]
if tagString.endswith(']'):
tagString = tagString[: -1]
for tagWord in tagString.split(','):
tagKey = tagWord.strip()
if tagKey != '':
tagKeys.append(tagKey)
return tagKeys
def getTextContent(self):
'Get the text from the child nodes.'
if len(self.childNodes) == 0:
return ''
firstNode = self.childNodes[0]
if firstNode.nodeType == 3:
return firstNode.textContent
return ''
def getValueByKey( self, key ):
'Get value by the key.'
if key in evaluate.globalElementValueDictionary:
return evaluate.globalElementValueDictionary[key](self)
if key in self.attributes:
return evaluate.getEvaluatedLinkValue(self, self.attributes[key])
return None
def getVertexes(self):
'Get the vertexes.'
if self.xmlObject == None:
return []
return self.xmlObject.getVertexes()
def getXMLProcessor(self):
'Get the xmlProcessor.'
return self.getDocumentElement().xmlProcessor
def linkObject(self, xmlObject):
'Link self to xmlObject and add xmlObject to archivableObjects.'
self.xmlObject = xmlObject
self.xmlObject.elementNode = self
self.parentNode.xmlObject.archivableObjects.append(self.xmlObject)
def printAllVariables(self):
'Print all variables.'
print('attributes')
print(self.attributes)
print('childNodes')
print(self.childNodes)
print('localName')
print(self.localName)
print('parentNode')
print(self.parentNode.getNodeName())
print('text')
print(self.getTextContent())
print('xmlObject')
print(self.xmlObject)
print('')
def printAllVariablesRoot(self):
'Print all variables and the document element variables.'
self.printAllVariables()
documentElement = self.getDocumentElement()
if documentElement != None:
print('')
print('Root variables:')
documentElement.printAllVariables()
def removeChildNodesFromIDNameParent(self):
'Remove the childNodes from the id and name dictionaries and the childNodes.'
childNodesCopy = self.childNodes[:]
for childNode in childNodesCopy:
childNode.removeFromIDNameParent()
def removeFromIDNameParent(self):
'Remove this from the id and name dictionaries and the childNodes of the parentNode.'
self.removeChildNodesFromIDNameParent()
idKey = self.getStrippedAttributesValue('id')
if idKey != None:
idDictionary = self.getOwnerDocument().idDictionary
idKey = self.getImportNameChain() + idKey
if idKey in idDictionary:
del idDictionary[idKey]
nameKey = self.getStrippedAttributesValue('name')
if nameKey != None:
euclidean.removeElementFromListTable(self, self.getImportNameChain() + nameKey, self.getOwnerDocument().nameDictionary)
for tagKey in self.getTagKeys():
euclidean.removeElementFromListTable(self, tagKey, self.getOwnerDocument().tagDictionary)
if self.parentNode != None:
self.parentNode.childNodes.remove(self)
def setParentAddToChildNodes(self, parentNode):
'Set the parentNode and add this to its childNodes.'
self.parentNode = parentNode
if self.parentNode != None:
self.parentNode.childNodes.append(self)
def setTextContent(self, textContent=''):
'Set the text content of the child nodes.'
if len(self.childNodes) == 0:
self.childNodes.append(TextNode(self, textContent))
return
firstNode = self.childNodes[0]
if firstNode.nodeType == 3:
firstNode.textContent = textContent
self.childNodes.append(TextNode(self, textContent))
nodeName = property(getNodeName)
nodeType = property(getNodeType)
ownerDocument = property(getOwnerDocument)
textContent = property(getTextContent)
class ElementReadMonad:
'A monad to read the attributes of the ElementNode tag.'
def __init__(self, elementNode):
'Initialize.'
self.elementNode = elementNode
def getNextMonad(self, character):
'Get the next monad.'
if character.isspace():
return self
if character == '/':
self.elementNode.appendSelfToParent()
return ElementEndMonad(self.elementNode.parentNode)
if character == '>':
self.elementNode.appendSelfToParent()
return TextMonad(self.elementNode)
return KeyMonad(character, self.elementNode)
class KeyMonad:
'A monad to set the key of an attribute of an ElementNode.'
def __init__(self, character, elementNode):
'Initialize.'
self.input = cStringIO.StringIO()
self.input.write(character)
self.elementNode = elementNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '=':
return ValueMonad(self.elementNode, self.input.getvalue().strip())
self.input.write(character)
return self
class OpenChooseMonad(ElementEndMonad):
'A monad to choose the next monad.'
def getNextMonad(self, character):
'Get the next monad.'
if character.isspace():
return self
if character == '?':
input = cStringIO.StringIO()
input.write('<?')
return DocumentTypeMonad(input, self.parentNode)
if character == '/':
return ElementEndMonad(self.parentNode.parentNode)
return ElementLocalNameMonad(character, self.parentNode)
class OpenMonad(ElementEndMonad):
'A monad to handle the open tag character.'
def getNextMonad(self, character):
'Get the next monad.'
if character == '<':
return OpenChooseMonad(self.parentNode)
return self
class TextMonad:
'A monad to handle the open tag character and set the text.'
def __init__(self, parentNode):
'Initialize.'
self.input = cStringIO.StringIO()
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '<':
inputString = self.input.getvalue().strip()
if len(inputString) > 0:
self.parentNode.childNodes.append(TextNode(self.parentNode, inputString))
return OpenChooseMonad(self.parentNode)
self.input.write(character)
return self
class TextNode(CDATASectionNode):
'A text node.'
def addXML(self, depth, output):
'Add xml for this text node.'
pass
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return TextNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#text'
def getNodeType(self):
'Get the node type.'
return 3
nodeName = property(getNodeName)
nodeType = property(getNodeType)
class ValueMonad:
'A monad to set the value of an attribute of an ElementNode.'
def __init__(self, elementNode, key):
'Initialize.'
self.elementNode = elementNode
self.input = cStringIO.StringIO()
self.key = key
self.quoteCharacter = None
def getNextMonad(self, character):
'Get the next monad.'
if self.quoteCharacter == None:
if character == '"' or character == "'":
self.quoteCharacter = character
return self
if self.quoteCharacter == character:
self.elementNode.attributes[self.key] = self.input.getvalue()
return ElementReadMonad(self.elementNode)
self.input.write(character)
return self
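# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Mirrors the boolean.xml example from the module docstring, but with an inline string
# so no external file is needed; the element and attribute names are made up.
#
#     xmlText = '<scene id="theScene"><object size="2"/></scene>'
#     document = DocumentNode('inline.xml', xmlText)
#     root = document.documentElement
#     root.localName          # 'scene'
#     root.attributes         # {'id': 'theScene'}
#     len(root.childNodes)    # 1 (the 'object' child element)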
|
gpl-2.0
| -4,695,995,734,451,366,000
| 29.555024
| 228
| 0.743541
| false
| 3.431028
| false
| false
| false
|
fablabnbg/inkscape-chain-paths
|
setup.py
|
1
|
1767
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sudo zypper in python-setuptools
# http://docs.python.org/2/distutils/setupscript.html#installing-additional-files
#
from __future__ import print_function
import sys,os,glob,re
from distutils.core import setup
from setuptools.command.test import test as TestCommand
import chain_paths # for author(), version()
e = chain_paths.ChainPaths()
m = re.match('(.*)\s+<(.*)>', e.author())
# print('.',['Makefile'] + glob.glob('chain_paths*'))
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(name='chain-paths',
version = e.version(),
description='Inkscape extension making long continuous paths',
author=m.groups()[0],
author_email=m.groups()[1],
url='https://github.com/jnweiger/inkscape-chain-paths',
scripts=filter(os.path.isfile, ['chain_paths.py', 'chain_paths.inx', 'README.md' ] ),
packages=['chain-paths'],
license='GPL-2.0',
classifiers=[
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
cmdclass={'test': PyTest},
long_description="".join(open('README.md').readlines()),
# tests_require=['pytest', 'scipy'],
#packages=['pyPdf','reportlab.pdfgen','reportlab.lib.colors','pygame.font' ],
#
)
|
gpl-2.0
| 4,223,029,791,495,990,000
| 31.127273
| 91
| 0.624222
| false
| 3.635802
| true
| false
| false
|
haandol/algorithm_in_python
|
tree/check_full_bin_tree.py
|
1
|
1126
|
# http://www.geeksforgeeks.org/check-whether-binary-tree-full-binary-tree-not/
from __init__ import Node
def solution(root):
if not root:
return True
if root.left and not root.right:
return False
if root.right and not root.left:
return False
return solution(root.left) and solution(root.right)
if __name__ == '__main__':
root = Node(1)
print(solution(root))
root = Node(1)
root.left = Node(2)
print(solution(root))
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
print(solution(root))
root = Node(10)
root.left = Node(20)
root.right = Node(30)
root.left.right = Node(40)
root.left.left = Node(50)
root.right.left = Node(60)
root.right.right = Node(70)
root.left.left.left = Node(80)
root.left.left.right = Node(90)
root.left.right.left = Node(80)
root.left.right.right = Node(90)
root.right.left.left = Node(80)
root.right.left.right = Node(90)
root.right.right.left = Node(80)
root.right.right.right = Node(90)
print(solution(root))
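# --- Expected output sketch (editor's addition) ---
# Assuming the Node class imported from __init__ defaults .left and .right to None,
# the four checks above should print, in order:
#     True   (a lone root is a full binary tree)
#     False  (the root has only a left child)
#     False  (node 2 has a left child but no right child)
#     True   (every node has either zero or two children)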
|
mit
| 7,824,998,618,597,669,000
| 20.653846
| 78
| 0.615453
| false
| 3.059783
| false
| false
| false
|
TinyOS-Camp/DDEA-DEV
|
Archive/[14_10_11] Dr_Jung_Update/pre_bn_state_processing.py
|
1
|
91559
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 5 15:28:13 2014
@author: deokwooj
"""
from __future__ import division # To force floating point division
import numpy as np
from scipy import stats
from scipy.interpolate import interp1d
from sklearn import mixture
#from sklearn.cluster import Ward
from sklearn.cluster import KMeans
import time
##################################################################
# Custom library
##################################################################
from data_tools import *
from shared_constants import *
import pprint
import lib_bnlearn as rbn
def X_INPUT_to_states(xinput,CORR_VAL_OUT=0,PARALLEL = False):
#import pdb;pdb.set_trace()
sinput=np.zeros(xinput.shape)
num_samples=xinput.shape[0]
num_sensors=xinput.shape[1]
if num_samples <num_sensors:
print 'Warning: number of samples is smaller than number of sensors'
print 'Mapping',xinput.shape, ' matrix to discrete states '
for k,samples in enumerate(xinput.T):
obs=samples[:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum= state_retrieval(obs,max_num_cluster=6,est_method='kmean',PARALLEL=PARALLEL,PRINTSHOW=False)
high_peak_label_idx=np.argmax(model.cluster_centers_)
low_peak_label_idx=np.argmin(model.cluster_centers_)
high_peak_idx=np.nonzero(label==high_peak_label_idx)[0]
sinput[high_peak_idx,k]=1
low_peak_idx=np.nonzero(label==low_peak_label_idx)[0]
sinput[low_peak_idx,k]=-1
corr_state_val=[]
if CORR_VAL_OUT==1:
print 'Compute Correlation Score....'
for k,(row1, row2) in enumerate(zip(sinput.T, xinput.T)):
corr_state_val.append(round(stats.pearsonr(row1,row2)[0],3))
corr_state_val=np.array(corr_state_val)
return sinput,corr_state_val
def interpolation_measurement(data_dict,input_names,err_rate=1,sgm_bnd=20):
print 'interpolation starts....'
measurement_point_set=[]
num_of_discrete_val=[]
sampling_interval_set=[]
num_type_set=[]
err_rate=1;sgm_bnd=20
"""
try:
import pdb;pdb.set_trace()
except ValueError:
import pdb;pdb.set_trace()
"""
for i,key_name in enumerate(input_names):
print key_name,'.....'
t_=np.array(data_dict[key_name][2][0])
if len(t_) == 0:
continue
intpl_intv=np.ceil((t_[-1]-t_[0]) /len(t_))
sampling_interval_set.append(intpl_intv)
val_=np.array(data_dict[key_name][2][1])
num_of_discrete_val_temp=len(set(val_))
num_of_discrete_val.append(num_of_discrete_val_temp)
# filtering outlier
# assuming err_rate% of errors and a sgm_bnd x standard deviation rule
outlier_idx=outlier_detect(val_,err_rate,sgm_bnd)
if len(outlier_idx)>0:
print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
t_=np.delete(t_,outlier_idx)
val_=np.delete(val_,outlier_idx)
t_new=np.r_[t_[0]:t_[-1]:intpl_intv]
"""
if num_of_discrete_val_temp<MIN_NUM_VAL_FOR_FLOAT:
num_type=INT_TYPE
val_new=fast_nearest_interp(t_new, t_,val_)
else:
num_type=FLOAT_TYPE
val_new = np.interp(t_new, t_,val_)
"""
num_type=check_data_type(data_dict[key_name][2][1])
if num_type==INT_TYPE:
val_new=fast_nearest_interp(t_new, t_,val_)
else:
#num_type=FLOAT_TYPE
val_new = np.interp(t_new, t_,val_)
c=np.vstack([t_new,val_new])
measurement_point_set.append(c)
num_type_set.append(num_type)
print '-----------------------------------------------------------------'
#return measurement_point_set,num_type_set,num_of_discrete_val,sampling_interval_set
return measurement_point_set,np.array(num_type_set)
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def rolling_window_label_mode(label,r_window):
if (r_window/2)==int(r_window/2):
r_window=int(r_window+1)
#raise NameError('length of window size must be odd')
offset=int(r_window/2)
rw_label_temp=stats.mode(rolling_window(label, r_window),1)[0]
head= rw_label_temp[0]*np.ones([offset,1])
body=rw_label_temp
tail= rw_label_temp[-1]*np.ones([offset,1])
rw_label=np.r_[head,body,tail]
return rw_label
def rolling_window_label_binary(label,r_window):
if (r_window/2)==int(r_window/2):
r_window=int(r_window+1)
#raise NameError('length of window size must be odd')
offset=int(r_window/2)
rw_label_temp=np.array([ np.sum(temp)/r_window for temp in rolling_window(label, r_window)])
#import pdb;pdb.set_trace()
# rw_label_temp=stats.mode(rolling_window(label, r_window),1)[0]
head= rw_label_temp[0]*np.ones([offset,1])
body=rw_label_temp
tail= rw_label_temp[-1]*np.ones([offset,1])
rw_label=np.r_[head,body[:,np.newaxis],tail]
return rw_label
"""
def state_retrieval(obs,max_num_cluster=6,est_method='kmean'):
#print '========================================================================='
#print 'Retrieving discrete states from data using ',est_method, ' model...'
#print '========================================================================='
score=np.zeros(max_num_cluster)
model_set=[]
#print 'try ',max_num_cluster, ' clusters..... '
for num_cluster in range(max_num_cluster):
#print 'Try ',num_cluster+1, ' clusters '
#print '-----------------------------------'
if est_method=='kmean':
kmean=KMeans(n_clusters=num_cluster+1).fit(obs)
model_set.append(kmean)
#import pdb;pdb.set_trace()
score[num_cluster]=np.sum(kmean.score(obs))
elif est_method=='gmm':
gmm = mixture.GMM(n_components=num_cluster+1).fit(obs)
model_set.append(gmm)
score[num_cluster]=np.sum(gmm.score(obs))
else:
raise NameError('not supported est_method')
score_err_sum=np.zeros(max_num_cluster)
#print 'Finding knee points of log likelihood...'
for i in range(max_num_cluster):
a_0=score[:(i)]
if len(a_0)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_0)),a_0)
sqr_sum_err0=sum(((slope*np.arange(len(a_0))+ intercept)-a_0)**2)
else:
sqr_sum_err0=0
a_1=score[(i):]
if len(a_1)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_1)),a_1)
sqr_sum_err1=sum(((slope*np.arange(len(a_1))+ intercept)-a_1)**2)
else:
sqr_sum_err1=0
score_err_sum[i]=sqr_sum_err0+sqr_sum_err1
# Optimum number of clusters.
min_idx=np.argmin(score_err_sum)
opt_num_cluster=min_idx+1
#print 'opt_num_cluster: ' , opt_num_cluster
if est_method=='kmean':
label=model_set[min_idx].labels_
elif est_method=='gmm':
label=model_set[min_idx].predict(obs)
else:
raise NameError('not supported est_method')
return label,opt_num_cluster, model_set[min_idx],score,score_err_sum
"""
def cluster_state_retrieval(tup):
obs = tup[0]
num_clusters = tup[1]
est_method = tup[2]
#print 'num clusters = ' + str(num_clusters)
if est_method=='kmean':
kmean=KMeans(n_clusters=num_clusters).fit(obs)
model = kmean
score=compute_log_ll(kmean.labels_,obs)
#score=-1*np.log(-1*np.sum(kmean.score(obs)))
elif est_method=='gmm':
gmm = mixture.GMM(n_components=num_clusters).fit(obs)
model = gmm
score=np.sum(gmm.score(obs))
#print 'Done ' + str(num_clusters)
return (num_clusters-1, [model,score])
from multiprocessing import Pool
def compute_log_ll(label_in,obs_in):
log_ll_sum=0
for i in range(label_in.max()+1):
idx=np.nonzero(label_in==i)[0]
val_set=obs_in[idx]
log_val=stats.norm.logpdf(val_set,loc=np.mean(val_set),scale=np.std(val_set))
log_ll_sum=log_ll_sum+sum(log_val[log_val!=-np.inf])
return log_ll_sum
def state_retrieval(obs,max_num_cluster=6,off_set=0,est_method='kmean',PARALLEL = False,PRINTSHOW=False):
if PRINTSHOW==True:
print '========================================================================='
print 'Retrieving discrete states from data using ',est_method, ' model...'
print '========================================================================='
print 'try ',max_num_cluster, ' clusters..... '
score=np.zeros(max_num_cluster)
model_set=[]
if not PARALLEL:
for num_cluster in range(max_num_cluster):
#print 'Try ',num_cluster+1, ' clusters '
#print '-----------------------------------'
if est_method=='kmean':
kmean=KMeans(n_clusters=num_cluster+1).fit(obs)
model_set.append(kmean)
#import pdb;pdb.set_trace()
#score[num_cluster]=-1*np.log(-1*np.sum(kmean.score(obs)))
#score[num_cluster]=kmean.score(obs)
#score[num_cluster]=kmean.score(obs)-.5*(num_cluster+1)*1*log10(len(obs))
#log_ll_val=compute_log_ll(kmean.labels_,obs)
score[num_cluster]=compute_log_ll(kmean.labels_,obs)
elif est_method=='gmm':
gmm = mixture.GMM(n_components=num_cluster+1).fit(obs)
model_set.append(gmm)
score[num_cluster]=np.sum(gmm.score(obs))
else:
raise NameError('not supported est_method')
else:
if PRINTSHOW==True:
print 'Parallel enabled...'
model_set = [0] * max_num_cluster
score = [0] * max_num_cluster
p = Pool(max_num_cluster)
params = [(obs,i+1,est_method) for i in range(max_num_cluster)]
model_dict = dict(p.map(cluster_state_retrieval,params))
for k,v in model_dict.iteritems():
model_set[k] = v[0]
score[k] = v[1]
p.close()
p.join()
score_err_sum=np.zeros(max_num_cluster)
if PRINTSHOW==True:
print 'Finding knee points of log likelihood...'
for i in range(max_num_cluster):
a_0=score[:(i)]
if len(a_0)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_0)),a_0)
sqr_sum_err0=sum(((slope*np.arange(len(a_0))+ intercept)-a_0)**2)
else:
sqr_sum_err0=0
a_1=score[(i):]
if len(a_1)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_1)),a_1)
sqr_sum_err1=sum(((slope*np.arange(len(a_1))+ intercept)-a_1)**2)
else:
sqr_sum_err1=0
score_err_sum[i]=sqr_sum_err0+sqr_sum_err1
# Optimum number of clusters.
min_idx=np.argmin(score_err_sum)
opt_num_cluster=min_idx+1
if PRINTSHOW==True:
print 'opt_num_cluster: ' , opt_num_cluster
if est_method=='kmean':
label=model_set[min_idx].labels_
elif est_method=='gmm':
label=model_set[min_idx].predict(obs)
else:
raise NameError('not supported est_method')
return label,opt_num_cluster, model_set[min_idx],score,score_err_sum
########################################################################
# Function: Irregular event table retrieval
########################################################################
def mesurement_to_states(measurement_point_set,alpha=0.5,max_num_cluster=8,est_method='kmean',PARALLEL=False):
print '==============================================================================='
print 'Mapping measurement to states by ', est_method, ', Parallel Enabled: ',str(PARALLEL)
print '==============================================================================='
model_set=[]
label_set=[]
irr_event_set=[]
start_t=time.time()
for k,data_set in enumerate(measurement_point_set):
print 'working on ',k,'th measurement point... '
val_new=data_set[1]
val_set=list(set(val_new))
num_of_discrete_val=len(val_set)
t_new=data_set[0]
# average sampling interval
sr=(t_new[-1]-t_new[0]) /len(t_new)
# transformed observation data for state retrieval
if num_of_discrete_val<10:
print 'the number of discrete values is less than 10'
print 'no states retrieval needed '
cnt_num_occurances=[len(np.nonzero(val_new==state)[0]) for state in val_set]
#import pdb;pdb.set_trace()
label=val_new
label_set.append(np.vstack([t_new, label]))
min_label_idx=val_set[np.argmin(cnt_num_occurances)]
irregualr_event=np.zeros(label.shape)
irregualr_event[label==min_label_idx]=1
elif num_of_discrete_val<100:
print 'the number of discrete values is less than 100'
print 'use K-MEAN clustering by default '
obs=abs(np.diff(val_new))[:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=state_retrieval(obs,max_num_cluster,est_method='kmean',PARALLEL=PARALLEL,PRINTSHOW=False)
max_label_idx=np.argmax(model.cluster_centers_)
max_label=np.zeros(label.shape)
max_label[label==max_label_idx]=1
irregualr_event=np.r_[max_label[0],max_label]
else:
obs=abs(np.diff(val_new))[:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=state_retrieval(obs,max_num_cluster,est_method=est_method,PARALLEL=PARALLEL,PRINTSHOW=False)
#import pdb;pdb.set_trace()
if est_method=='kmean':
#label,opt_num_cluster,model,score,score_err_sum=state_retrieval_kmean(obs,max_num_cluster)
max_label_idx=np.argmax(model.cluster_centers_)
elif est_method=='gmm':
#label,opt_num_cluster,model,score,score_err_sum=state_retrieval(obs,max_num_cluster)
max_label_idx=np.argmax(model.means_)
else:
raise NameError('not supported est_method')
model_set.append(model)
label_set.append(np.vstack([t_new[1:], label]))
# Irregular state mapping
max_label=np.zeros(label.shape)
max_label[label==max_label_idx]=1
irregualr_event=np.r_[max_label[0],max_label]
irregualr_event_inter_arr_times=np.diff(t_new[irregualr_event==1])
if (len(irregualr_event_inter_arr_times)>10) and (num_of_discrete_val>10):
loc_x, scale_x =stats.expon.fit(irregualr_event_inter_arr_times)
inter_arr_times_alpha=stats.expon.ppf(alpha,loc=loc_x,scale=scale_x)
window_size=int(inter_arr_times_alpha/sr)
rw_irregualr_event=rolling_window_label_binary(irregualr_event,window_size)[:,0]
irr_event_set.append(np.vstack([t_new, rw_irregualr_event]))
else:
irr_event_set.append(np.vstack([t_new, irregualr_event]))
end_proc_t=time.time()
print 'the time of processing mesurement_to_states ', end_proc_t-start_t, ' sec'
return irr_event_set
#########################################################################
#########################################################################
# Binary Table Extraction
#########################################################################
def get_common_time_reference(ts_list):
list_len = len(ts_list)
start_ts_list = np.array([ts_list[i][0] for i in range(list_len)])
end_ts_list = np.array([ts_list[i][-1] for i in range(list_len)])
common_start_ts = np.max(start_ts_list)
common_end_ts = np.min(end_ts_list)
common_ts = []
for i in range(list_len):
#common_ts = common_ts + ts_list[i]
common_ts = np.hstack([common_ts,ts_list[i]])
# remove duplicated ts
common_ts = np.asarray(sorted(list(set(common_ts))))
common_ts = np.delete(common_ts,np.nonzero(common_ts < common_start_ts)[0])
common_ts = np.delete(common_ts,np.nonzero(common_ts > common_end_ts)[0])
return common_ts
def interpolate_state_nearest(available_ts,available_values, intplt_ts):
f = interp1d(available_ts,available_values,kind='nearest')
## Interpolation
print 'len of intplt points: ' + str(len(intplt_ts))
intplt_values = f(intplt_ts)
return intplt_values
def find_consecutive_dup_rows(mat):
nrows = len(mat)
dup_idx_list = []
for r_idx in range(nrows-1,0,-1):
if all(mat[r_idx] == mat[r_idx-1]):
dup_idx_list.append(r_idx)
return dup_idx_list
def binary_table_extract(irr_event_set, binary_state_cut_off=-1, rm_dup=False):
#print 'this extracts binary states based on state transitions of composite binary states (default) or a reference time using interpolation '
#print 'return N-by-P matrix where N is the number of transitions and P is the number of sensors'
num_of_sensors=len(irr_event_set)
#num_of_transition=1 # to be updated
state_table = []
"""
Steps to find state transition
1. Find the common time frame of all sensors
start = max{ts[0]} for all ts: list of time of each sensor
end = min{ts[-1]} for all ts: list of time of each sensor
2. Find all ts that at least one sensor data available [start,end]
TS = Union of all ts, within [start_end]
3. Interpolate sensor state for each sensor during [start,end]
Before interpolation, convert states into binary (optional)
4. Remove duplicated state transitions (optional)
"""
### Step 1+2: Get common time reference
ts_list = []
for i in range(num_of_sensors):
ts_list.append(irr_event_set[i][0])
    #print ts_list  # debug output; can be very verbose for long time series
common_ts = get_common_time_reference(ts_list)
### interpolate state for each sensor, during common_ts
for i in range(num_of_sensors):
# convert state probability to binary state
if (binary_state_cut_off >= 0):
positive_prob_idx=np.nonzero(irr_event_set[i][1] > binary_state_cut_off)[0]
irr_event_set[i][1][:]=0
irr_event_set[i][1][positive_prob_idx]=1
intplt_states = interpolate_state_nearest(irr_event_set[i][0],irr_event_set[i][1],common_ts)
state_table.append(intplt_states)
state_table = np.asarray(state_table).T
# column: sensor, row: state sample
### Remove duplicated state transitions
if rm_dup==True:
dup_idx_list = find_consecutive_dup_rows(state_table)
state_table = np.delete(state_table,dup_idx_list,axis=0)
common_ts = np.delete(common_ts,dup_idx_list)
return common_ts,state_table
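# Hedged usage sketch (irr_event_set is assumed to be the output of the irregular-event extraction
# above: one 2-by-N array per sensor with row 0 holding timestamps and row 1 the states):
# common_ts, state_table = binary_table_extract(irr_event_set, binary_state_cut_off=0.5, rm_dup=True)
# state_table is len(common_ts)-by-len(irr_event_set); column j holds the interpolated binary state
# of sensor j at each common timestamp, with consecutive duplicate rows removed.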
###################################################################################
# Probability Computation Functions
###################################################################################
# Example Codes
###################################################################################
# data_mat_set=np.array([[1,1,0],[1,1,0],[0,1,0],[1,1,1],[0,1,0],[1,0,0],[0,0,0],[0,1,0]])
# data_mat_set2=np.array([[1,11,100],[1,11,100],[0,11,100],[1,11,101],[0,11,100],[1,10,100],[0,10,100],[0,11,100]])
#compute_joint_prob(data_mat_set,[0,1],[[0,1],[0]])
#compute_cond_prob(data_mat_set,[0],[[1]],[1],[[1]])
#state_tmp,prob_tmp=compute_effect_prob(data_mat_set,[0],[1],[[1]])
#state_tmp,likelihood_tmp=compute_cause_likelihood(data_mat_set,[0],[1],[[1]])
def compute_joint_prob(data_mat,state_idx_set,state_val_set):
num_samples=data_mat.shape[0]
num_states=data_mat.shape[1]
if len(state_idx_set)!=len(state_val_set):
raise NameError('the length of state_set and state_val must be same')
joint_idx=set(range(num_samples))
for k,state_idx in enumerate(state_idx_set):
samples=data_mat[:,state_idx]
sub_joint_idx=set([])
for state_val in state_val_set[k]:
sub_joint_idx=sub_joint_idx| set(np.nonzero(samples==state_val)[0])
joint_idx=joint_idx & sub_joint_idx
    if num_samples==0:
        return 0
    # cast to float so the probability is not truncated by integer division
    joint_prob=float(len(joint_idx))/num_samples
    return joint_prob
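# Worked example using data_mat_set from the example comments above (values assume true/float
# division, as in the num_samples check and float() cast inside compute_joint_prob):
# compute_joint_prob(data_mat_set,[0,1],[[1],[1]])  ->  P(col0=1, col1=1) = 3/8 = 0.375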
#def compute_cond_prob(data_mat,state_idx_set,state_val_set,cond_idx_set):
def compute_cond_prob(data_mat,state_idx_set,state_val_set,cond_idx_set,cond_val_set):
joint_state_idx_set=state_idx_set+cond_idx_set
joint_state_val_set=state_val_set+cond_val_set
all_prob=compute_joint_prob(data_mat,joint_state_idx_set,joint_state_val_set)
partial_prob=compute_joint_prob(data_mat,cond_idx_set,cond_val_set)
if partial_prob==0:
return 0
else:
return all_prob/partial_prob
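# Worked example (same data_mat_set as above, float division assumed):
# compute_cond_prob(data_mat_set,[0],[[1]],[1],[[1]])  ->  P(col0=1 | col1=1) = (3/8)/(6/8) = 0.5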
def compute_effect_prob(data_mat,effect_idx_set,cause_idx_set,cause_val_set):
# find f_B*(A)=P(A|B=B*)
# generate a set of all possible states
state_set=[]
for k,idx in enumerate(effect_idx_set):
#print idx, ':', list(set(data_mat[:,idx]))
#set(list(data_mat[idx,:]))
if k==0:
state_set=list(set(data_mat[:,idx]))
else:
state_set=pair_in_idx(state_set,list(set(data_mat[:,idx])))
prob_set=[]
for state_val in state_set:
#import pdb;pdb.set_trace()
if isinstance(state_val,list):
input_val_set=[[val] for val in state_val]
else:
input_val_set=[[state_val]]
prob_temp=compute_cond_prob(data_mat,effect_idx_set,input_val_set,cause_idx_set,cause_val_set)
prob_set.append(prob_temp)
return state_set,prob_set
def compute_cause_likelihood(data_mat,cause_idx_set,effect_idx_set,effect_val_set):
# find f_A*(B)=P(A=A*|B)
# generate a set of all possible states
state_set=[]
for k,idx in enumerate(cause_idx_set):
#print idx, ':', list(set(data_mat[:,idx]))
#set(list(data_mat[idx,:]))
#import pdb;pdb.set_trace()
if k==0:
state_set=list(set(data_mat[:,idx]))
else:
state_set=pair_in_idx(state_set,list(set(data_mat[:,idx])))
likelihood_set=[]
for state_val in state_set:
#import pdb;pdb.set_trace()
if isinstance(state_val,list):
input_val_set=[[val] for val in state_val]
else:
input_val_set=[[state_val]]
prob_temp=compute_cond_prob(data_mat,effect_idx_set,effect_val_set,cause_idx_set,input_val_set)
likelihood_set.append(prob_temp)
return state_set,likelihood_set
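# Worked example (same data_mat_set, float division assumed): likelihood of the effect col2=1
# for each observed state of the cause col0:
# compute_cause_likelihood(data_mat_set,[0],[2],[[1]])
#   -> likelihood 0.0 when col0=0 and 0.25 when col0=1 (only row [1,1,1] has col2=1)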
def irr_state_mapping(state_mat,weight_coeff=10):
peak_prob=np.array([compute_joint_prob(state_mat,[k],[[PEAK]]) for k in range(state_mat.shape[1])])
low_prob=np.array([compute_joint_prob(state_mat,[k],[[LOW_PEAK]]) for k in range(state_mat.shape[1])])
no_prob=np.array([compute_joint_prob(state_mat,[k],[[NO_PEAK]]) for k in range(state_mat.shape[1])])
irr_state_prob=np.zeros(state_mat.shape[1])
irr_state_mat=np.zeros(state_mat.shape)
skewness_metric_sort=np.zeros(peak_prob.shape)
idx_state_map=[PEAK,NO_PEAK,LOW_PEAK]
for k,prob_set in enumerate(np.vstack([peak_prob,no_prob,low_prob]).T):
        # Processing the probability data for each sensor
prob_sort_idx=np.argsort(prob_set)
prob_sort=prob_set[prob_sort_idx]
#import pdb;pdb.set_trace()
# if k==16:
# import pdb;pdb.set_trace()
if weight_coeff*(prob_sort[0]+prob_sort[1]) <prob_sort[2]:
irr_prob=prob_sort[0]+prob_sort[1]
reg_prob=prob_sort[2]
irr_state_mat[(state_mat[:,k]==idx_state_map[prob_sort_idx[0]]) | (state_mat[:,k]==idx_state_map[prob_sort_idx[1]]),k]=1
else:
irr_prob=prob_sort[0]
reg_prob=prob_sort[1]+prob_sort[2]
irr_state_mat[state_mat[:,k]==idx_state_map[prob_sort_idx[0]],k]=1
temp=abs(irr_prob-reg_prob)/np.sqrt(reg_prob*irr_prob)
if temp<np.inf:
skewness_metric_sort[k]=temp
irr_state_prob[k]=irr_prob
desc_sort_idx=np.argsort(-1*skewness_metric_sort)
return irr_state_mat
#return irr_state_mat,irr_state_prob,skewness_metric_sort[desc_sort_idx],desc_sort_idx
###################################################################################
# Probability Analysis Functions
###################################################################################
def time_effect_analysis(data_mat,data_name,avgtime_names,s_name,DO_PLOT=False):
s_idx=data_name.index(s_name)
t_idx=[[data_name.index(ntemp)] for ntemp in avgtime_names] #['MTH', 'WD', 'HR']
m_list=list(set(data_mat[:,data_name.index('MTH')]))
state_list=list(set(data_mat[:,s_idx]))
s_prob_log=[[]]*len(yearMonths)
    print 'Monthly analysis...'
for m_idx in yearMonths:
print monthDict[m_idx]
if m_idx not in m_list:
print 'no data for this month'
print '-----------------------------'
continue
prob_map=np.zeros([len(state_list),len(Week),len(DayHours)])
#for h_idx in DayHours:
start_t=time.time()
for dh_pair in pair_in_idx(Week,DayHours):
#state_tmp,prob_tmp=compute_effect_prob(data_mat,[s_idx],t_idx,[[m_idx],Weekday,[h_idx]])
state_tmp,prob_tmp=compute_effect_prob(data_mat,[s_idx],t_idx,[[m_idx],[dh_pair[0]],[dh_pair[1]]])
for state in state_list:
prob_map[state_list.index(state) ,dh_pair[0],dh_pair[1]]=prob_tmp[state_tmp.index(state)]
end_t=time.time()
print 'spend ' ,end_t-start_t,'secs'
s_prob_log[m_idx]=prob_map
#m_prob_log
print '-----------------------------'
#s_m_data_valid=[ False if sum(prob)==0 else True for prob in s_prob_log]
valid_mon_list=[month_val for month_val in yearMonths if len(s_prob_log[month_val])>0]
if DO_PLOT==True:
plot_time_effect(s_name,state_list,valid_mon_list,s_prob_log)
valid_mon_pair=pair_in_idx(valid_mon_list)
time_effect_mat_dist=np.zeros([len(state_list),len(valid_mon_pair)])
for i,state_idx in enumerate(range(len(state_list))):
for j,mon_idx_pair in enumerate(valid_mon_pair):
val_temp=norm(np.array(s_prob_log[mon_idx_pair[0]][state_idx])-np.array(s_prob_log[mon_idx_pair[1]][state_idx]))
time_effect_mat_dist[i,j]=val_temp
score_in_structure=[]
for k,mon_idx in enumerate(valid_mon_list):
score_val=[]
for state_idx,state_val in enumerate(state_list):
mat_input=np.array(s_prob_log[mon_idx][state_idx])
dst_col=find_norm_dist_matrix(mat_input)
dst_row=find_norm_dist_matrix(mat_input.T)
score_val.append(dst_col.mean()+dst_row.mean())
score_in_structure.append(np.sum(score_val))
return state_list,s_prob_log,time_effect_mat_dist,score_in_structure,valid_mon_list,state_list
def plot_time_effect(s_name,state_list,valid_mon_list,s_prob_log):
plt.figure(s_name)
for i,state_val in enumerate(state_list):
for j, mon_val in enumerate(valid_mon_list):
plt_idx=len(valid_mon_list)*i+j+1
plt.subplot(len(state_list),len(valid_mon_list),plt_idx)
im = plt.imshow(s_prob_log[mon_val][state_list.index(state_val)],interpolation='none',vmin=0, vmax=1,aspect='auto')
if set(stateDict.keys())==set(state_list):
plt.title(monthDict[mon_val]+' , state: '+ stateDict[state_val])
else:
plt.title(monthDict[mon_val]+' , state: '+ str(state_val))
plt.yticks(weekDict.keys(),weekDict.values())
plt.colorbar()
#plt.xlabel('Hours of day')
if i == len(state_list) - 1:
plt.xlabel('Hours of day')
#plt.subplots_adjust(right=0.95)
#cbar_ax = plt.add_axes([0.95, 0.15, 0.05, 0.7])
#cax,kw = mpl.colorbar.make_axes([ax for ax in pl t.axes().flat])
#plt.colorbar(im, cax=cax, **kw)
#plt.colorbar(im,cbar_ax)
def time_effect_analysis_all(data_mat,data_name,avgtime_names,avgsensor_names):
monthly_structure_score=[]
monthly_variability=[]
for s_name in avgsensor_names:
print s_name
print '==============================='
state_list,s_prob_log,time_effect_mat_dist,score_in_structure,valid_mon_list,state_list\
=time_effect_analysis(data_mat,data_name,avgtime_names,s_name,DO_PLOT=False)
monthly_variability.append(time_effect_mat_dist.mean())
monthly_structure_score.append(score_in_structure)
return np.array(monthly_variability),np.array(monthly_structure_score)
###############################################################################################
# Analysis - Sensitivity of the state distribution to parameters
# Use the Bhattacharyya distance to compare two probability distributions:
#   D_b(p,q) = -ln(BC(p,q)), where BC(p,q) = \sum_x \sqrt{p(x)q(x)}
# Because D_b does not satisfy the triangle inequality, we use the Hellinger distance D_h(p,q) = \sqrt{1-BC(p,q)}
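# Worked example: for p=[0.5,0.5] and q=[1,0], BC = sqrt(0.5*1)+sqrt(0.5*0) ~= 0.707,
# so D_h = sqrt(1-0.707) ~= 0.541; identical distributions give D_h=0, disjoint supports give D_h=1.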
###############################################################################################
def param_sensitivity(data_mat, data_name,avgsensor_names,wfactor,dst_type):
wfactor_prob_map=[]
wfactor_state_map=[]
wfactor_sensitivity=[]
wfactor_idx=data_name.index(wfactor)
wfactor_list=list(set(data_mat[:,wfactor_idx]))
for k,s_name in enumerate(avgsensor_names):
s_idx=data_name.index(s_name)
state_list=list(set(data_mat[:,s_idx]))
prob_map=np.zeros([len(state_list),len(wfactor_list)])
state_map=np.zeros([len(state_list),len(wfactor_list)])
for i,wfactor_state in enumerate(wfactor_list):
state_tmp,prob_tmp=compute_effect_prob(data_mat,[s_idx],[[wfactor_idx]],[[wfactor_state]])
state_map[:,i]=state_tmp
prob_map[:,i]=prob_tmp
wfactor_prob_map.append(np.round(prob_map,2))
wfactor_state_map.append(state_map)
D_=[]
for probset in pair_in_idx(prob_map.T,prob_map.T):
BC=min(1,sum(np.sqrt(probset[0]*probset[1])))
if dst_type=='b':
D_.append(-1*np.log(BC))
elif dst_type=='h':
D_.append(np.sqrt(1-BC))
elif dst_type=='v':
D_.append(0.5*min(1,sum(abs(probset[0]-probset[1]))))
else:
print 'error'; return
#import pdb;pdb.set_trace()
#BC=np.min(1,sum(np.sqrt(probset[0]*probset[1])))
#if dst_type=='b':
# D_=[-1*np.log(np.min(1,sum(np.sqrt(probset[0]*probset[1])))) for probset in pair_in_idx(prob_map.T,prob_map.T)]
#elif dst_type=='h':
# D_=[np.sqrt(1-np.min(1,sum(np.sqrt(probset[0]*probset[1])))) for probset in pair_in_idx(prob_map.T,prob_map.T)]
#else:
# print 'error'; return
wfactor_sensitivity.append(np.mean(D_))
return wfactor_prob_map,wfactor_state_map, wfactor_sensitivity,wfactor_list
###############################################################################################
def plot_weather_sensitivity(wf_type,wf_prob_map,wf_state_map,wf_sensitivity,wf_list,\
avgsensor_names,Conditions_dict,Events_dict,sort_opt='desc',num_of_picks=9):
# Plotting bar graph
if sort_opt=='desc':
argsort_idx=np.argsort(wf_sensitivity)[::-1]
elif sort_opt=='asc':
argsort_idx=np.argsort(wf_sensitivity)
else:
print 'error in type'
return
wf_sort_idx=np.argsort(wf_list)
width = 0.5 # the width of the bars
color_list=['b','g','r','c','m','y','k','w']
num_col=floor(np.sqrt(num_of_picks))
num_row=ceil(num_of_picks/num_col)
for i in range(num_of_picks):
subplot(num_col,num_row,i+1)
bar_idx=argsort_idx[i]
prob_bar_val=wf_prob_map[bar_idx]
prob_bar_name=avgsensor_names[bar_idx]
prob_bar_wf_state=[str(wf) for wf in np.array(wf_list)[wf_sort_idx]]
prob_bar_sensor_state=wf_state_map[bar_idx]
N =prob_bar_sensor_state.shape[0]
M =prob_bar_sensor_state.shape[1]
ind = np.arange(N) # the x locations for the groups
state_ticks=[]
state_label=[]
for k,(val,state) in enumerate(zip(prob_bar_val[:,wf_sort_idx].T,prob_bar_sensor_state[:,wf_sort_idx].T)):
x=ind+k*5
x_sort_idx=np.argsort(state)
bar(x, val[x_sort_idx], width, color=color_list[k%len(color_list)])
state_ticks=state_ticks+list(x)
state_label=state_label+list(state[x_sort_idx].astype(int))
#category_ticks=category_ticks+[int(mean(x))]
if wf_type=='T':
start_str='TP';end_str='C'
statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='D':
start_str='DP';end_str='C'
statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='H':
start_str='HD';end_str='%'
statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='E':
start_str='EV';end_str=''
statek=\
Events_dict.keys()[Events_dict.values().index(int(prob_bar_wf_state[k]))];fontsize_val=6
if statek=='': statek='none'
#statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='C':
start_str='CD';end_str=''
statek=prob_bar_wf_state[k];fontsize_val=10
#statek=\
#Conditions_dict.keys()[Conditions_dict.values().index(int(prob_bar_wf_state[k]))];fontsize_val=6
if statek=='': statek='none'
init_str=''
else:
print 'no such type'
return
if k==0:
category_str= init_str
else:
category_str=statek+ end_str
plt.text(int(mean(x)),1.1,category_str,fontsize=fontsize_val)
plt.xticks(state_ticks,state_label )
plt.xlabel('State',fontsize=10)
plt.ylabel('Probability',fontsize=10)
ylim([0,1.3]); title(prob_bar_name,fontsize=10)
def wt_sensitivity_analysis(data_state_mat,data_time_mat,data_weather_mat,sensor_names,time_names,\
Conditions_dict,Events_dict,bldg_tag,trf_tag,weather_names,dict_dir,dst_t='h'):
import pprint
import radar_chart
data_mat = np.hstack([data_state_mat,data_time_mat])
data_name = sensor_names+time_names
    print 'Parameter sensitivity for Months....'
mth_prob_map,mth_state_map, mth_sensitivity,mth_list\
= param_sensitivity(data_mat,data_name,sensor_names,'MTH',dst_type=dst_t)
    print 'Parameter sensitivity for Days....'
wday_prob_map,wday_state_map,wday_sensitivity,wday_list\
= param_sensitivity(data_mat,data_name,sensor_names,'WD',dst_type=dst_t)
    print 'Parameter sensitivity for Hours....'
dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list\
= param_sensitivity(data_mat,data_name,sensor_names,'HR',dst_type=dst_t)
#Month Sensitivty bar Plot.
tf_tuple_mth=('MTH',mth_prob_map,mth_state_map,mth_sensitivity,mth_list)
tf_tuple_wday=('WD',wday_prob_map,wday_state_map,wday_sensitivity,wday_list)
tf_tuple_dhr=('HR',dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list)
tf_sstv_tuple=np.array([tf_tuple_mth[3],tf_tuple_wday[3],tf_tuple_dhr[3]])
max_tf_sstv=tf_sstv_tuple[tf_sstv_tuple<np.inf].max()*2
tf_sstv_tuple[tf_sstv_tuple==np.inf]=max_tf_sstv
tf_sstv_total=np.sum(tf_sstv_tuple,0)
arg_idx_s=np.argsort(tf_sstv_total)[::-1]
arg_idx_is=np.argsort(tf_sstv_total)
num_of_picks=9
print 'Most time sensitive sensors'
print '---------------------------------------------'
Time_Sensitive_Sensors=list(np.array(sensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(Time_Sensitive_Sensors)
####################################################################
    ## Radar Plotting for Time_Sensitive_Sensors
####################################################################
sensor_no = len(sensor_names)
    # replace 'inf' sensitivities with the capped maximum (max_tf_sstv)
sen_mth = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_mth[3]]
sen_wday = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_wday[3]]
sen_dhr = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_dhr[3]]
SEN = [[sen_mth[i], sen_wday[i], sen_dhr[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-num_of_picks:] # Best 9 sensors
spoke_labels = ["Month", "Day", "Hour"]
data = [SEN[i] for i in idx]
sensor_labels = [sensor_names[i] for i in idx]
radar_chart.subplot(data, spoke_labels, sensor_labels, saveto=dict_dir+bldg_tag+trf_tag+'time_radar.png')
######################################################################
#1. effect prob - weather dependecy analysis
######################################################################
data_mat = np.hstack([data_state_mat,data_weather_mat])
# Temporary for correcting month change
#data_mat[:,-3]=data_mat[:,-3]-1
data_name = sensor_names+weather_names
# State classification of weather data
temp_idx=data_name.index('TemperatureC')
dewp_idx=data_name.index('Dew PointC')
humd_idx=data_name.index('Humidity')
evnt_idx=data_name.index('Events')
cond_idx=data_name.index('Conditions')
######################################################################
# Weather state classification
######################################################################
weather_dict={}
for class_idx in [temp_idx,dewp_idx,humd_idx]:
obs=data_mat[:,class_idx][:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=\
state_retrieval(obs,max_num_cluster=10,est_method='kmean',PARALLEL=IS_USING_PARALLEL_OPT,PRINTSHOW=True)
if class_idx==temp_idx:
weather_dict.update({'Temp':model.cluster_centers_})
elif class_idx==dewp_idx:
weather_dict.update({'Dewp':model.cluster_centers_})
elif class_idx==humd_idx:
weather_dict.update({'Humd':model.cluster_centers_})
else:
print 'not found'
for label_id in range(label.max()+1):
label_idx=np.nonzero(label==label_id)[0]
data_mat[label_idx,class_idx]=np.round(model.cluster_centers_[label_id][0])
##################################################
# Reclassify the Condition states into clarity of the sky
##################################################
#Conditions_dict=data_dict['Conditions_dict'].copy()
#data_mat = np.hstack([avgdata_state_mat,avgdata_weather_mat])
cond_state=[[]]*6
cond_state[5]=['Clear'] # Clear
cond_state[4]=['Partly Cloudy','Scattered Clouds'] # 'Partly Cloudy'
cond_state[3]=['Mostly Cloudy','Overcast'] # 'Overcast'
cond_state[2]=['Light Drizzle','Mist', 'Shallow Fog', 'Patches of Fog',\
'Light Snow', 'Light Freezing Rain', 'Light Rain Showers','Light Freezing Fog','Light Snow Showers', 'Light Rain'] # Light Rain
cond_state[1]=['Rain','Rain Showers','Thunderstorms and Rain'\
,'Heavy Rain','Heavy Rain Showers','Drizzle', 'Heavy Drizzle', 'Fog'] # Heavy Rain
cond_state[0]=['Unknown']
cond_data_array=data_mat[:,cond_idx].copy()
for k in range(len(cond_state)):
for cond_str in cond_state[k]:
cond_val_old=Conditions_dict[cond_str]
idx_temp=np.nonzero(cond_data_array==cond_val_old)[0]
if len(idx_temp)>0:
data_mat[idx_temp,cond_idx]=k
#plt.plot(data_mat[:,cond_idx],'.')
Conditions_dict_temp={}
Conditions_dict_temp.update({'Clear':5})
Conditions_dict_temp.update({'Partly Cloudy':4})
Conditions_dict_temp.update({'Overcast':3})
Conditions_dict_temp.update({'Light Rain':2})
Conditions_dict_temp.update({'Heavy Rain':1})
Conditions_dict_temp.update({'Unknown':0})
# Abbr' of weather factor type is
weather_dict.update({'Cond':Conditions_dict_temp})
####################################################################
    # Reclassify the Event states into rain/snow/fog weather conditions
####################################################################
event_state=[[]]*4
event_state[0]=[''] # No event
event_state[1]=['Rain-Snow','Snow'] # Snow
event_state[2]=['Rain','Thunderstorm','Rain-Thunderstorm'] # Rain
event_state[3]=['Fog','Fog-Rain'] # Fog
event_data_array=data_mat[:,evnt_idx].copy()
for k in range(len(event_state)):
for event_str in event_state[k]:
event_val_old=Events_dict[event_str]
idx_temp=np.nonzero(event_data_array==event_val_old)[0]
if len(idx_temp)>0:
data_mat[idx_temp,evnt_idx]=k
Events_dict_temp={}
Events_dict_temp.update({'NoEvent':0})
Events_dict_temp.update({'Snow':1})
Events_dict_temp.update({'Rain':2})
Events_dict_temp.update({'Fog':3})
weather_dict.update({'Event':Events_dict_temp})
# T,D,H,E,C
    print 'Parameter sensitivity for TemperatureC....'
tempr_prob_map,tempr_state_map, tempr_sensitivity,tempr_list\
= param_sensitivity(data_mat,data_name,sensor_names,'TemperatureC',dst_type=dst_t)
    print 'Parameter sensitivity for Dew PointC....'
dewp_prob_map,dewp_state_map, dewp_sensitivity, dewp_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Dew PointC',dst_type=dst_t)
    print 'Parameter sensitivity for Humidity....'
humd_prob_map,humd_state_map, humd_sensitivity,humd_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Humidity',dst_type=dst_t)
    print 'Parameter sensitivity for Events....'
event_prob_map,event_state_map,event_sensitivity, event_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Events',dst_type=dst_t)
    print 'Parameter sensitivity for Conditions....'
cond_prob_map,cond_state_map,cond_sensitivity,cond_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Conditions',dst_type=dst_t)
wf_tuple_t=('T',tempr_prob_map,tempr_state_map,tempr_sensitivity,tempr_list)
wf_tuple_d=('D',dewp_prob_map,dewp_state_map,dewp_sensitivity,dewp_list)
wf_tuple_h=('H',humd_prob_map,humd_state_map,humd_sensitivity,humd_list)
wf_tuple_e=('E',event_prob_map,event_state_map,event_sensitivity,event_list)
wf_tuple_c=('C',cond_prob_map,cond_state_map,cond_sensitivity,cond_list)
wf_sstv_tuple=np.array([wf_tuple_t[3],wf_tuple_d[3],wf_tuple_h[3],wf_tuple_e[3],wf_tuple_c[3]])
max_wf_sstv=wf_sstv_tuple[wf_sstv_tuple<np.inf].max()*2
wf_sstv_tuple[wf_sstv_tuple==np.inf]=max_wf_sstv
wf_sstv_total=np.sum(wf_sstv_tuple,0)
arg_idx_s=np.argsort(wf_sstv_total)[::-1]
print 'Most weather sensitive sensors'
print '---------------------------------------------'
Weather_Sensitive_Sensors=list(np.array(sensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(Weather_Sensitive_Sensors)
####################################################################
## Radar Plotting for Weather_Sensitive_Sensors
####################################################################
sensor_no = len(sensor_names)
    # replace 'inf' sensitivities with the capped maximum (max_wf_sstv)
sen_t = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_t[3]]
sen_d = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_d[3]]
sen_h = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_h[3]]
sen_e = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_e[3]]
sen_c = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_c[3]]
SEN = [[sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
    idx = np.argsort(TOTAL_SEN)[-num_of_picks:] # Best num_of_picks sensors
spoke_labels = ["Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
data = [SEN[i] for i in idx]
sensor_labels = [sensor_names[i] for i in idx]
import radar_chart
radar_chart.subplot(data, spoke_labels, sensor_labels, saveto=dict_dir+bldg_tag+trf_tag+'weather_radar.png')
#radar_chart.plot(data, spoke_labels, sensor_labels, saveto="weather_radar.png")
####################################################################
## Bar Plotting for Weather and time sensitive_Sensors
####################################################################
import bar_chart
# Load from binaries
#sen_mth sen_wday sen_dhr sen_t sen_d sen_h sen_e sen_c
SEN = [[sen_mth[i],sen_wday[i],sen_dhr[i],sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-15:] # Best 15 sensors
#data = [[TOTAL_SEN[i] for i in idx]] * 8
data = [[np.array(SEN)[i,k] for i in idx] for k in range(8)]
labels = [[sensor_names[i] for i in idx]] * 8
titles = ["Month", "Day", "Hour", "Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
colors = ["b" if i < 3 else "g" for i in range(8)]
bar_chart.plot(data, labels, titles, colors, grid=True, savefig=dict_dir+bldg_tag+trf_tag+'bar.png', savereport=dict_dir+bldg_tag+trf_tag+'all_bar.csv')
####################################################################
    ## Radar Plotting for Time-Weather Sensitive Sensors
####################################################################
wtf_sstv_total=wf_sstv_total+tf_sstv_total
arg_idx_s=np.argsort(wtf_sstv_total)[::-1]
#arg_idx_is=np.argsort(wtf_sstv_total)
num_of_picks=9
print 'Most time-weather sensitive sensors'
print '---------------------------------------------'
WT_Sensitive_Sensors=list(np.array(sensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(WT_Sensitive_Sensors)
sensor_no = len(sensor_names)
    # sensitivity vectors ('inf' values were already capped above)
SEN = [[sen_mth[i], sen_wday[i], sen_dhr[i],sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-num_of_picks:] # Best 9 sensors
spoke_labels = ["Month", "Day", "Hour","Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
data = [SEN[i] for i in idx]
sensor_labels = [sensor_names[i] for i in idx]
radar_chart.subplot(data, spoke_labels, sensor_labels, saveto=dict_dir+bldg_tag+trf_tag+'time_weather_radar.png')
fig=plt.figure()
idx = np.argsort(TOTAL_SEN)[-(min(len(TOTAL_SEN),50)):] # Best 50 sensors
twf_sstv_tuple = np.array([SEN[i] for i in idx]).T
sensor_labels = [sensor_names[i] for i in idx]
#twf_sstv_tuple=np.vstack([tf_sstv_tuple,wf_sstv_tuple])
vmax_=twf_sstv_tuple.max()
vmin_=twf_sstv_tuple.min()
im=plt.imshow(twf_sstv_tuple,interpolation='none',vmin=vmin_, vmax=vmax_,aspect='equal')
y_label=['MTH', 'WD', 'HR','TemperatureC','Dew PointC','Humidity','Events', 'Conditions']
y_ticks=range(len(y_label))
plt.yticks(y_ticks,y_label)
x_label=sensor_labels
x_ticks=range(len(sensor_labels))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("top", "15%", pad="30%")
plt.colorbar(im, cax=cax,orientation='horizontal')
plt.savefig(dict_dir+bldg_tag+trf_tag+'time_weather_hmap.png')
wtf_tuples={}
wtf_tuples.update({'month':tf_tuple_mth})
wtf_tuples.update({'day':tf_tuple_wday})
wtf_tuples.update({'hour':tf_tuple_dhr})
wtf_tuples.update({'month':tf_tuple_mth})
wtf_tuples.update({'t':wf_tuple_t})
wtf_tuples.update({'d':wf_tuple_d})
wtf_tuples.update({'h':wf_tuple_h})
wtf_tuples.update({'e':wf_tuple_e})
wtf_tuples.update({'c':wf_tuple_c})
return wtf_tuples,weather_dict
def check_cond_state(all_cond_name,cond_state):
no_assn_key=[]
for key in all_cond_name:
print '------------------'
print key
num_cnt=0
for k in range(len(cond_state)):
if key in cond_state[k]:
num_cnt=num_cnt+1
print num_cnt
if num_cnt==0:
no_assn_key.append(key)
print '------------------'
print 'unassigned cond key ' ,no_assn_key
return no_assn_key
#all_cond_name=list(set(GW1_.Conditions_dict.keys()+GW2_.Conditions_dict.keys()\
#+VAK2_.Conditions_dict.keys()+VAK1_.Conditions_dict.keys()))
def check_event_state(all_event_name,event_state):
no_assn_key=[]
for key in all_event_name:
print '------------------'
print key
num_cnt=0
for k in range(len(event_state)):
if key in event_state[k]:
num_cnt=num_cnt+1
print num_cnt
if num_cnt==0:
no_assn_key.append(key)
print '------------------'
print 'unassigned event key ' ,no_assn_key
return no_assn_key
#all_event_name=list(set(GW1_.Events_dict.keys()+GW2_.Events_dict.keys()\
#+VAK2_.Events_dict.keys()+VAK1_.Events_dict.keys()))
def weather_convert(wdata_mat,wdata_name, Conditions_dict,Events_dict):
##########################################
# New dictionary by state classification of weather data
##########################################
weather_dict={}
##########################################
    # column indices of the weather variables in the input data matrix
##########################################
try:
temp_idx=wdata_name.index('TemperatureC')
except:
temp_idx=[]
try:
dewp_idx=wdata_name.index('Dew_PointC')
except:
dewp_idx=[]
try:
humd_idx=wdata_name.index('Humidity')
except:
humd_idx=[]
try:
evnt_idx=wdata_name.index('Events')
except:
evnt_idx=[]
try:
cond_idx=wdata_name.index('Conditions')
except:
cond_idx=[]
######################################################################
# Weather state classification
######################################################################
for class_idx in [temp_idx,dewp_idx,humd_idx]:
obs=wdata_mat[:,class_idx][:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=\
state_retrieval(obs,max_num_cluster=30,off_set=1,est_method='kmean',PARALLEL=IS_USING_PARALLEL_OPT,PRINTSHOW=False)
if class_idx==temp_idx:
print 'Temp state classification...'
weather_dict.update({'Temp':model.cluster_centers_})
elif class_idx==dewp_idx:
print 'Dewp state classification...'
weather_dict.update({'Dewp':model.cluster_centers_})
elif class_idx==humd_idx:
print 'Humd state classification...'
weather_dict.update({'Humd':model.cluster_centers_})
else:
print 'not found'
for label_id in range(label.max()+1):
label_idx=np.nonzero(label==label_id)[0]
wdata_mat[label_idx,class_idx]=np.round(model.cluster_centers_[label_id][0])
##################################################
# Reclassify the Condition states into clarity of the sky
##################################################
cond_state=[[]]*9
cond_state[8]=['Clear'] # Clear
cond_state[7]=['Partly Cloudy','Scattered Clouds'] # 'Partly Cloudy'
cond_state[6]=['Mostly Cloudy','Overcast'] # 'Overcast'
    cond_state[5]=['Fog','Mist', 'Shallow Fog','Patches of Fog','Light Freezing Fog'] # Fog
cond_state[4]=['Drizzle', 'Heavy Drizzle','Light Drizzle','Light Freezing Drizzle']
    cond_state[3]=['Rain','Rain Showers','Thunderstorms and Rain'\
    ,'Heavy Rain','Heavy Rain Showers', 'Freezing Rain','Light Freezing Rain', \
    'Light Rain Showers','Light Rain','Light Thunderstorms and Rain'] # Rain
cond_state[2]=['Ice Pellets', 'Ice Crystals','Light Ice Crystals','Light Ice Pellets']
cond_state[1]=['Snow','Snow Showers','Light Snow','Light Snow Grains','Light Snow Showers'] # 'Snow'
cond_state[0]=['Unknown']
cond_data_array=wdata_mat[:,cond_idx].copy()
print 'Condition state classification...'
for k in range(len(cond_state)):
for cond_str in cond_state[k]:
if cond_str in Conditions_dict.keys():
cond_val_old=Conditions_dict[cond_str]
idx_temp=np.nonzero(cond_data_array==cond_val_old)[0]
if len(idx_temp)>0:
wdata_mat[idx_temp,cond_idx]=k
Conditions_dict_temp={}
Conditions_dict_temp.update({'Clear':8})
Conditions_dict_temp.update({'Cloudy':7})
Conditions_dict_temp.update({'Overcast':6})
Conditions_dict_temp.update({'Fog':5})
Conditions_dict_temp.update({'Drizzle':4})
Conditions_dict_temp.update({'Rain':3})
Conditions_dict_temp.update({'Ice':2})
Conditions_dict_temp.update({'Snow':1})
Conditions_dict_temp.update({'Unknown':0})
# Abbr' of weather factor type is
weather_dict.update({'Cond':Conditions_dict_temp})
####################################################################
    # Reclassify the Event states into rain/snow/fog weather conditions
####################################################################
event_state=[[]]*4
event_state[0]=[''] # No event
event_state[1]=['Rain-Snow','Snow','Fog-Snow'] # Snow
event_state[2]=['Rain','Thunderstorm','Rain-Thunderstorm'] # Rain
event_state[3]=['Fog','Fog-Rain'] # Fog
print 'Event state classification...'
event_data_array=wdata_mat[:,evnt_idx].copy()
for k in range(len(event_state)):
for event_str in event_state[k]:
if event_str in Events_dict.keys():
event_val_old=Events_dict[event_str]
idx_temp=np.nonzero(event_data_array==event_val_old)[0]
if len(idx_temp)>0:
wdata_mat[idx_temp,evnt_idx]=k
Events_dict_temp={}
Events_dict_temp.update({'NoEvent':0})
Events_dict_temp.update({'Snow':1})
Events_dict_temp.update({'Rain':2})
Events_dict_temp.update({'Fog':3})
weather_dict.update({'Event':Events_dict_temp})
return wdata_mat,weather_dict
def bldg_obj_weather_convert(bldg_obj):
#import pdb;pdb.set_trace()
# For avg
if 'data_weather_mat' in bldg_obj.avg.__dict__.keys():
wdata_mat = bldg_obj.avg.data_weather_mat.copy()
wdata_name =bldg_obj.avg.weather_names
Conditions_dict= bldg_obj.Conditions_dict.copy()
Events_dict= bldg_obj.Events_dict.copy()
wdata_mat,weather_dict=weather_convert(wdata_mat,wdata_name, Conditions_dict,Events_dict)
bldg_obj.avg.weather_dict=weather_dict
bldg_obj.avg.data_weather_mat_=wdata_mat
# For diff
if 'data_weather_mat' in bldg_obj.diff.__dict__.keys():
wdata_mat = bldg_obj.diff.data_weather_mat.copy()
wdata_name =bldg_obj.diff.weather_names
Conditions_dict= bldg_obj.Conditions_dict.copy()
Events_dict= bldg_obj.Events_dict.copy()
wdata_mat,weather_dict=weather_convert(wdata_mat,wdata_name, Conditions_dict,Events_dict)
bldg_obj.diff.weather_dict=weather_dict
bldg_obj.diff.data_weather_mat_=wdata_mat
def find_cond_lh_set(data_state_mat,cause_idx_set,effect_idx,obs_state):
optprob_set=np.zeros(len(cause_idx_set))
optstate_set=np.zeros(len(cause_idx_set))
for i,cause_idx in enumerate(cause_idx_set):
        # Compute the likelihood of the effect state given each candidate cause variable
avg_state_temp, avg_prob_temp\
=compute_cause_likelihood(data_state_mat,[cause_idx],[[effect_idx]],[[obs_state]])
# masking its own effect
if cause_idx==effect_idx:
# and its state
max_opt_state=np.nan
# and its probability
max_opt_prob=-np.inf
else:
# find sensor index giving the maximum likelihood
max_idx=np.argmax(avg_prob_temp)
# and its state
max_opt_state=avg_state_temp[max_idx]
# and its probability
max_opt_prob=avg_prob_temp[max_idx]
optprob_set[i]=max_opt_prob
optstate_set[i]=max_opt_state
return optstate_set, optprob_set
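# Hedged usage sketch (hypothetical indices): with all_mat an N-by-M stacked state matrix whose
# columns are [sensor | time | weather] variables, candidate cause columns [0,1,2], effect column 3
# and peak effect state 1:
# optstate_set, optprob_set = find_cond_lh_set(all_mat, [0,1,2], 3, 1)
# optprob_set[i] is max_s P(effect=1 | cause_i=s) and optstate_set[i] is the maximizing state;
# if the effect column itself appears among the causes it is masked with -inf / NaN.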
def create_bldg_obj(dict_dir,bldg_tag,pname_key):
print '==================================='
print 'create object for ', bldg_tag+'BLDG'
print '==================================='
cmd_=bldg_tag+'data_dict = mt.loadObjectBinaryFast(dict_dir+'+'\'data_dict.bin\')'
exec(cmd_)
sig_tag_set=[]
try:
cmd_=bldg_tag+'diffdata_dict = mt.loadObjectBinaryFast(dict_dir+'+'\'diffdata_dict.bin\')'
exec(cmd_)
sig_tag_set.append('diff')
except:
pass
try:
cmd_=bldg_tag+'avgdata_dict = mt.loadObjectBinaryFast(dict_dir+'+'\'avgdata_dict.bin\')'
exec(cmd_)
sig_tag_set.append('avg')
except:
pass
###########################################################################################
for sig_tag in sig_tag_set:
cmd_str=[[]]*9
cmd_str[0]=bldg_tag+sig_tag+'data_state_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_state_mat\']'
cmd_str[1]=bldg_tag+sig_tag+'data_weather_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_weather_mat\']'
cmd_str[2]=bldg_tag+sig_tag+'data_time_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_time_mat\']'
cmd_str[3]=bldg_tag+sig_tag+'_time_slot='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'_time_slot\']'
cmd_str[4]=bldg_tag+sig_tag+'data_exemplar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_exemplar\']'
cmd_str[5]=bldg_tag+sig_tag+'data_zvar=remove_dot('+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_zvar\'])'
cmd_str[6]=bldg_tag+sig_tag+'sensor_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'sensor_names\'])'
cmd_str[7]=bldg_tag+sig_tag+'weather_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'weather_names\'])'
cmd_str[8]=bldg_tag+sig_tag+'time_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'time_names\'])'
for cmd_ in cmd_str:
exec(cmd_)
if 'avg' in sig_tag:
print "--*--*--*--*--*--*--*--*-- create_bldg_obj::(" + sig_tag + ") data_weather_mat --*--*--*--*--*-"
exec("print " + bldg_tag+sig_tag+'data_weather_mat[:,4]')
print "--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*"
#TODO: Name correction for exemplar
if isinstance(pname_key,list)==True:
cmd_str_tmp=sig_tag+'p_names=pname_key'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag+sig_tag+'p_idx=['+bldg_tag+sig_tag+'sensor_names.index(name_) for name_ in pname_key]'
exec(cmd_str_tmp)
else:
cmd_str_tmp=bldg_tag+sig_tag+'p_idx=grep('+'\''+pname_key+'\''+','+bldg_tag+sig_tag+'sensor_names)'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag+sig_tag+'p_names=list(np.array('+bldg_tag+sig_tag+'sensor_names)['+bldg_tag+sig_tag+'p_idx])'
exec(cmd_str_tmp)
cmd_str_tmp=sig_tag+'p_names=list(np.array('+bldg_tag+sig_tag+'sensor_names)['+bldg_tag+sig_tag+'p_idx])'
exec(cmd_str_tmp)
print '--------------------------------------------------------'
print ' Power sensor selected -'+sig_tag
print '--------------------------------------------------------'
cmd_str_tmp='pprint.pprint('+sig_tag+'p_names)'
exec(cmd_str_tmp)
print '----------------------------------------'
print 'creating '+ bldg_tag+' obj....'
print '----------------------------------------'
cmd_str_=bldg_tag+'=obj({'+'\'avg\''+':obj({}),'+'\'diff\''+':obj({})})'
exec(cmd_str_)
for sig_tag in sig_tag_set:
print 'generating '+ sig_tag+' members....'
cmd_str=[[]]*12
#cmd_str[0]=bldg_tag+'.'+sig_tag+'=[]'
cmd_str[0]='[]'
cmd_str[1]=bldg_tag+'.'+sig_tag+'.data_state_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_state_mat\']'
cmd_str[2]=bldg_tag+'.'+sig_tag+'.data_weather_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_weather_mat\']'
cmd_str[3]=bldg_tag+'.'+sig_tag+'.data_time_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_time_mat\']'
cmd_str[4]=bldg_tag+'.'+sig_tag+'.time_slot='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'_time_slot\']'
cmd_str[5]=bldg_tag+'.'+sig_tag+'.data_exemplar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_exemplar\']'
cmd_str[6]=bldg_tag+'.'+sig_tag+'.data_zvar=remove_dot('+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_zvar\'])'
cmd_str[7]=bldg_tag+'.'+sig_tag+'.sensor_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'sensor_names\'])'
cmd_str[8]=bldg_tag+'.'+sig_tag+'.weather_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'weather_names\'])'
cmd_str[9]=bldg_tag+'.'+sig_tag+'.time_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'time_names\'])'
cmd_str[10]=bldg_tag+'.'+sig_tag+'.p_idx='+bldg_tag+sig_tag+'p_idx'
cmd_str[11]=bldg_tag+'.'+sig_tag+'.p_names=remove_dot('+bldg_tag+sig_tag+'p_names)'
for cmd_ in cmd_str: exec(cmd_)
#TODO: Name correction for exemplar
cmd_=bldg_tag+'.'+'Conditions_dict='+bldg_tag+'data_dict[\'Conditions_dict\']'
exec(cmd_)
cmd_=bldg_tag+'.'+'Events_dict='+bldg_tag+'data_dict[\'Events_dict\']'
exec(cmd_)
cmd_='bldg_obj_weather_convert('+bldg_tag+')'
exec(cmd_)
    # Create class structure for data analysis
analysis={}
for sig_tag in sig_tag_set:
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
temp1={}
for name_ in p_names:
temp_s=obj({'optprob_set':[],'optstate_set':[]})
temp_t=obj({'optprob_set':[],'optstate_set':[]})
temp_w=obj({'optprob_set':[],'optstate_set':[]})
temp2=obj({'peak_eff_state':[],'sensor':temp_s,'time':temp_t,'weather':temp_w})
temp1.update({remove_dot(name_):temp2})
analysis.update({sig_tag:obj(temp1)})
analysis=obj(analysis)
cmd_str_=bldg_tag+'.analysis=analysis'
exec(cmd_str_)
print '-------------------------'
print 'Compute LH values'
print '-------------------------'
for sig_tag in sig_tag_set:
print sig_tag+'.....'
cmd_str_='all_data_state_mat=np.vstack(('+bldg_tag+'.'+sig_tag+'.data_state_mat.T, '\
+bldg_tag+'.'+sig_tag+'.data_time_mat.T,'+bldg_tag+'.'+sig_tag+'.data_weather_mat_.T)).T'
exec(cmd_str_)
cmd_str_='p_idx='+bldg_tag+'.'+sig_tag+'.p_idx'
exec(cmd_str_)
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
cmd_str_='len_sensor='+bldg_tag+'.'+sig_tag+'.data_state_mat.shape[1]'
exec(cmd_str_)
cmd_str_='len_time='+bldg_tag+'.'+sig_tag+'.data_time_mat.shape[1]'
exec(cmd_str_)
cmd_str_='len_weather='+bldg_tag+'.'+sig_tag+'.data_weather_mat.shape[1]'
exec(cmd_str_)
cmd_str_='sensor_cause_idx_set=range(len_sensor)'
exec(cmd_str_)
cmd_str_='time_cause_idx_set=range(len_sensor,len_sensor+len_time)'
exec(cmd_str_)
cmd_str_='weather_cause_idx_set=range(len_sensor+len_time,len_sensor+len_time+len_weather)'
exec(cmd_str_)
for k,effect_idx in enumerate(p_idx):
print 'compute cond. prob of ' + p_names[k]
cmd_str_='p_name_='+bldg_tag+'.'+sig_tag+'.p_names[k]'
exec(cmd_str_)
            # determine the peak (maximum) observed state of the effect sensor
effect_state_set=np.array(list(set(all_data_state_mat[:, effect_idx])))
eff_state=effect_state_set.max()
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.peak_eff_state=eff_state'
exec(cmd_str_)
s_optstate_set_temp,s_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,sensor_cause_idx_set,effect_idx,eff_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.sensor.optprob_set=s_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.sensor.optstate_set=s_optstate_set_temp'
exec(cmd_str_)
w_optstate_set_temp,w_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,weather_cause_idx_set,effect_idx,eff_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.weather.optprob_set=w_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.weather.optstate_set=w_optstate_set_temp'
exec(cmd_str_)
w_optstate_set_temp,w_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,time_cause_idx_set,effect_idx,eff_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.time.optprob_set=w_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.time.optstate_set=w_optstate_set_temp'
exec(cmd_str_)
cmd_str_='mt.saveObjectBinaryFast('+bldg_tag+','+'bldg_tag+\'.bin\')'
exec(cmd_str_)
cmd_str_='obj_out='+bldg_tag
exec(cmd_str_)
return obj_out
def plotting_bldg_lh(bldg_,bldg_key=[],attr_class='sensor',num_picks=30):
print 'plotting lh for '+attr_class
print '============================================'
sig_tag_set=['avg','diff']
plt.ioff()
if len(bldg_key)==0:
bldg_set=bldg_.__dict__.keys()
else :
bldg_set=[bldg_key]
for bldg_tag in bldg_set:
print bldg_tag
cmd_str_= bldg_tag+'=bldg_.__dict__[bldg_tag]'
exec(cmd_str_)
print '-------------------------'
print bldg_tag
print '-------------------------'
for sig_tag in sig_tag_set:
try:
print sig_tag+'.....'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'.'+attr_class+'_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optstate_set'
exec(cmd_str_)
                    #num_picks=30  # removed override so the num_picks argument is honored
sort_idx=np.argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
plt.figure(figsize=(20.0,15.0))
plt.subplot(2,1,1)
plt.plot(sort_lh,'-*')
x_label= list(np.array(s_names)[sort_idx[:num_picks]])
cmd_str_='key_set=bldg_.__dict__.keys()'
exec(cmd_str_)
if 'convert_name' in key_set:
cmd_str_='x_label=bldg_.convert_name(x_label)'
exec(cmd_str_)
cmd_str_='pname_=bldg_.convert_name(pname_)[0]'
exec(cmd_str_)
x_ticks=range(len(x_label))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
if sig_tag=='avg':
                        plt.title('Most relevant '+ attr_class+ ' attributes to the peak (demand) of '+pname_,fontsize=20)
else:
                        plt.title('Most relevant '+ attr_class+ ' attributes to the peak variations of '+pname_,fontsize=20)
plt.tick_params(labelsize='large')
plt.ylim([-0.05, 1.05])
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+attr_class+'_'+sig_tag+'_lh_sensors.png', bbox_inches='tight')
plt.close()
except:
pass
plt.close()
plt.ion()
##############################################################################
def bldg_lh_sensitivity(bldg_,bldg_key=[],attr_class='sensor',sig_tag='avg'):
print 'compute std of lh for '+attr_class+'...'
if len(bldg_key)==0:
bldg_set=bldg_.__dict__.keys()
else :
bldg_set=[bldg_key]
bldg_lh_std_log={}
for bldg_tag in bldg_set:
try:
print bldg_tag
cmd_str_= bldg_tag+'=bldg_.__dict__[bldg_tag]'
exec(cmd_str_)
print '-------------------------'
print bldg_tag
print '-------------------------'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
lh_std_log={}
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'.'+attr_class+'_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optstate_set'
exec(cmd_str_)
lh_std=np.std(np.sort(optprob_set)[1:])
lh_std_log.update({bldg_.convert_name(pname_)[0]:lh_std})
bldg_lh_std_log.update({bldg_tag:lh_std_log})
except:
pass
return obj(bldg_lh_std_log)
##############################################################################
def bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag='avg',num_picks_bn=15,learning_alg='hc'):
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
p_idx=s_names.index(p_name)
cmd_str_='data_state_mat=bldg_obj.'+sig_tag+'.data_state_mat'
exec(cmd_str_)
if not (attr=='all') :
cmd_str_='optprob_set=bldg_obj.analysis.'+sig_tag+'.__dict__[p_name].'+attr+'.optprob_set'
exec(cmd_str_)
        cmd_str_='optstate_set=bldg_obj.analysis.'+sig_tag+'.__dict__[p_name].'+attr+'.optstate_set'
        exec(cmd_str_)
        sort_idx=np.argsort(optprob_set)[::-1]
if (attr=='sensor') :
print 'power - sensors...'
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
idx_select=[p_idx]+ list(sort_idx[:num_picks_bn])
cmd_str_='bndata_mat=bldg_obj.'+sig_tag+'.data_state_mat[:,idx_select]'
exec(cmd_str_)
cols=[s_names[k] for k in idx_select]
elif (attr=='weather'):
print 'power - weather...'
cmd_str_='w_names=bldg_obj.'+sig_tag+'.weather_names'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+'.data_state_mat[:,p_idx].T,bldg_obj.'+sig_tag+'.data_weather_mat_.T)).T'
exec(cmd_str_)
cols=[p_name]+[w_name for w_name in w_names]
if 'avg' in sig_tag:
print "--*--*--*--*- bn_anaylsis::sig_tag [" + sig_tag + "] data_weather_mat_ --*--*--*--*--*--*--"
exec('print bldg_obj.'+sig_tag+'.data_weather_mat_[:,4]')
print "--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--"
elif (attr=='time'):
print 'power - time...'
cmd_str_='t_names=bldg_obj.'+sig_tag+'.time_names'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+\
'.data_state_mat[:,p_idx].T,bldg_obj.'+sig_tag+'.data_time_mat.T)).T'
exec(cmd_str_)
cols=[p_name]+[t_name for t_name in t_names]
elif (attr=='all'):
print 'power - sensors + weather + time ...'
s_cause_label,s_labels,s_hc,s_cp_mat,s_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
t_cause_label,t_labels,t_hc,t_cp_mat,t_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
w_cause_label,w_labels,w_hc,w_cp_mat,w_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
#s_cause_label=s_labels; w_cause_label=w_labels;t_cause_label=t_labels
cmd_str_='s_cause_idx=[bldg_obj.'+sig_tag+'.sensor_names.index(name_) for name_ in s_cause_label]'
exec(cmd_str_)
cmd_str_='t_cause_idx=[bldg_obj.'+sig_tag+'.time_names.index(name_) for name_ in t_cause_label]'
exec(cmd_str_)
cmd_str_='w_cause_idx=[bldg_obj.'+sig_tag+'.weather_names.index(name_) for name_ in w_cause_label]'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+'.data_state_mat[:,p_idx].T,\
bldg_obj.'+sig_tag+'.data_state_mat[:,s_cause_idx].T, \
bldg_obj.'+sig_tag+'.data_weather_mat_[:,w_cause_idx].T, \
bldg_obj.'+sig_tag+'.data_time_mat[:,t_cause_idx].T)).T'
exec(cmd_str_)
cmd_str_='cols=[name_ for name_ in [p_name]+s_cause_label+w_cause_label+t_cause_label]'
exec(cmd_str_)
else:
print 'error'
return 0
if (attr=='all'):
b_arc_list = pair_in_idx([p_name],s_cause_label+ w_cause_label+t_cause_label)+\
pair_in_idx(s_cause_label,w_cause_label+t_cause_label)+\
pair_in_idx(w_cause_label,t_cause_label)+\
pair_in_idx(t_cause_label,t_cause_label)
#import pdb;pdb.set_trace()
elif(attr=='time'):
b_arc_list = pair_in_idx([cols[0]],cols[1:])+pair_in_idx(cols[1:],cols[1:])
else:
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
if learning_alg=='tabu':
hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
elif learning_alg=='mmhc':
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
else:
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
cause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in cause_label]
return cause_label,cols, hc_b, amat,bndata_mat
def peak_analysis(cause_label,effect_label,col_labels,bndata_mat):
if isinstance(cause_label,list)==True:
cause_idx=[col_labels.index(label_) for label_ in cause_label]
else:
cause_idx=[col_labels.index(label_) for label_ in [cause_label]]
if isinstance(effect_label,list)==True:
effect_idx=[col_labels.index(label_) for label_ in effect_label]
else:
effect_idx=[col_labels.index(label_) for label_ in [effect_label]]
effect_state_set=list(set(bndata_mat[:,effect_idx].T[0]))
LOW_PEAK_STATE_EFFECT=np.min(effect_state_set)
HIGH_PEAK_STATE_EFFECT=np.max(effect_state_set)
high_peak_state_temp, high_peak_prob_temp=\
compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[HIGH_PEAK_STATE_EFFECT]])
low_peak_state_temp, low_peak_prob_temp=\
compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[LOW_PEAK_STATE_EFFECT]])
high_peak_state=np.array(high_peak_state_temp)
high_peak_prob=np.array(high_peak_prob_temp)
low_peak_state=np.array(low_peak_state_temp)
low_peak_prob=np.array(low_peak_prob_temp)
return low_peak_state,low_peak_prob,high_peak_state,high_peak_prob
def get_tick_symbol(tick_state_val,cause_labels_,Event,Cond):
if len(cause_labels_)==1:
iter_zip=zip(cause_labels_,tick_state_val.T[np.newaxis,:])
else:
iter_zip=zip(cause_labels_,tick_state_val.T)
symbol_tuple=[]
for cause_label_,state_val_ in iter_zip:
symbol_out=[]
if (isinstance(state_val_,np.ndarray)==False) and (isinstance(state_val_,list)==False):
state_val_=[state_val_]
temp=list(set(state_val_))
if list(np.sort(temp))==[-1,0,1]:
cause_label_='PEAK'
for sval_ in state_val_:
if cause_label_=='MTH':
symbol_out.append(monthDict[sval_])
elif cause_label_=='WD':
symbol_out.append(weekDict[sval_])
elif cause_label_=='HR':
symbol_out.append(hourDict[sval_])
elif cause_label_=='Dew_PointC':
                symbol_out.append(str(sval_)+'C')
elif cause_label_=='Humidity':
symbol_out.append(str(sval_)+'%')
elif cause_label_=='Events':
symbol_out.append([key_ for key_,val_ in Event.items() if val_==sval_])
elif cause_label_=='Conditions':
symbol_out.append([key_ for key_,val_ in Cond.items() if val_==sval_])
elif cause_label_=='TemperatureC':
symbol_out.append(str(sval_)+'C')
elif cause_label_=='PEAK':
symbol_out.append(stateDict[sval_])
else:
symbol_out.append(str(sval_))
symbol_tuple.append(symbol_out)
temp_=np.array(symbol_tuple)
temp2=temp_.reshape(len(cause_labels_),np.prod(temp_.shape)/len(cause_labels_)).T
return [tuple(symbol_) for symbol_ in temp2]
def bn_prob_analysis(bldg_obj,sig_tag_='avg'):
cmd_str='Event=bldg_obj.'+sig_tag_+'.weather_dict[\'Event\']'
exec(cmd_str)
cmd_str='Cond=bldg_obj.'+sig_tag_+'.weather_dict[\'Cond\']'
exec(cmd_str)
bn_out_set={}
cmd_str='p_name_set=bldg_obj.analysis.'+sig_tag_+'.__dict__.keys()'
exec(cmd_str)
for p_name in p_name_set:
try:
# bn analysis - Power-Sensor
s_cause_label,s_labels,s_hc,s_cp_mat,s_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag=sig_tag_,num_picks_bn=5)
# bn analysis -Power-Time
t_cause_label,t_labels,t_hc,t_cp_mat,t_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag=sig_tag_,num_picks_bn=10)
# bn analysis -Power-Weather
w_cause_label,w_labels,w_hc,w_cp_mat,w_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag=sig_tag_,num_picks_bn=10)
# bn analysis -Power-Sensor+Time+Weather
all_cause_label,all_labels,all_hc,all_cp_mat,all_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='all',sig_tag=sig_tag_,num_picks_bn=20)
# prob analysis -Power-Sensor+Time+Weather
cause_label=all_cause_label;col_labels=all_labels;
effect_label=p_name; bndata_mat=all_bndata_mat
low_peak_state,low_peak_prob,high_peak_state,high_peak_prob=\
peak_analysis(cause_label,effect_label,col_labels,bndata_mat)
x_set=low_peak_state
all_cause_symbol_xlabel=get_tick_symbol(x_set,all_cause_label,Event,Cond)
all_cause_symbol_xtick=range(len(low_peak_state))
# BN-PROB STORE
bn_out={'s_cause_label':s_cause_label,'s_labels':s_labels,'s_hc':s_hc,\
's_cp_mat':s_cp_mat, 's_bndata_mat':s_bndata_mat,'t_cause_label':t_cause_label,\
't_labels':t_labels,'t_hc':t_hc,'t_cp_mat':t_cp_mat,'t_bndata_mat':t_bndata_mat, \
'w_cause_label':w_cause_label,'w_labels':w_labels,'w_hc':w_hc,'w_cp_mat':w_cp_mat,\
'w_bndata_mat':w_bndata_mat,'all_cause_label':all_cause_label,'all_labels':all_labels,\
'all_hc':all_hc,'all_cp_mat':all_cp_mat,'all_bndata_mat':all_bndata_mat,
'low_peak_state':low_peak_state,'low_peak_prob':low_peak_prob,\
'high_peak_state':high_peak_state,'high_peak_prob':high_peak_prob,\
'all_cause_symbol_xlabel':all_cause_symbol_xlabel,'all_cause_symbol_xtick':all_cause_symbol_xtick}
bn_out_set.update({p_name:bn_out})
except:
print '*** Error in processing bn_prob for ', p_name, '! ****'
pass
return obj(bn_out_set)
def compute_bn_sensors(bldg_obj,sig_tag='avg',learning_alg='hill'):
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
cmd_str_='bndata_mat=bldg_obj.'+sig_tag+'.data_state_mat'
exec(cmd_str_)
cols=s_names
const_idx=np.nonzero(np.array([ len(set(col)) for col in bndata_mat.T])<2)[0]
bndata_mat=np.delete(bndata_mat,const_idx,1)
cols=list(np.delete(cols,const_idx,0))
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
if learning_alg=='tabu':
hc_b = rbn.bnlearn.tabu(data_frame,score='bic')
elif learning_alg=='mmhc':
hc_b = rbn.bnlearn.mmhc(data_frame,score='bic')
else:
hc_b = rbn.bnlearn.hc(data_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
return hc_b,cols,amat
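# Hedged usage sketch (assumes a bldg_obj built by create_bldg_obj above):
# hc_b, cols, amat = compute_bn_sensors(bldg_obj, sig_tag='avg', learning_alg='tabu')
# As in bn_anaylsis above, amat[i,j]==1 appears to mark a learned arc cols[i] -> cols[j],
# so the parents of sensor cols[j] are list(np.array(cols)[np.nonzero(amat[:,j]==1)[0]]).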
def plotting_bldg_bn(bldg_):
plt.ioff()
#if 'convert_name' not in bldg_.__dict__.keys():
# bldg_.convert_name = lambda name_: [name_]
for bldg_tag in bldg_.__dict__.keys():
print 'Getting anal_out from '+ bldg_tag
anal_out_found=True
try:
cmd_str='anal_out=bldg_.__dict__[\''+bldg_tag+'\'].anal_out'
exec(cmd_str)
except:
anal_out_found=False
if anal_out_found==True:
for sig_tag in ['avg','diff']:
if sig_tag in anal_out.__dict__.keys():
anal_out_sig=anal_out.__dict__[sig_tag]
p_name_sets=anal_out_sig.__dict__.keys()
for p_name in p_name_sets:
bn_out=anal_out_sig.__dict__[p_name]
cmd_str='pname_=bldg_.convert_name(p_name)[0]'
exec(cmd_str)
try:
fig_name='BN for Sensors '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
col_name=bldg_.convert_name(bn_out.s_labels)
rbn.nx_plot(bn_out.s_hc,col_name,graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_sensors'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN for Time '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.t_hc,bldg_.convert_name(bn_out.t_labels),graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_time'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN for Weather '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.w_hc,bldg_.convert_name(bn_out.w_labels),graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_weather'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN for Sensor-Time-Weather '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.all_hc,bldg_.convert_name(bn_out.all_labels),graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_sensor_time_weather'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN PEAK LH Analysis for Sensor-Time-Weather '+pname_
plt.figure(fig_name, figsize=(30.0,30.0))
plt.subplot(2,1,1)
plt.plot(bn_out.all_cause_symbol_xtick,bn_out.high_peak_prob,'-^')
plt.plot(bn_out.all_cause_symbol_xtick,bn_out.low_peak_prob,'-.v')
plt.ylabel('Likelihood',fontsize=20)
plt.xticks(bn_out.all_cause_symbol_xtick,bn_out.all_cause_symbol_xlabel,rotation=270, fontsize=20)
plt.tick_params(labelsize=20)
plt.legend(('High Peak', 'Low Peak'),loc='center right', prop={'size':25})
plt.tick_params(labelsize=20)
plt.grid();plt.ylim([-0.05,1.05])
plt.title('Likelihood of '+ str(remove_dot(pname_))+\
' given '+'\n'+str(remove_dot(bldg_.convert_name(bn_out.all_cause_label))), fontsize=20)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_LH_sensor_time_weather'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
plt.ion()
##############################################################################
# Obsolete library files
##############################################################################
"""
plt.ioff()
for bldg_tag in bldg_tag_set:
print '-------------------------'
print bldg_tag
print '-------------------------'
for sig_tag in sig_tag_set:
print sig_tag+'.....'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'p_names'
exec(cmd_str_)
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'sensor_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.optstate_set'
exec(cmd_str_)
num_picks=30
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
fig=figure(figsize=(20.0,15.0))
subplot(2,1,1)
plt.plot(sort_lh,'-*')
x_label= list(np.array(s_names)[sort_idx[:num_picks]])
x_ticks=range(len(x_label))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
            if sig_tag=='avg':
                plt.title('Most relevant '+bldg_tag +' sensors to the peak (demand) of '+pname_,fontsize=20)
            else:
                plt.title('Most relevant '+bldg_tag +' sensors to the peak variations of '+pname_,fontsize=20)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
#plt.get_current_fig_manager().window.showMaximized()
plt.savefig(fig_dir+pname_+'_'+sig_tag+'_lh_sensors.png', bbox_inches='tight')
plt.close()
plt.ion()
def interpolation_measurement_2(data_dict,input_names,err_rate=1,sgm_bnd=20):
    print 'interpolation starts....'
measurement_point_set=[]
num_of_discrete_val=[]
sampling_interval_set=[]
num_type_set=[]
err_rate=1;sgm_bnd=20
for i,key_name in enumerate(input_names):
print key_name,'.....'
start_time = time.time()
v = mt.loadObjectBinaryFast(str(key_name) + FL_EXT)
t_=np.array(v[2][0])
if len(t_) == 0:
continue
intpl_intv=np.ceil((t_[-1]-t_[0]) /len(t_))
sampling_interval_set.append(intpl_intv)
val_=np.array(v[2][1])
num_of_discrete_val_temp=len(set(val_))
num_of_discrete_val.append(num_of_discrete_val_temp)
# filtering outlier
# assuming 1% of errors and 30 x standard deviation rules
outlier_idx=outlier_detect(val_,err_rate,sgm_bnd)
if len(outlier_idx)>0:
print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
t_=np.delete(t_,outlier_idx)
val_=np.delete(val_,outlier_idx)
t_new=np.r_[t_[0]:t_[-1]:intpl_intv]
num_type=check_data_type(v[2][1])
if num_type==INT_TYPE:
val_new=fast_nearest_interp(t_new, t_,val_)
else:
#num_type=FLOAT_TYPE
val_new = np.interp(t_new, t_,val_)
c=np.vstack([t_new,val_new])
measurement_point_set.append(c)
num_type_set.append(num_type)
print 'interpolation_measurement one iteration done...'
mt.print_report(start_time)
print '-----------------------------------------------------------------'
#return measurement_point_set,num_type_set,num_of_discrete_val,sampling_interval_set
return measurement_point_set,np.array(num_type_set)
"""
|
gpl-2.0
| -6,386,651,215,079,214,000
| 46.074036
| 156
| 0.550858
| false
| 3.155901
| false
| false
| false
|
jualjiman/knowledge-base
|
src/knowledge_base/users/api.py
|
1
|
6130
|
# -*- coding: utf-8 -*-
import os
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from knowledge_base.api.v1.routers import router
from knowledge_base.core.api.mixins import base as base_mixins
from knowledge_base.core.api.routers.single import SingleObjectRouter
from knowledge_base.core.api.viewsets import GenericViewSet
from knowledge_base.users.serializers import (
ProfileSerializer, ProfileUpdateImageSerializer,
ProfileUpdateSerializer, SearchUserSerializer
)
from knowledge_base.utils.urlresolvers import get_query_params
class ProfileViewSet(
base_mixins.RetrieveModelMixin,
base_mixins.PartialUpdateModelMixin,
GenericViewSet
):
serializer_class = ProfileSerializer
retrieve_serializer_class = ProfileSerializer
update_serializer_class = ProfileUpdateSerializer
change_image_serializer_class = ProfileUpdateImageSerializer
permission_classes = (IsAuthenticated, )
def retrieve(self, request, pk=None):
"""
Gets the user profile information.
---
response_serializer: ProfileSerializer
omit_serializer: false
responseMessages:
- code: 200
message: OK
- code: 403
message: FORBIDDEN
- code: 404
message: NOT FOUND
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(ProfileViewSet, self).retrieve(request)
def partial_update(self, request):
"""
Updates the user profile information.
---
request_serializer: ProfileSerializer
response_serializer: ProfileSerializer
omit_serializer: false
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 403
message: FORBIDDEN
- code: 404
message: NOT FOUND
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(ProfileViewSet, self).partial_update(request)
@detail_route(methods=['PUT'])
    def change_image(self, request, *args, **kwargs):
"""
Allows the session's user to update his profile image.
---
request_serializer: ProfileUpdateImageSerializer
response_serializer: ProfileSerializer
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
user = request.user
# Serializer that will be used to validate the information.
update_serializer = self.get_serializer(
user,
data=request.data,
partial=True,
action='change_image'
)
update_serializer.is_valid(raise_exception=True)
self.perform_delete_image()
updated_user = update_serializer.save()
retrieve_serializer = self.get_serializer(
updated_user,
action='retrieve'
)
return Response(retrieve_serializer.data, status=status.HTTP_200_OK)
@detail_route(methods=['DELETE'])
    def delete_image(self, request, *args, **kwargs):
        """
        Allows deleting the image for the current user.
omit_serializer: true
---
responseMessages:
- code: 204
message: NO CONTENT
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
self.perform_delete_image()
return Response(status=status.HTTP_204_NO_CONTENT)
def get_object(self):
return self.request.user
def perform_delete_image(self):
user = self.request.user
if user.photo and os.path.isfile(user.photo.path):
os.remove(user.photo.path)
user.photo = None
if user.thumbnail and os.path.isfile(user.thumbnail.path):
os.remove(user.thumbnail.path)
user.thumbnail = None
user.save()
class SearchUserViewSet(base_mixins.ListModelMixin, GenericViewSet):
serializer_class = SearchUserSerializer
list_serializer_class = SearchUserSerializer
permission_classes = (IsAuthenticated, )
def get_queryset(self, *args, **kwargs):
queryset = get_user_model().objects.filter(is_active=True)
query_params = get_query_params(self.request)
q = query_params.get('q')
if q:
queryset = queryset.filter(email__icontains=q)
return queryset
def list(self, request, *args, **kwargs):
"""
Return a list of users, that matches with the given word.
---
response_serializer: SearchUserSerializer
parameters:
- name: q
description: Search word.
paramType: query
type: string
responseMessages:
- code: 200
message: OK
- code: 403
message: FORBIDDEN
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(SearchUserViewSet, self).list(request, *args, **kwargs)
router.register(
'me',
ProfileViewSet,
base_name='me',
router_class=SingleObjectRouter
)
router.register(
r'users/search',
SearchUserViewSet,
base_name='users-search'
)
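# Illustrative request sketch (not part of the original module). The URL prefix
# depends on how `router` is mounted in the project's urlconf, which is not shown
# here, so the paths below are assumptions:
#   GET    <api-prefix>/me/                 -> ProfileViewSet.retrieve
#   PATCH  <api-prefix>/me/                 -> ProfileViewSet.partial_update
#   PUT    <api-prefix>/me/change_image/    -> ProfileViewSet.change_image
#   DELETE <api-prefix>/me/delete_image/    -> ProfileViewSet.delete_image
#   GET    <api-prefix>/users/search/?q=jo  -> SearchUserViewSet.list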
|
apache-2.0
| -7,804,503,449,449,819,000
| 27.915094
| 76
| 0.596574
| false
| 4.693721
| false
| false
| false
|
google-research/episodic-curiosity
|
episodic_curiosity/constants.py
|
1
|
6595
|
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for episodic curiosity."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
class Level(object):
"""Represents a DMLab level, possibly with additional non-standard settings.
Attributes:
dmlab_level_name: Name of the DMLab level
fully_qualified_name: Unique name used to distinguish between multiple DMLab
levels with the same name but different settings.
extra_env_settings: dict, additional DMLab environment settings for this
level.
random_maze: Whether the geometry of the maze is supposed to change when we
change the seed.
use_r_net_from_level: If provided, don't train a R-net for this level, but
instead, use the trained R-net from another level
(identified by its fully qualified name).
include_in_paper: Whether this level is included in the paper.
scenarios: Optional list of scenarios this level is used for.
"""
def __init__(self,
dmlab_level_name,
fully_qualified_name = None,
extra_env_settings = None,
random_maze = False,
use_r_net_from_level = None,
include_in_paper = False,
scenarios = None):
self.dmlab_level_name = dmlab_level_name
self.fully_qualified_name = fully_qualified_name or dmlab_level_name
self.extra_env_settings = extra_env_settings or {}
self.random_maze = random_maze
self.use_r_net_from_level = use_r_net_from_level
self.include_in_paper = include_in_paper
self.scenarios = scenarios
def asdict(self):
return vars(self)
class SplitType(Enum):
R_TRAINING = 0
POLICY_TRAINING = 3
VALIDATION = 1
TEST = 2
class Const(object):
"""Constants"""
MAX_ACTION_DISTANCE = 5
NEGATIVE_SAMPLE_MULTIPLIER = 5
# env
OBSERVATION_HEIGHT = 120
OBSERVATION_WIDTH = 160
OBSERVATION_CHANNELS = 3
OBSERVATION_SHAPE = (OBSERVATION_HEIGHT, OBSERVATION_WIDTH,
OBSERVATION_CHANNELS)
# model and training
BATCH_SIZE = 64
EDGE_CLASSES = 2
DUMP_AFTER_BATCHES = 100
EDGE_MAX_EPOCHS = 2000
ADAM_PARAMS = {
'lr': 1e-04,
'beta_1': 0.9,
'beta_2': 0.999,
'epsilon': 1e-08,
'decay': 0.0
}
ACTION_REPEAT = 4
STORE_CHECKPOINT_EVERY_N_EPOCHS = 30
LEVELS = [
# Levels on which we evaluate episodic curiosity.
# Corresponds to 'Sparse' setting in the paper
# (arxiv.org/pdf/1810.02274.pdf).
Level('contributed/dmlab30/explore_goal_locations_large',
fully_qualified_name='explore_goal_locations_large',
random_maze=True,
include_in_paper=True,
scenarios=['sparse', 'noreward', 'norewardnofire']),
# WARNING!! For explore_goal_locations_large_sparse and
# explore_goal_locations_large_verysparse to work properly (i.e. taking
# into account minGoalDistance), you need to use the dmlab MPM:
# learning/brain/research/dune/rl/dmlab_env_package.
# Corresponds to 'Very Sparse' setting in the paper.
Level(
'contributed/dmlab30/explore_goal_locations_large',
fully_qualified_name='explore_goal_locations_large_verysparse',
extra_env_settings={
# Forces the spawn and goals to be further apart.
# Unfortunately, we cannot go much higher, because we need to
# guarantee that for any goal location, we can at least find one
# spawn location that is further than this number (the goal
# location might be in the middle of the map...).
'minGoalDistance': 10,
},
use_r_net_from_level='explore_goal_locations_large',
random_maze=True, include_in_paper=True,
scenarios=['verysparse']),
# Corresponds to 'Sparse+Doors' setting in the paper.
Level('contributed/dmlab30/explore_obstructed_goals_large',
fully_qualified_name='explore_obstructed_goals_large',
random_maze=True,
include_in_paper=True,
scenarios=['sparseplusdoors']),
# Two levels where we expect to show episodic curiosity does not hurt.
# Corresponds to 'Dense 1' setting in the paper.
Level('contributed/dmlab30/rooms_keys_doors_puzzle',
fully_qualified_name='rooms_keys_doors_puzzle',
include_in_paper=True,
scenarios=['dense1']),
# Corresponds to 'Dense 2' setting in the paper.
Level('contributed/dmlab30/rooms_collect_good_objects_train',
fully_qualified_name='rooms_collect_good_objects_train',
include_in_paper=True,
scenarios=['dense2']),
]
MIXER_SEEDS = {
# Equivalent to not setting a mixer seed. Mixer seed to train the
# R-network.
SplitType.R_TRAINING: 0,
# Mixer seed for training the policy.
SplitType.POLICY_TRAINING: 0x3D23BE66,
SplitType.VALIDATION: 0x2B79ED94, # Invented.
SplitType.TEST: 0x600D5EED, # Same as DM's.
}
@staticmethod
def find_level(fully_qualified_name):
"""Finds a DMLab level by fully qualified name."""
for level in Const.LEVELS:
if level.fully_qualified_name == fully_qualified_name:
return level
# Fallback to the DMLab level with the corresponding name.
return Level(fully_qualified_name,
extra_env_settings = {
# Make 'rooms_exploit_deferred_effects_test',
# 'rooms_collect_good_objects_test' work.
'allowHoldOutLevels': True
})
@staticmethod
def find_level_by_scenario(scenario):
"""Finds a DMLab level by scenario name."""
for level in Const.LEVELS:
if level.scenarios and scenario in level.scenarios:
return level
raise ValueError('Scenario "{}" not found.'.format(scenario))
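# Illustrative usage sketch (not part of the original module):
#   level = Const.find_level_by_scenario('verysparse')
#   print(level.fully_qualified_name)   # explore_goal_locations_large_verysparse
#   print(level.extra_env_settings)     # {'minGoalDistance': 10}
#   # Unknown names fall back to a bare DMLab level with holdout levels allowed;
#   # 'some_unlisted_level' is a hypothetical name.
#   fallback = Const.find_level('some_unlisted_level')
#   print(fallback.extra_env_settings)  # {'allowHoldOutLevels': True}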
|
apache-2.0
| 3,986,055,241,371,008,500
| 36.471591
| 80
| 0.643821
| false
| 3.749289
| false
| false
| false
|
pterk/django-bop
|
bop/api.py
|
1
|
5312
|
import operator
from django.contrib.auth.models import User, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from bop.models import ObjectPermission
def get_model_perms(model):
return [p[0] for p in model._meta.permissions] + \
[model._meta.get_add_permission(),
model._meta.get_change_permission(),
model._meta.get_delete_permission()]
def has_model_perms(user, model):
for perm in user.get_all_permissions():
app_label, codename = perm.split('.')
if model._meta.app_label == app_label and \
codename in get_model_perms(model):
return True
return False
# I terify: BOO!
def iterify(obj, exceptions=(basestring,)):
    """ iterify makes sure `obj` is iterable
(by turning any value that isn't iterable into a list)
>>> from bop.api import iterify
>>> things = [1, "string", ('a', 'tuple'), {'name': 'dict', 'another': 'value'}, set(['test', 1, 3]), [1,3,4], None, [None]]
>>> for thing in things:
... for x in iterify(thing):
... print x
...
1
string
a
tuple
name
another
test
1
3
1
3
4
None
None
>>>
>>> for thing in things:
... for x in iterify(thing, (basestring, dict)):
... if isinstance(x, dict):
... d = x.items()
... d.sort()
... print d
... else:
... print x
...
1
string
a
tuple
[('another', 'value'), ('name', 'dict')]
test
1
3
1
3
4
None
None
>>>
"""
if hasattr(obj, '__iter__'):
# To future self: string has __iter__ in python3
if not isinstance(obj, exceptions):
return obj
return [obj]
def resolve(iterable, model, key=None):
resolved = []
for i in iterify(iterable):
if isinstance(i, model):
resolved.append(i)
if isinstance(i, (basestring, int)):
if key is None or isinstance(key ,int):
key = 'pk'
if hasattr(key, '__call__'):
i = key(i)
else:
i = {key: i}
if isinstance(i, dict):
try:
resolved.append(model.objects.get(**i))
except model.DoesNotExist:
pass
return resolved
def is_object_permission(obj, permission, ct):
return permission.content_type == ct and \
obj._meta.app_label == permission.content_type.app_label and \
permission.codename in get_model_perms(obj)
#(permission.codename in [x[0] for x in obj._meta.permissions] \
# or permission.codename in (obj._meta.get_add_permission(),
# obj._meta.get_change_permission(),
# obj._meta.get_delete_permission()))
def perm2dict(perm):
app_label, codename = perm.split(".")
return {"content_type__app_label": app_label, "codename": codename}
def _make_lists_of_objects(users, groups, permissions, objects):
# Make sure all 'objects' are model-instances
users = resolve(users, User, key='username')
groups = resolve(groups, Group, key='name')
permissions = resolve(permissions, Permission, key=perm2dict)
# objects *must* be model-instances already
return (users, groups, permissions, iterify(objects))
def grant(users, groups, permissions, objects):
users, groups, permissions, objects = \
_make_lists_of_objects(users, groups, permissions, objects)
for o in objects:
if not hasattr(o, '_meta'):
continue
ct = ContentType.objects.get_for_model(o)
for p in permissions:
if is_object_permission(o, p, ct):
for u in users:
ObjectPermission.objects.get_or_create(user=u,
permission=p,
object_id=o.id,
content_type=ct)
for g in groups:
ObjectPermission.objects.get_or_create(group=g,
permission=p,
object_id=o.id,
content_type=ct)
def revoke(users, groups, permissions, objects):
users, groups, permissions, objects = \
_make_lists_of_objects(users, groups, permissions, objects)
userlist = []
grouplist = []
for o in objects:
ct = ContentType.objects.get_for_model(o)
for p in permissions:
if is_object_permission(o, p, ct):
for u in users:
userlist.append(Q(user=u))
for g in groups:
grouplist.append(Q(group=g))
Qs = userlist+grouplist
if not Qs:
continue
ObjectPermission.objects.filter(
reduce(operator.or_, Qs),
content_type=ct, object_id=o.id,permission=p
).delete()
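# Illustrative usage sketch (not part of the original module). The Article model,
# username and permission codename below are hypothetical; objects must be model
# instances, while users/groups/permissions may also be passed as strings:
#   article = Article.objects.get(pk=1)
#   grant('alice', [], 'news.change_article', article)
#   revoke('alice', [], 'news.change_article', article)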
|
mit
| -8,878,782,757,124,568,000
| 30.431953
| 128
| 0.512236
| false
| 4.219222
| false
| false
| false
|
ColorTyWorld/GISRS
|
src/python/landsat/ndvi.py
|
1
|
8766
|
import sys, os, math, time
import arcpy
from arcpy import env
from arcpy.sa import *
arcpy.CheckOutExtension("spatial")
#Metadata exists in one of two standard formats (finds the correct name for each field)
def acquireMetadata(metadata, band):
band = str(band)
metadatalist = []
if ("RADIANCE_MAXIMUM_BAND_" + band) in metadata.keys():
BANDFILE = "FILE_NAME_BAND_" + band
LMAX = "RADIANCE_MAXIMUM_BAND_" + band
LMIN = "RADIANCE_MINIMUM_BAND_" + band
QCALMAX = "QUANTIZE_CAL_MAX_BAND_" + band
QCALMIN = "QUANTIZE_CAL_MIN_BAND_" + band
DATE = "DATE_ACQUIRED"
metadatalist = [BANDFILE, LMAX, LMIN, QCALMAX, QCALMIN, DATE]
elif ("LMAX_BAND" + band) in metadata.keys():
BANDFILE = "BAND" + band + "_FILE_NAME"
LMAX = "LMAX_BAND" + band
LMIN = "LMIN_BAND" + band
QCALMAX = "QCALMAX_BAND" + band
QCALMIN = "QCALMIN_BAND" + band
DATE ="ACQUISITION_DATE"
metadatalist = [BANDFILE, LMAX, LMIN, QCALMAX, QCALMIN, DATE]
else:
arcpy.AddError('There was a problem reading the metadata for this file. Please make sure the _MTL.txt is in Level 1 data format')
return metadatalist
#Calculate the radiance from metadata on band.
def calcRadiance (LMAX, LMIN, QCALMAX, QCALMIN, QCAL, band):
LMAX = float(LMAX)
LMIN = float(LMIN)
QCALMAX = float(QCALMAX)
QCALMIN = float(QCALMIN)
gain = (LMAX - LMIN)/(QCALMAX-QCALMIN)
inraster = Raster(QCAL)
outname = 'RadianceB'+str(band)+'.tif'
arcpy.AddMessage('Band'+str(band))
arcpy.AddMessage('LMAX ='+str(LMAX))
arcpy.AddMessage('LMIN ='+str(LMIN))
arcpy.AddMessage('QCALMAX ='+str(QCALMAX))
arcpy.AddMessage('QCALMIN ='+str(QCALMIN))
arcpy.AddMessage('gain ='+str(gain))
outraster = (gain * (inraster-QCALMIN)) + LMIN
#outraster.save(outname)
return outraster
def calcReflectance(solarDist, ESUN, solarElevation, radiance, scaleFactor):
#Value for solar zenith is 90 degrees minus solar elevation (angle from horizon to the center of the sun)
# See Landsat7_Handbook 11.3.2 Radiance to Reflectance
solarZenith = ((90.0 - (float(solarElevation)))*math.pi)/180 #Converted from degrees to radians
solarDist = float(solarDist)
ESUN = float(ESUN)
outname = 'ReflectanceB'+str(band)+'.tif'
arcpy.AddMessage('Band'+str(band))
arcpy.AddMessage('solarDist ='+str(solarDist))
arcpy.AddMessage('solarDistSquared ='+str(math.pow(solarDist, 2)))
arcpy.AddMessage('ESUN ='+str(ESUN))
arcpy.AddMessage('solarZenith ='+str(solarZenith))
outraster = (math.pi * radiance * math.pow(solarDist, 2)) / (ESUN * math.cos(solarZenith)) * scaleFactor
return outraster
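#Reference for the reflectance formula used above (Landsat7 Handbook 11.3.2):
#reflectance = (pi * L * d^2) / (ESUN * cos(solarZenith)) * scaleFactor, where L is
#the radiance raster and d the Earth-Sun distance in AU. Worked example (sketch,
#not in the original script): L=100, d=1.0, ESUN=1547, solarElevation=45 deg and
#scaleFactor=1 give roughly pi*100/(1547*cos(45 deg)) ~= 0.287 per pixel.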
#Calculate the solar distance based on julian day
def calcSolarDist (jday):
#Values taken from d.csv file which is a formatted version of the d.xls file
#associated with the Landsat7 handbook, representing the distance of the sun
#for each julian day (1-366).
#this line keeps the relative path were this script is executing
filepath = os.path.join(os.path.dirname(sys.argv[0]), 'd.csv')
f = open(filepath, "r")
lines = f.readlines()[2:]
distances = []
for x in range(len(lines)):
distances.append(float(lines[x].strip().split(',')[1]))
f.close()
jday = int(jday)
dist = distances[jday - 1]
return dist
def calcJDay (date):
    #Separate date aspects into list (check for consistency in formatting of all
    #Landsat7 metadata) YYYY-MM-DD
dt = date.rsplit("-")
    #Cast each part of the date as an integer in the 9 int tuple mktime
t = time.mktime((int(dt[0]), int(dt[1]), int(dt[2]), 0, 0, 0, 0, 0, 0))
    #As part of the time package the 7th int in mktime is calculated as Julian Day
#from the completion of other essential parts of the tuple
jday = time.gmtime(t)[7]
return jday
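#Example (sketch, not in the original script): calcJDay('2011-11-14') returns 318,
#matching the day-of-year embedded in the LT51320322011318IKR01 scene id used below.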
def getESUN(bandNum, SIType):
SIType = SIType
ESUN = {}
#from NASA's Landsat7_Handbook Table 11.3
#ETM+ Solar Spectral Irradiances (generated using the combined Chance-Kurucz Solar Spectrum within MODTRAN 5)
if SIType == 'ETM+ ChKur':
ESUN = {'b1':1970,'b2':1842,'b3':1547,'b4':1044,'b5':225.7,'b7':82.06,'b8':1369}
#from NASA's Landsat7_Handbook Table 9.1
#from the LPS ACCA algorith to correct for cloud cover
if SIType == 'LPS ACAA Algorithm':
ESUN = {'b1':1969,'b2':1840,'b3':1551,'b4':1044,'b5':225.7,'b7':82.06,'b8':1368}
#from Revised Landsat-5 TM Radiometric Calibration Procedures and Postcalibration, Table-2
#Gyanesh Chander and Brian Markham. Nov 2003.
#Landsat 5 ChKur
if SIType == 'Landsat 5 ChKur':
ESUN = {'b1':1957,'b2':1826,'b3':1554,'b4':1036,'b5':215,'b7':80.67}
#from Revised Landsat-5 TM Radiometric Calibration Procedures and Postcalibration, Table-2
#Gyanesh Chander and Brian Markham. Nov 2003.
#Landsat 4 ChKur
if SIType == 'Landsat 4 ChKur':
ESUN = {'b1':1957,'b2':1825,'b3':1557,'b4':1033,'b5':214.9,'b7':80.72}
bandNum = str(bandNum)
return ESUN[bandNum]
def readMetadata(metadataFile):
f = metadataFile
#Create an empty dictionary with which to populate all the metadata fields.
metadata = {}
    #Each item in the txt document is separated by a space and each key is
    #equated with '='. This loop strips and separates then fills the dictionary.
for line in f:
if not line.strip() == "END":
val = line.strip().split('=')
metadata [val[0].strip()] = val[1].strip().strip('"')
else:
break
return metadata
#Takes the unicode parameter input from Arc and turns it into a nice python list
def cleanList(bandList):
bandList = list(bandList)
for x in range(len(bandList)):
bandList[x] = str(bandList[x])
while ';' in bandList:
bandList.remove(';')
return bandList
#////////////////////////////////////MAIN LOOP///////////////////////////////////////
# TM5
work_dic = 'F:\\Data\\HRB\\RS\\Landsat\\Landsat5\\TM\\132_32\\LT51320322011318IKR01\\'
metadataPath = work_dic + 'LT51320322011318IKR01_MTL.txt'
out_dic = 'F:\\Data\\HRB\\RS\\Landsat\\Landsat5\\TM\\132_32\\LT51320322011318IKR01\\'
SIType = 'Landsat 5 ChKur'
keepRad = 'false'
keepRef = 'true'
scaleFactor = 1.0
min_ndvi = 0.15
env.workspace = work_dic
arcpy.env.overwriteOutput = True
ref_file_exit = 'false'
arcpy.AddMessage(scaleFactor)
if SIType =='Landsat 4 ChKur' :
bandList = cleanList(['5','7'])
else:
bandList = cleanList(['3','4'])
metadataFile = open(metadataPath)
metadata = readMetadata(metadataFile)
metadataFile.close()
successful = []
failed = []
if SIType =='Landsat 4 ChKur' :
# from http://landsat.gsfc.nasa.gov/the-multispectral-scanner-system/
# band 5 and 7 of MSS are equivalent to 3 and 4 of TM
    ref_file_exit = os.path.exists(work_dic + "ReflectanceB5.tif") and \
                    os.path.exists(work_dic + "ReflectanceB7.tif")
else:
    ref_file_exit = os.path.exists(work_dic + "ReflectanceB3.tif") and \
                    os.path.exists(work_dic + "ReflectanceB4.tif")
if ref_file_exit:
metlist = acquireMetadata(metadata, '5')
print 'Reflectance files existed'
else:
print 'Calculating reflectances'
for band in bandList:
bandstr = str(band)
print bandstr
metlist = acquireMetadata(metadata, band)
BANDFILE = metlist[0]
LMAX = metlist[1]
LMIN = metlist[2]
QCALMAX = metlist[3]
QCALMIN = metlist[4]
DATE = metlist[5]
ESUNVAL = "b" + band
#try:
radianceRaster = calcRadiance(metadata[LMAX], metadata[LMIN], metadata[QCALMAX], metadata[QCALMIN], metadata[BANDFILE], band)
reflectanceRaster = calcReflectance(calcSolarDist(calcJDay(metadata[DATE])), getESUN(ESUNVAL, SIType), metadata['SUN_ELEVATION'], radianceRaster, scaleFactor)
outname = 'ReflectanceB'+ bandstr
reflectanceRaster.save(outname)
successful.append(BANDFILE)
DATE = metlist[5]
day = metadata[DATE]
if SIType =='Landsat 4 ChKur' :
nir = Raster('ReflectanceB7.tif')
red = Raster('ReflectanceB5.tif')
else:
nir = Raster('ReflectanceB4.tif')
red = Raster('ReflectanceB3.tif')
ndvi_out_ras = out_dic + "ndvi_" + day + ".tif"
print 'Calculating NDVI'
raw_ndvi = (nir-red)/(nir+red)
ndvi = Con((raw_ndvi < min_ndvi) | (raw_ndvi > 1.0), 0, raw_ndvi)
arcpy.gp.SetNull_sa(ndvi, ndvi, ndvi_out_ras, "value = 0")
print 'NDVI file saved'
if keepRef != 'true':
arcpy.Delete_management(nir)
arcpy.Delete_management(red)
print 'Reflectance files deleted'
|
gpl-3.0
| -8,988,101,215,451,264,000
| 32.458015
| 166
| 0.646019
| false
| 3.017556
| false
| false
| false
|
hpcugent/easybuild-framework
|
easybuild/tools/multidiff.py
|
1
|
10576
|
# #
# Copyright 2014-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module which allows the diffing of multiple files
:author: Toon Willems (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import difflib
import math
import os
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file
from easybuild.tools.systemtools import det_terminal_size
SEP_WIDTH = 5
# text colors
PURPLE = "\033[0;35m"
# background colors
GREEN_BACK = "\033[0;42m"
RED_BACK = "\033[0;41m"
# end character for colorized text
END_COLOR = "\033[0m"
# meaning characters in diff context
HAT = '^'
MINUS = '-'
PLUS = '+'
SPACE = ' '
QUESTIONMARK = '?'
END_LONG_LINE = '...'
# restrict displaying of differences to limited number of groups
MAX_DIFF_GROUPS = 3
_log = fancylogger.getLogger('multidiff', fname=False)
class MultiDiff(object):
"""
Class representing a multi-diff.
"""
def __init__(self, base_fn, base_lines, files, colored=True):
"""
MultiDiff constructor
:param base: base to compare with
:param files: list of files to compare with base
:param colored: boolean indicating whether a colored multi-diff should be generated
"""
self.base_fn = base_fn
self.base_lines = base_lines
self.files = files
self.colored = colored
self.diff_info = {}
def parse_line(self, line_no, diff_line, meta, squigly_line):
"""
Register a diff line
:param line_no: line number
:param diff_line: diff line generated by difflib
:param meta: meta information (e.g., filename)
:param squigly_line: squigly line indicating which characters changed
"""
# register (diff_line, meta, squigly_line) tuple for specified line number and determined key
key = diff_line[0]
if not key in [MINUS, PLUS]:
raise EasyBuildError("diff line starts with unexpected character: %s", diff_line)
line_key_tuples = self.diff_info.setdefault(line_no, {}).setdefault(key, [])
line_key_tuples.append((diff_line, meta, squigly_line))
def color_line(self, line, color):
"""Create colored version of given line, with given color, if color mode is enabled."""
if self.colored:
line = ''.join([color, line, END_COLOR])
return line
def merge_squigly(self, squigly1, squigly2):
"""Combine two squigly lines into a single squigly line."""
sq1 = list(squigly1)
sq2 = list(squigly2)
# longest line is base
base, other = (sq1, sq2) if len(sq1) > len(sq2) else (sq2, sq1)
for i, char in enumerate(other):
if base[i] in [HAT, SPACE] and base[i] != char:
base[i] = char
return ''.join(base)
def colorize(self, line, squigly):
"""Add colors to the diff line based on the squigly line."""
if not self.colored:
return line
# must be a list so we can insert stuff
chars = list(line)
flag = ' '
offset = 0
color_map = {
HAT: GREEN_BACK if line.startswith(PLUS) else RED_BACK,
MINUS: RED_BACK,
PLUS: GREEN_BACK,
}
if squigly:
for i, squigly_char in enumerate(squigly):
if squigly_char != flag:
chars.insert(i + offset, END_COLOR)
offset += 1
if squigly_char in [HAT, MINUS, PLUS]:
chars.insert(i + offset, color_map[squigly_char])
offset += 1
flag = squigly_char
chars.insert(len(squigly) + offset, END_COLOR)
else:
chars.insert(0, color_map.get(line[0], ''))
chars.append(END_COLOR)
return ''.join(chars)
def get_line(self, line_no):
"""
Return the line information for a specific line
:param line_no: line number to obtain information for
:return: list with text lines providing line information
"""
output = []
diff_dict = self.diff_info.get(line_no, {})
for key in [MINUS, PLUS]:
lines, changes_dict, squigly_dict = set(), {}, {}
# obtain relevant diff lines
if key in diff_dict:
for (diff_line, meta, squigly_line) in diff_dict[key]:
if squigly_line:
# merge squigly lines
if diff_line in squigly_dict:
squigly_line = self.merge_squigly(squigly_line, squigly_dict[diff_line])
squigly_dict[diff_line] = squigly_line
lines.add(diff_line)
# track meta info (which filenames are relevant)
changes_dict.setdefault(diff_line, set()).add(meta)
# sort: lines with most changes last, limit number to MAX_DIFF_GROUPS
lines = sorted(lines, key=lambda line: len(changes_dict[line]))[:MAX_DIFF_GROUPS]
for diff_line in lines:
squigly_line = squigly_dict.get(diff_line, '')
line = ['%s %s' % (line_no, self.colorize(diff_line, squigly_line))]
# mention to how may files this diff applies
files = changes_dict[diff_line]
num_files = len(self.files)
line.append("(%d/%d)" % (len(files), num_files))
# list files to which this diff applies (don't list all files)
if len(files) != num_files:
line.append(', '.join(files))
output.append(' '.join(line))
# prepend spaces to match line number length in non-color mode
if not self.colored and squigly_line:
prepend = ' ' * (2 + int(math.log10(line_no)))
output.append(''.join([prepend, squigly_line]))
        # print separator only if needed
if diff_dict and not self.diff_info.get(line_no + 1, {}):
output.extend([' ', '-' * SEP_WIDTH, ' '])
return output
def __str__(self):
"""
Create a string representation of this multi-diff
"""
def limit(text, length):
"""Limit text to specified length, terminate color mode and add END_LONG_LINE if trimmed."""
if len(text) > length:
maxlen = length - len(END_LONG_LINE)
res = text[:maxlen]
if self.colored:
res += END_COLOR
return res + END_LONG_LINE
else:
return text
_, term_width = det_terminal_size()
base = self.color_line(self.base_fn, PURPLE)
filenames = ', '.join(map(os.path.basename, self.files))
output = [
"Comparing %s with %s" % (base, filenames),
'=' * SEP_WIDTH,
]
diff = False
for i in range(len(self.base_lines)):
lines = filter(None, self.get_line(i))
if lines:
output.append('\n'.join([limit(line, term_width) for line in lines]))
diff = True
if not diff:
output.append("(no diff)")
output.append('=' * SEP_WIDTH)
return '\n'.join(output)
def multidiff(base, files, colored=True):
"""
Generate a diff for multiple files, all compared to base.
:param base: base to compare with
:param files: list of files to compare with base
:param colored: boolean indicating whether a colored multi-diff should be generated
:return: text with multidiff overview
"""
differ = difflib.Differ()
base_lines = read_file(base).split('\n')
mdiff = MultiDiff(os.path.basename(base), base_lines, files, colored=colored)
# use the MultiDiff class to store the information
for filepath in files:
lines = read_file(filepath).split('\n')
diff = differ.compare(lines, base_lines)
filename = os.path.basename(filepath)
        # construct map of line number to diff lines and mapping between diff lines
# example partial diff:
#
# - toolchain = {'name': 'goolfc', 'version': '2.6.10'}
# ? - ^ ^
#
# + toolchain = {'name': 'goolf', 'version': '1.6.20'}
# ? ^ ^
#
local_diff = {}
squigly_dict = {}
last_added = None
offset = 1
for (i, line) in enumerate(diff):
# diff line indicating changed characters on line above, a.k.a. a 'squigly' line
if line.startswith(QUESTIONMARK):
squigly_dict[last_added] = line
offset -= 1
# diff line indicating addition change
elif line.startswith(PLUS):
local_diff.setdefault(i + offset, []).append((line, filename))
last_added = line
# diff line indicated removal change
elif line.startswith(MINUS):
local_diff.setdefault(i + offset, []).append((line, filename))
last_added = line
offset -= 1
# construct the multi-diff based on the constructed dict
for line_no in local_diff:
for (line, filename) in local_diff[line_no]:
mdiff.parse_line(line_no, line.rstrip(), filename, squigly_dict.get(line, '').rstrip())
return str(mdiff)
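# Illustrative usage sketch (not part of the original module); the easyconfig
# paths below are hypothetical:
#   print(multidiff('/tmp/base.eb', ['/tmp/variant1.eb', '/tmp/variant2.eb'],
#                   colored=False))
# Each differing line is reported once per group of files sharing that change,
# limited to MAX_DIFF_GROUPS groups per line number.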
|
gpl-2.0
| -2,696,512,201,220,367,000
| 35.343643
| 104
| 0.574414
| false
| 3.904024
| false
| false
| false
|
grlurton/orbf_data_validation
|
src/data_preparation/excel_consolidation/get_excel_metadata.py
|
1
|
2188
|
#%%
import xlrd
import itertools
import os
import pandas as pd
def combine_paths(directory, files):
return (os.path.join(directory, filename) for filename in files)
def get_excel_for_district(district_path):
files = os.walk(district_path)
files_per_directory = [combine_paths(walk[0],walk[2]) for walk in files]
all_files = list(itertools.chain(*files_per_directory))
return (f for f in all_files if f.endswith(('xlsx',"xls")))
def get_districts(root_path):
"""
Start from the directory containing all the districts. A district is assumed to be any
directory in root_path.
"""
return (os.path.join(root_path,directory) for directory in os.listdir(root_path) if os.path.isdir(os.path.join(root_path,directory)))
def get_districts_with_files(root_path):
return ((district, get_excel_for_district(district)) for district in get_districts(root_path))
def get_excel_metadata(filename):
try :
book = xlrd.open_workbook(filename , on_demand = True )
except :
return ((filename.replace("\\", "/")) , "error opening file" )
print(filename)
try :
if filename.endswith("xlsx"):
metadata = {"filename":[filename.replace("\\", "/")],
"user_name":[book.props["creator"]] ,
"last_modif_by":[book.props["last_modified_by"]] ,
"created":[book.props["created"]] ,
"modified":[book.props["modified"]]}
elif filename.endswith("xls"):
metadata = {"filename":[filename.replace("\\", "/")],
"user_name":[book.user_name]}
except :
metadata = ((filename.replace("\\", "/")) , "file has no props")
return pd.DataFrame.from_dict(metadata)
def full_function(root_path) :
for district, files in get_districts_with_files(root_path) :
for filename in files :
yield get_excel_metadata(filename)
#%%
data_path = 'data/raw/rbv_credes/'
out = pd.DataFrame()
for results in full_function(data_path) :
out = out.append(results)
out.to_csv(data_path + "excel_metadata.csv")
|
mit
| 5,083,607,058,927,558,000
| 36.385965
| 137
| 0.603291
| false
| 3.616529
| false
| false
| false
|
ranjaykrishna/simple-amt
|
reject_assignments.py
|
1
|
1247
|
import argparse, json
import simpleamt
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--prod', action='store_false', dest='sandbox',
default=True,
help="Whether to run on the production AMT site.")
parser.add_argument('--assignment_ids_file')
parser.add_argument('--config', default='config.json', type=simpleamt.json_file)
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
if args.assignment_ids_file is None:
parser.error('Must specify --assignment_ids_file.')
with open(args.assignment_ids_file, 'r') as f:
assignment_ids = [line.strip() for line in f]
print ('This will reject %d assignments with '
'sandbox=%s' % (len(assignment_ids), str(args.sandbox)))
print 'Continue?'
s = raw_input('(Y/N): ')
if s == 'Y' or s == 'y':
print 'Rejecting assignments'
for idx, assignment_id in enumerate(assignment_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(assignment_ids))
try:
mtc.reject_assignment(assignment_id, feedback='Invalid results')
except:
print "Could not reject: %s" % (assignment_id)
else:
print 'Aborting'
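# Example invocations (sketch, not part of the original script; the ids file name
# is hypothetical):
#   python reject_assignments.py --assignment_ids_file rejected_ids.txt         # sandbox
#   python reject_assignments.py --prod --assignment_ids_file rejected_ids.txt  # production AMT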
|
mit
| 336,828,048,293,545,300
| 35.676471
| 82
| 0.639936
| false
| 3.552707
| false
| false
| false
|
tgcmteam/tgcmlinux
|
src/tgcm/ui/MSD/MSDActionsManager.py
|
1
|
2000
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Authors : Roberto Majadas <roberto.majadas@openshine.com>
# Cesar Garcia Tapia <tapia@openshine.com>
# Oier Blasco <oierblasco@gmail.com>
# Alvaro Peña <alvaro.pena@openshine.com>
#
# Copyright (c) 2003-2012, Telefonica Móviles España S.A.U.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import tgcm
import tgcm.core.Actions
class MSDActionsManager:
def __init__(self, conn_manager):
tgcm.info("Init tgcmActionsManager")
self._actions_manager = tgcm.core.Actions.ActionManager()
self.conn_manager = conn_manager
self.systray = None
self.action_list = self._actions_manager.get_actions()
def set_systray(self, systray):
self.systray = systray
def set_connections_model(self, model):
for x in self.action_list.keys():
obj = self.action_list[x]
obj.set_connections_model(model)
def connect_signals(self):
for x in self.action_list.keys():
obj = self.action_list[x]
obj.connect_signals()
def launch_action(self, action_codename):
obj = self.action_list[action_codename]
obj.launch(self.conn_manager)
def launch_help(self, action_codename):
obj = self.action_list[action_codename]
obj.launch_help()
|
gpl-2.0
| 3,031,636,920,097,830,000
| 33.431034
| 68
| 0.68002
| false
| 3.44905
| false
| false
| false
|
pp-mo/pylogy
|
newideas.py
|
1
|
3123
|
#
# mechanism
#
def Pred__possibles(args):
for rule in self.rules:
for possible in rule.possibles(args):
yield possible
def Rule__possibles(args):
locals = self.match_args(self.args, args)
if locals is None:
return
# E.G. "pred(X, 1)" matches ("Q", "V") producing {'X':Var("Q"), 'V':Lit(1)}
# (and returns [X, 1])
for term in self.terms:
local, term_args = term.fill_args(locals)
# E.G. "call(X, V, 3)" matches from {'X':Var("Q"),'V':Lit(1)}
# producing {X:Q, V:1} and returning [X, 1, 3]
# E.G. "call(X, Z)" with {X:Q, V:1} matches [Q, Z] yielding {X:Q, V:1, Z:V(xxx)}
# and then P(Z) would match [Z].
for possible in term.pred.possibles(term_args): # NB *not* locals
yield possible
def Rule__match_args(args):
vars = {}
for arg in self.args:
vars = arg.match_vars(arg, vars) # None if fail
if vars is None:
break
return vars
_uid = 0
def new_temp_var(basename):
    global _uid
    _uid += 1
    return Var(basename + '_' + str(_uid))
def Term__fill_args(vars):
args = []
for arg in self.args:
# E.G. I(4) returns (I(4))
# E.G. V('X') pulls from {X:3} returning (I(3))
# E.G. V('Z') pulls from {V:Z} returning (V(Z))
# E.G. V('Q') adds {Q:V()}
if isinstance(arg, VarArg):
if arg.name not in vars:
vars[arg.name] = new_temp_var(arg.name)
arg = vars[arg.name]
        elif isinstance(arg, LiteralArg):
pass
else:
raise ValueError()
args.append(arg)
return args
def LiteralArg__match_term(term, vars):
if isinstance(term, LiteralArg):
# E.G. f(X, 2) ?match (_Y, Lit(?))
if self.value == term.value:
# E.G. f(X, 2) ?match (_Y, 2)
pass # ok
else:
# E.G. f(X, 2) ?match (_Y, 3)
vars = None # fail
elif isinstance(term, VarArg):
# E.G. f(X, 2) ?match f(_Y, Q)
existing = vars.get(term.name)
if not existing:
# E.G. f(X, 2) ?match f(_Y, _Q)
vars[term.name] = term
        elif existing.value == self.value:
            # E.G. f(X, 2) ?match f(_Y, Z)
            pass
        else:
            vars = None  # bound to a different value: fail
return vars
def VarArg__match_term(term, vars):
name = self.name
if isinstance(term, LiteralArg):
# E.G. f(X) ?match (3)
if name in vars:
vars[name] = new_temp_var(name)
vars[name] = term
elif isinstance(term, VarArg):
existing = vars.get(self.name)
if not existing:
vars[self.name] = term
else:
raise ValueError
return vars
def ConsArg__match_term(term, vars):
if (isinstance(term, LiteralTerm) and
isinstance(term.value, list) and len(term.value) > 0):
vars = self.head.match_vars(make_term(term.value[0]), vars)
if vars is not None:
vars = self.tail.match_vars(make_term(term.value[1:]), vars)
else:
raise ValueError
return vars
|
gpl-3.0
| -2,520,361,608,389,034,000
| 29.617647
| 88
| 0.51521
| false
| 3.151362
| false
| false
| false
|
SauloAislan/ironic
|
ironic/conf/opts.py
|
1
|
3708
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from oslo_log import log
import ironic.conf
_default_opt_lists = [
ironic.conf.default.api_opts,
ironic.conf.default.driver_opts,
ironic.conf.default.exc_log_opts,
ironic.conf.default.hash_opts,
ironic.conf.default.image_opts,
ironic.conf.default.img_cache_opts,
ironic.conf.default.netconf_opts,
ironic.conf.default.notification_opts,
ironic.conf.default.path_opts,
ironic.conf.default.portgroup_opts,
ironic.conf.default.service_opts,
ironic.conf.default.utils_opts,
]
_opts = [
('DEFAULT', itertools.chain(*_default_opt_lists)),
('agent', ironic.conf.agent.opts),
('api', ironic.conf.api.opts),
('audit', ironic.conf.audit.opts),
('cimc', ironic.conf.cisco.cimc_opts),
('cinder', ironic.conf.cinder.list_opts()),
('cisco_ucs', ironic.conf.cisco.ucsm_opts),
('conductor', ironic.conf.conductor.opts),
('console', ironic.conf.console.opts),
('database', ironic.conf.database.opts),
('deploy', ironic.conf.deploy.opts),
('dhcp', ironic.conf.dhcp.opts),
('drac', ironic.conf.drac.opts),
('glance', ironic.conf.glance.list_opts()),
('ilo', ironic.conf.ilo.opts),
('inspector', ironic.conf.inspector.list_opts()),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('iscsi', ironic.conf.iscsi.opts),
('keystone', ironic.conf.keystone.opts),
('metrics', ironic.conf.metrics.opts),
('metrics_statsd', ironic.conf.metrics_statsd.opts),
('neutron', ironic.conf.neutron.list_opts()),
('oneview', ironic.conf.oneview.opts),
('pxe', ironic.conf.pxe.opts),
('service_catalog', ironic.conf.service_catalog.list_opts()),
('snmp', ironic.conf.snmp.opts),
('ssh', ironic.conf.ssh.opts),
('swift', ironic.conf.swift.list_opts()),
]
def list_opts():
"""Return a list of oslo.config options available in Ironic code.
The returned list includes all oslo.config options. Each element of
the list is a tuple. The first element is the name of the group, the
second element is the options.
The function is discoverable via the 'ironic' entry point under the
'oslo.config.opts' namespace.
The function is used by Oslo sample config file generator to discover the
options.
:returns: a list of (group, options) tuples
"""
return _opts
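# Illustrative sketch (not part of the original module): the sample-config
# generator consumes this function through the 'oslo.config.opts' entry point,
# roughly as in
#   oslo-config-generator --namespace ironic --output-file etc/ironic/ironic.conf.sample
# (the exact invocation and output path depend on the project's generator
# configuration and are assumptions here).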
def update_opt_defaults():
log.set_defaults(
default_log_levels=[
'amqp=WARNING',
'amqplib=WARNING',
'qpid.messaging=INFO',
# TODO(therve): when bug #1685148 is fixed in oslo.messaging, we
# should be able to remove one of those 2 lines.
'oslo_messaging=INFO',
'oslo.messaging=INFO',
'sqlalchemy=WARNING',
'stevedore=INFO',
'eventlet.wsgi.server=INFO',
'iso8601=WARNING',
'paramiko=WARNING',
'requests=WARNING',
'neutronclient=WARNING',
'glanceclient=WARNING',
'urllib3.connectionpool=WARNING',
'keystonemiddleware.auth_token=INFO',
'keystoneauth.session=INFO',
]
)
|
apache-2.0
| 3,209,014,342,729,036,300
| 33.654206
| 77
| 0.653722
| false
| 3.660415
| false
| false
| false
|
WladimirSidorenko/SentiLex
|
scripts/visualize_graph.py
|
1
|
9464
|
#!/usr/bin/env python2.7
##################################################################
# Imports
from __future__ import print_function, unicode_literals
from germanet import Germanet
from wordnet import Wordnet
from collections import Counter, defaultdict
from itertools import chain
from matplotlib import collections as mc
import argparse
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import sys
##################################################################
# Constants
WORDNET = "wordnet"
GERMANET = "germanet"
REL2COLOR = {
# GermaNet
"causes": "#00F5FF",
"entails": "#00F5FF",
"has_antonym": "#8B1A1A",
"has_component_meronym": "#00008B",
"has_member_meronym": "#00008B",
"has_portion_meronym": "#00008B",
"has_substance_meronym": "#00008B",
"has_participle": "#FFA54f",
"has_pertainym": "#FFFF00",
"has_hypernym": "#8b4789",
"has_hyponym": "#8b4789",
"is_related_to": "#006400",
# WordNet
"Hyponym": "#8b4789",
"Instance Hyponym": "#8b4789",
"Antonym": "#8B1A1A",
"Member holonym": "#00008B",
"Part holonym": "#00008B",
"Substance holonym": "#00008B",
"Verb Group": "#00CD00",
"Member meronym": "#00008B",
"Part meronym": "#00008B",
"Substance meronym": "#00008B",
"Similar to": "#FF7256",
"Entailment": "#00F5FF",
"Derivationally related form": "#006400",
"Member of this domain - TOPIC": "#EE82EE",
"Member of this domain - REGION": "#EE82EE",
"Member of this domain - USAGE": "#EE82EE",
"Domain of synset - TOPIC": "#EE82EE",
"Domain of synset - REGION": "#EE82EE",
"Domain of synset - USAGE": "#EE82EE",
"Participle of verb": "#FFA54F",
"Attribute": "#FFA500",
"Cause": "#00F5FF",
"Hypernym": "#8b4789",
"Instance Hypernym": "#8b4789",
"Derived from adjective": "#FFFF00",
"Also see": "#006400"
}
REL2LABEL = {
# GermaNet
"has_antonym": "antonym",
"has_component_meronym": "meronym",
"has_member_meronym": "meronym",
"has_portion_meronym": "meronym",
"has_substance_meronym": "meronym",
"has_participle": "participle",
"has_pertainym": "pertainym",
"has_hypernym": "hypernym",
"has_hyponym": "hyponym",
"is_related_to": "related_to",
# WordNet
"Hyponym": "hyponym",
"Instance Hyponym": "hyponym",
"Antonym": "antonym",
"Member holonym": "holonym",
"Part holonym": "holonym",
"Substance holonym": "holonym",
"Verb Group": "verb group",
"Member meronym": "meronym",
"Part meronym": "meronym",
"Substance meronym": "meronym",
"Similar to": "similar to",
"Entailment": "entailment",
"Derivationally related form": "related_to",
"Member of this domain - TOPIC": "domain member",
"Member of this domain - REGION": "domain member",
"Member of this domain - USAGE": "domain member",
"Domain of synset - TOPIC": "domain",
"Domain of synset - REGION": "domain",
"Domain of synset - USAGE": "domain",
"Participle of verb": "participle",
"Attribute": "attribute",
"Cause": "cause",
"Hypernym": "hypernym",
"Instance Hypernym": "hypernym",
"Derived from adjective": "derived_from",
"Also see": "also see"
}
AX = plt.axes()
POS2COLOR = {"verben": "#00EE76", "v": "#00EE76",
"nomen": "#36648B", "n": "#36648B",
"adj": "#FFA54F", "a": "#FFA54F",
"r": "#97FFFF", "s": "#FF4500"}
POS2LABEL = {"nomen": "noun", "n": "noun",
"verben": "verb", "v": "verb",
"adj": "adjective", "a": "adjective",
"r": "adverb", "s": "adjective satellite"}
_POS2X = {"adj": 0, "a": 0,
"nomen": 1, "n": 1,
"verben": 2, "v": 2,
"r": 0, "s": 1}
_POS2Y = {"adj": 0, "a": 0,
"nomen": 1, "n": 1.5,
"verben": 0, "v": 0,
"r": 2.5, "s": 0.35}
DE_REL_RELS = ["has_hyponym", "has_antonym",
"has_pertainym", "is_related_to",
"has_participle"]
EN_REL_RELS = ["Hyponym", "Instance Hyponym", "Antonym",
"Derived from adjective", "Derivationally related form",
"Participle of verb"]
##################################################################
# Methods
def main(a_argv):
"""Main method for visualizing WordNet databases.
@param a_argv - command-line arguments
@return \c 0 on success, non-\c 0 otherwise
"""
argparser = argparse.ArgumentParser(
description="Script for visualizing WordNet-like databases.")
argparser.add_argument("wntype",
help="type of lexical database to visualize",
choices=(WORDNET, GERMANET))
argparser.add_argument("path", help="path to the lexical database")
args = argparser.parse_args(a_argv)
# nodes' X position, Y position, and color
_X, _Y = [], []
POS2X = defaultdict(list)
POS2Y = defaultdict(list)
# pos color mapping
POS2CNT = Counter()
# mapping from pos to X range
POS2XRANGE = {}
# mapping from pos to Y range
POS2YRANGE = {}
# mapping from synset id to node's index
SYNID2NODEID = {}
SIGMA = 10
# line collection to be initialized later
lc = None
# populate nodes
if args.wntype == GERMANET:
print("Reading GermaNet synsets... ", end="", file=sys.stderr)
inet = Germanet(args.path)
print("done", file=sys.stderr)
rel_rels = DE_REL_RELS
else:
print("Reading WordNet synsets... ", end="", file=sys.stderr)
inet = Wordnet(args.path)
print("done", file=sys.stderr)
rel_rels = EN_REL_RELS
# obtain available parts of speech
POS2CNT.update(inet.synid2pos.itervalues())
poses = set(inet.synid2pos.itervalues())
nposes = float(len(poses))
rpart = 500000. / min(3, nposes)
# populate colors and ranges for parts of speech
x = y = 0
for ipos in poses:
x = _POS2X[ipos]
y = _POS2Y[ipos]
POS2XRANGE[ipos] = x * rpart
POS2YRANGE[ipos] = y * rpart
# add nodes to the graph
x = y = 0.
invsigma = 2.
if args.wntype == WORDNET:
assert ("00704270", "s") in inet.synid2pos, \
"('00704270', 's') is missing"
for i, (isynid, ipos) in enumerate(inet.synid2pos.iteritems()):
# print("isynid =", repr(isynid), file=sys.stderr)
# sys.exit(66)
SYNID2NODEID[isynid] = i
x = np.random.normal(POS2XRANGE[ipos],
POS2CNT[ipos] / invsigma)
y = np.random.normal(POS2YRANGE[ipos],
POS2CNT[ipos] / invsigma)
_X.append(x)
POS2X[ipos].append(x)
_Y.append(y)
POS2Y[ipos].append(y)
# add edges to the graph
lines = []
lcolors = []
lex_rels = None
from_idx = to_idx = x_from = x_to = y_from = y_to = 0
if args.wntype == GERMANET:
iterrels = inet.con_relations.iteritems()
else:
iterrels = inet.relations.iteritems()
for ifrom, irels in iterrels:
# print("ifrom =", repr(ifrom), file=sys.stderr)
# sys.exit(66)
from_idx = SYNID2NODEID[ifrom]
if args.wntype == GERMANET:
lex_rels = [(to_synid, to_rel)
for from_lex in inet.synid2lexids[ifrom]
for to_lex, to_rel in inet.lex_relations[from_lex]
for to_synid in inet.lexid2synids[to_lex]]
else:
lex_rels = []
x_from, y_from = _X[from_idx], _Y[from_idx]
for (ito, irel) in chain(irels, lex_rels):
# print("irel: irel = {:s} {:d}".format(repr(irel),
# irel in rel_rels),
# file=sys.stderr)
if not irel in rel_rels:
continue
# print("rel: ifrom = {:s}, irels = {:s}".format(repr(ifrom),
# repr(irels)),
# file=sys.stderr)
if ito not in SYNID2NODEID and ito[-1] == 'a':
to_idx = SYNID2NODEID[(ito[0], 's')]
else:
to_idx = SYNID2NODEID[ito]
x_to, y_to = _X[to_idx], _Y[to_idx]
lines.append(((x_from, y_from), (x_to, y_to)))
lcolors.append(REL2COLOR.get(irel, "#FFFFFF"))
# draw edges
lc = mc.LineCollection(lines, colors=lcolors,
alpha=0.15, linestyle='-'
)
# draw the graph
AX.add_collection(lc)
for ipos, x in POS2X.iteritems():
plt.scatter(x, POS2Y[ipos], label=POS2LABEL.get(ipos, ipos),
c=[POS2COLOR[ipos]] * len(x))
# add legend for edges
handles, labels = AX.get_legend_handles_labels()
iline = ilabel = None
known_labels = set()
for irel in rel_rels:
iline = mlines.Line2D([], [], color=REL2COLOR[irel], linewidth=3.)
ilabel = REL2LABEL[irel]
if ilabel in known_labels:
continue
handles.append(iline)
labels.append(ilabel)
known_labels.add(ilabel)
plt.legend(handles, labels,
loc="upper right", scatterpoints=1)
plt.axis("off")
plt.savefig(args.wntype + ".png", dpi=200)
plt.show() # display
##################################################################
# Main
if __name__ == "__main__":
main(sys.argv[1:])
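# Example invocations (sketch, not part of the original script; database paths
# are hypothetical):
#   python2.7 visualize_graph.py germanet /path/to/GN_V80_XML
#   python2.7 visualize_graph.py wordnet /path/to/WordNet-3.0/dict
# The script saves germanet.png / wordnet.png in the current directory and then
# opens an interactive matplotlib window.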
|
mit
| -6,633,096,336,561,518,000
| 33.414545
| 74
| 0.539941
| false
| 3.169457
| false
| false
| false
|
keighrim/bananaNER
|
scripts/entity_extr.py
|
1
|
2984
|
# /usr/bin/python
# -*- coding: utf-8 -*-
"""
This program is to
extract named entities from an annotated data file
CS137B, programming assignment #1, Spring 2015
"""
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
__author__ = 'krim'
__date__ = '2/6/2015'
__email__ = 'krim@brandeis.edu'
def read(input_filename):
"""load sentences from data file"""
sentences = []
    sentence = []
    prev_empty = False
with open(input_filename) as in_file:
for line in in_file:
if re.search(r"^\s+$", line):
if not prev_empty:
sentences.append(sentence)
sentence = []
prev_empty = True
else:
try:
sentence.append((line.split("\t")[1].strip(),
line.split("\t")[2].strip(),
line.split("\t")[3].strip()))
except IndexError:
sentence.append((line.split("\t")[1].strip(),
line.split("\t")[2].strip(), ""))
prev_empty = False
return sentences
def find_entities(sents):
    # we'll use 4 lists: ORG, GEO, PERSON, OTHER
org = []
geo = []
other = []
person = []
entity = ""
for sent in sents:
for w, _, b in sent:
try:
bio = b.split("-")[0]
typ = b.split("-")[1]
except IndexError:
bio = "O"
typ = ""
# for person names, do not concatenate
if typ == "PER":
if len(entity) > 0:
cur.append(entity)
entity = ""
person.append(w)
# else, keep track of "I" tagged words and concatenate
else:
if bio == "B":
if len(entity) > 0:
cur.append(entity)
entity = w
if typ == "ORG":
cur = org
elif typ == "LOC" or typ == "GPE":
cur = geo
else:
cur = other
elif bio == "I":
entity += " " + w
else:
if len(entity) > 0:
cur.append(entity)
entity = ""
    # write out lists to corresponding files
with open("org.extr", "w") as orgf, \
open("other.extr", "w") as otherf, \
open("person.extr", "w") as personf, \
open("geo.extr", "w") as geof:
for o in org:
orgf.write(o + "\n")
for ot in other:
otherf.write(ot + "\n")
for p in person:
personf.write(p + "\n")
for g in geo:
geof.write(g + "\n")
if __name__ == '__main__':
# tempted to use all.gold...
find_entities(read("../dataset/train.gold"))
|
gpl-3.0
| -1,419,966,461,462,060,800
| 29.141414
| 70
| 0.413539
| false
| 4.196906
| false
| false
| false
|
kedder/soaring-coupons
|
coupons/models.py
|
1
|
8712
|
from typing import Sequence
import logging
import pytz
import random
import string
import itertools
from datetime import date, datetime
from decimal import Decimal
from django.db import models
log = logging.getLogger(__name__)
SEASON_START_MONTH = 4
SEASON_END_MONTH = 10
class CouponType(models.Model):
id = models.CharField(max_length=32, primary_key=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
title = models.CharField(max_length=255)
welcome_text = models.TextField(null=True)
validity_cond_text = models.CharField(max_length=255, null=True)
deafult_expiration_date = models.DateField()
in_stock = models.BooleanField(default=True)
# Template to use when printing the coupon. Will use django template in
# `templates/coupons/{}.html`
print_template = models.CharField(
max_length=32,
choices=[("flight", "Flight Coupon"), ("courses", "Courses Coupon")],
)
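    # For illustration: a CouponType saved with print_template="flight" would be
    # rendered from templates/coupons/flight.html, following the path pattern in
    # the comment above; the actual rendering call lives outside this model.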
def __str__(self) -> str:
return self.title
class Order(models.Model):
ST_PENDING = 1
ST_PAID = 2
ST_CANCELLED = 3
ST_SPAWNED = 4
coupon_type = models.ForeignKey(CouponType, on_delete=models.CASCADE)
quantity = models.IntegerField()
price = models.DecimalField(max_digits=10, decimal_places=2)
discount = models.DecimalField(max_digits=10, decimal_places=2, default=0)
currency = models.CharField(max_length=8)
paid_amount = models.DecimalField(max_digits=10, decimal_places=2, null=True)
paid_currency = models.CharField(max_length=8, null=True)
payer_name = models.CharField(max_length=255, null=True)
payer_surname = models.CharField(max_length=255, null=True)
payer_email = models.CharField(max_length=255, null=True)
payment_provider = models.CharField(max_length=255, null=True)
test = models.BooleanField(default=False)
status = models.IntegerField(
choices=[
(ST_PENDING, "Pending"),
(ST_PAID, "Paid"),
(ST_CANCELLED, "Cancelled"),
(ST_SPAWNED, "Spawned"),
],
default=ST_PENDING,
)
create_time = models.DateTimeField()
payment_time = models.DateTimeField(null=True)
notes = models.CharField(max_length=255, null=True)
@classmethod
def from_type(cls, coupon_type: CouponType, quantity: int = 1) -> "Order":
return Order(
coupon_type=coupon_type,
quantity=quantity,
price=coupon_type.price,
currency="EUR",
create_time=datetime.now(pytz.utc),
)
def apply_discount(self, discount: int) -> None:
new_price = self.price * (1 - discount / Decimal("100"))
new_price = round(new_price, 2)
self.discount = self.price - new_price
self.price = new_price
log.info(
f"Applied {discount}% discount ({self.discount} {self.currency}) "
f"to order {self.id}"
)
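        # Worked example with made-up numbers: for price=100.00 and
        # apply_discount(15), new_price = 100 * (1 - 15/100) = 85.00, so
        # self.discount becomes 15.00 and self.price becomes 85.00.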
def process(
self,
*,
paid_amount: float,
paid_currency: str,
payer_email: str = None,
payer_name: str = None,
payer_surname: str = None,
payment_provider: str = None,
) -> Sequence["Coupon"]:
"""Process order payment.
Updates order with supplied information and updates status to ST_PAID.
Creates Coupon object. Payment information must be validated before
passing to this method.
"""
if self.status != Order.ST_PENDING:
raise ValueError(f"Cannot process non-pending order {self.id}")
self.paid_amount = paid_amount
self.paid_currency = paid_currency
self.payer_email = payer_email
self.payer_name = payer_name
self.payer_surname = payer_surname
self.status = Order.ST_PAID
self.payment_time = datetime.now(pytz.utc)
self.payment_provider = payment_provider
self.save()
log.info("Order %s processed" % self.id)
# create coupon
assert self.quantity == 1
return Coupon.from_order(self)
def find_coupons(self) -> Sequence["Coupon"]:
return list(Coupon.objects.filter(order=self))
@property
def paid(self) -> bool:
return self.status == Order.ST_PAID
class Coupon(models.Model):
ST_ACTIVE = 1
ST_USED = 2
id = models.CharField(max_length=12, primary_key=True)
order = models.ForeignKey(Order, on_delete=models.CASCADE)
year = models.IntegerField()
status = models.IntegerField(
choices=[(ST_ACTIVE, "Active"), (ST_USED, "Used")], default=ST_ACTIVE
)
use_time = models.DateTimeField(null=True, blank=True)
expires = models.DateField(null=True, blank=True)
@staticmethod
def from_order(order: Order, expires: date = None) -> Sequence["Coupon"]:
"""Create couponse for given order"""
ctype = order.coupon_type
payment_year = (
order.payment_time.year if order.payment_time else order.create_time.year
)
if expires is None:
            # Come up with a sensible expiration date from the coupon type settings
expires = ctype.deafult_expiration_date.replace(year=payment_year)
# If ticket is sold after this year's expiration date, move it to
# the next year
if date.today() > expires:
expires = expires.replace(year=payment_year + 1)
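            # e.g. (hypothetical dates) with a default expiration of Oct 1 and a
            # coupon created on 2020-11-05, `expires` starts as 2020-10-01 and,
            # being already in the past, is pushed to 2021-10-01 by this branch.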
coupons = []
for x in range(order.quantity):
coupon = Coupon(
id=Coupon.gen_unique_id(),
order=order,
year=payment_year,
expires=expires,
)
coupon.save()
coupons.append(coupon)
log.info(f"Coupon {coupon.id} created")
return coupons
@staticmethod
def spawn(
coupon_type: CouponType,
*,
count: int,
email: str,
expires: date,
notes: str = None,
) -> Sequence["Coupon"]:
log.info("Spawning %s coupons", count)
order = Order.from_type(coupon_type, quantity=count)
order.status = Order.ST_SPAWNED
order.notes = notes
order.payer_email = email
order.payment_time = datetime.now(pytz.utc)
order.save()
return Coupon.from_order(order)
@staticmethod
def gen_unique_id() -> str:
        year = date.today().strftime("%y")
        # make sure the id is really unique; regenerate on collision instead of
        # retrying the same value
        for attempt in range(10):
            # add some random digits to make order ids less predictable
            seed = "".join(random.choice(string.digits) for i in range(10))
            uniqueid = f"{year}{seed}"
            try:
                Coupon.objects.get(id=uniqueid)
                log.warning(f"Generated coupon id '{uniqueid}' is not unique")
            except Coupon.DoesNotExist:
                return uniqueid
raise RuntimeError("Cannot generate unique coupon id")
@staticmethod
def get_valid_expirations(today, count):
def seq(start):
curmonth = today.month + 1
curyear = start.year
earliest_month = SEASON_START_MONTH + 3
while True:
if curmonth > SEASON_END_MONTH:
curyear += 1
curmonth = 1
if curmonth <= earliest_month:
curmonth = earliest_month
yield date(curyear, curmonth, 1)
curmonth += 1
return list(itertools.islice(seq(today), 0, count))
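    # Illustrative trace, computed by hand from seq() above using the module
    # constants SEASON_START_MONTH=4 and SEASON_END_MONTH=10:
    #   Coupon.get_valid_expirations(date(2020, 5, 15), 5)
    #   -> [date(2020, 7, 1), date(2020, 8, 1), date(2020, 9, 1),
    #       date(2020, 10, 1), date(2021, 7, 1)]
    # so candidates never start earlier than month SEASON_START_MONTH + 3 and roll
    # over to the next year once SEASON_END_MONTH is passed.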
@property
def active(self):
expired = self.expires and date.today() > self.expires
active = self.status == Coupon.ST_ACTIVE
return active and not expired
@property
def coupon_type(self) -> CouponType:
return self.order.coupon_type
def use(self) -> None:
if not self.active:
raise ValueError(f"Cannot use non-active coupon {self.id}")
self.status = Coupon.ST_USED
self.use_time = datetime.now(pytz.utc)
self.save()
log.info(f"Coupon {self.id} used")
class ScheduledDiscount(models.Model):
date_from = models.DateTimeField()
date_to = models.DateTimeField()
discount = models.IntegerField()
comment = models.TextField(null=True)
@staticmethod
def find_discount_on(now: datetime) -> int:
"""Return discount in percent (0-100) for given time
Or 0 if no discount."""
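        # For example (hypothetical rows): with a ScheduledDiscount covering
        # 2021-06-01 .. 2021-06-15 at discount=20, a call for 2021-06-10 returns
        # 20, while an instant outside every configured window falls through to 0.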
relevant = ScheduledDiscount.objects.filter(date_from__lte=now, date_to__gt=now)
# Latest discount takes precedence
relevant = relevant.order_by("-date_from")
for sd in relevant:
return sd.discount
# No discounts found
return 0
|
agpl-3.0
| -2,247,421,808,241,636,900
| 31.75188
| 88
| 0.607094
| false
| 3.903226
| false
| false
| false
|
teoliphant/scipy
|
scipy/sparse/tests/test_base.py
|
2
|
65539
|
#
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices
"""
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_sparse.py
"""
import sys
import warnings
import numpy as np
from numpy import arange, zeros, array, dot, matrix, asmatrix, asarray, \
vstack, ndarray, transpose, diag, kron, inf, conjugate, \
int8, ComplexWarning
import random
from numpy.testing import assert_raises, assert_equal, assert_array_equal, \
assert_array_almost_equal, assert_almost_equal, assert_, \
dec, TestCase, run_module_suite
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, \
coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \
eye, isspmatrix, SparseEfficiencyWarning
from scipy.sparse.sputils import supported_dtypes
from scipy.sparse.linalg import splu, expm, inv
warnings.simplefilter('ignore', SparseEfficiencyWarning)
warnings.simplefilter('ignore', ComplexWarning)
#TODO check that spmatrix( ... , copy=X ) is respected
#TODO test prune
#TODO test has_sorted_indices
class _TestCommon:
"""test common functionality shared by all sparse formats"""
def setUp(self):
self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
self.datsp = self.spmatrix(self.dat)
def test_empty(self):
"""create empty matrices"""
assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3)))
assert_equal(self.spmatrix((3,3)).nnz, 0)
def test_invalid_shapes(self):
assert_raises(ValueError, self.spmatrix, (-1,3) )
assert_raises(ValueError, self.spmatrix, (3,-1) )
assert_raises(ValueError, self.spmatrix, (-1,-1) )
def test_repr(self):
repr(self.datsp)
def test_str(self):
str(self.datsp)
def test_empty_arithmetic(self):
"""Test manipulating empty matrices. Fails in SciPy SVN <= r1768
"""
shape = (5, 5)
for mytype in [np.dtype('int32'), np.dtype('float32'),
np.dtype('float64'), np.dtype('complex64'),
np.dtype('complex128')]:
a = self.spmatrix(shape, dtype=mytype)
b = a + a
c = 2 * a
d = a * a.tocsc()
e = a * a.tocsr()
f = a * a.tocoo()
for m in [a,b,c,d,e,f]:
assert_equal(m.A, a.A*a.A)
# These fail in all revisions <= r1768:
assert_equal(m.dtype,mytype)
assert_equal(m.A.dtype,mytype)
def test_abs(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(abs(A),abs(self.spmatrix(A)).todense())
def test_neg(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(-A,(-self.spmatrix(A)).todense())
def test_real(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.real.todense(),D.real)
def test_imag(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.imag.todense(),D.imag)
def test_diagonal(self):
"""Does the matrix's .diagonal() method work?
"""
mats = []
mats.append( [[1,0,2]] )
mats.append( [[1],[0],[2]] )
mats.append( [[0,1],[0,2],[0,3]] )
mats.append( [[0,0,1],[0,0,2],[0,3,0]] )
mats.append( kron(mats[0],[[1,2]]) )
mats.append( kron(mats[0],[[1],[2]]) )
mats.append( kron(mats[1],[[1,2],[3,4]]) )
mats.append( kron(mats[2],[[1,2],[3,4]]) )
mats.append( kron(mats[3],[[1,2],[3,4]]) )
mats.append( kron(mats[3],[[1,2,3,4]]) )
for m in mats:
assert_equal(self.spmatrix(m).diagonal(),diag(m))
def test_nonzero(self):
A = array([[1, 0, 1],[0, 1, 1],[ 0, 0, 1]])
Asp = self.spmatrix(A)
A_nz = set( [tuple(ij) for ij in transpose(A.nonzero())] )
Asp_nz = set( [tuple(ij) for ij in transpose(Asp.nonzero())] )
assert_equal(A_nz, Asp_nz)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
def test_sum(self):
"""Does the matrix's .sum(axis=...) method work?
"""
assert_array_equal(self.dat.sum(), self.datsp.sum())
assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None))
assert_array_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0))
assert_array_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1))
def test_mean(self):
"""Does the matrix's .mean(axis=...) method work?
"""
assert_array_equal(self.dat.mean(), self.datsp.mean())
assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None))
assert_array_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0))
assert_array_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1))
def test_expm(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
Mexp = scipy.linalg.expm(M)
sMexp = expm(sM).todense()
assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
N = array([[ 3., 0., 1.], [ 0., 2., 0.], [ 0., 0., 0.]])
sN = self.spmatrix(N, shape=(3,3), dtype=float)
Nexp = scipy.linalg.expm(N)
sNexp = expm(sN).todense()
assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
def test_inv(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
sMinv = inv(sM)
assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
def test_from_array(self):
A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
A = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_matrix(self):
A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).todense(), A)
A = matrix([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_list(self):
A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
assert_array_equal(self.spmatrix(A).todense(), A)
A = [[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]]
assert_array_equal(self.spmatrix(A).toarray(), array(A))
assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
def test_from_sparse(self):
D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
D = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
#def test_array(self):
# """test array(A) where A is in sparse format"""
# assert_equal( array(self.datsp), self.dat )
def test_todense(self):
# Check C-contiguous (default).
chk = self.datsp.todense()
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.todense(order='C')
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.todense(order='F')
assert_array_equal(chk, self.dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with out argument (array).
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk.base is out)
# Check with out array (matrix).
out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk is out)
a = matrix([1.,2.,3.])
dense_dot_dense = a * self.dat
check = a * self.datsp.todense()
assert_array_equal(dense_dot_dense, check)
b = matrix([1.,2.,3.,4.]).T
dense_dot_dense = self.dat * b
check2 = self.datsp.todense() * b
assert_array_equal(dense_dot_dense, check2)
def test_toarray(self):
# Check C-contiguous (default).
dat = asarray(self.dat)
chk = self.datsp.toarray()
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.toarray(order='C')
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.toarray(order='F')
assert_array_equal(chk, dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with output arg.
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
a = array([1.,2.,3.])
dense_dot_dense = dot(a, dat)
check = dot(a, self.datsp.toarray())
assert_array_equal(dense_dot_dense, check)
b = array([1.,2.,3.,4.])
dense_dot_dense = dot(dat, b)
check2 = dot(self.datsp.toarray(), b)
assert_array_equal(dense_dot_dense, check2)
def test_astype(self):
D = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
S = self.spmatrix(D)
for x in supported_dtypes:
assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type
assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values
assert_equal(S.astype(x).format, S.format) # format preserved
def test_asfptype(self):
A = self.spmatrix( arange(6,dtype='int32').reshape(2,3) )
assert_equal( A.dtype , np.dtype('int32') )
assert_equal( A.asfptype().dtype, np.dtype('float64') )
assert_equal( A.asfptype().format, A.format )
assert_equal( A.astype('int16').asfptype().dtype , np.dtype('float32') )
assert_equal( A.astype('complex128').asfptype().dtype , np.dtype('complex128') )
B = A.asfptype()
C = B.asfptype()
assert_( B is C )
def test_mul_scalar(self):
assert_array_equal(self.dat*2,(self.datsp*2).todense())
assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense())
def test_rmul_scalar(self):
assert_array_equal(2*self.dat,(2*self.datsp).todense())
assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense())
def test_add(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = b + a
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
def test_radd(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = a + b
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
def test_sub(self):
assert_array_equal((self.datsp - self.datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.datsp - A).todense(),self.dat - A.todense())
assert_array_equal((A - self.datsp).todense(),A.todense() - self.dat)
def test_rsub(self):
assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.dat - A),self.dat - A.todense())
assert_array_equal((A - self.dat),A.todense() - self.dat)
assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat)
assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
def test_add0(self):
""" Adding 0 to a sparse matrix """
assert_array_equal((self.datsp + 0).todense(), self.dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * self.datsp for k in range(1, 3)])
sumD = sum([k * self.dat for k in range(1, 3)])
assert_almost_equal(sumS.todense(), sumD)
def test_elementwise_multiply(self):
# real/real
A = array([[4,0,9],[2,-3,5]])
B = array([[0,7,0],[0,-4,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) #sparse/sparse
assert_almost_equal( Asp.multiply(B), A*B) #sparse/dense
# complex/complex
C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Csp = self.spmatrix(C)
Dsp = self.spmatrix(D)
assert_almost_equal( Csp.multiply(Dsp).todense(), C*D) #sparse/sparse
assert_almost_equal( Csp.multiply(D), C*D) #sparse/dense
# real/complex
assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse
assert_almost_equal( Asp.multiply(D), A*D) #sparse/dense
def test_elementwise_divide(self):
expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]]
assert_array_equal((self.datsp / self.datsp).todense(),expected)
denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
res = matrix([[1,0,0,0.5],[-3,0,inf,0],[0,0.25,0,0]],'d')
assert_array_equal((self.datsp / denom).todense(),res)
# complex
A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal( (Asp / Bsp).todense(), A/B)
def test_pow(self):
A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
B = self.spmatrix( A )
for exponent in [0,1,2,3]:
assert_array_equal((B**exponent).todense(),A**exponent)
#invalid exponents
for exponent in [-1, 2.2, 1 + 3j]:
self.assertRaises( Exception, B.__pow__, exponent )
#nonsquare matrix
B = self.spmatrix(A[:3,:])
self.assertRaises( Exception, B.__pow__, 1 )
def test_rmatvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
row = matrix([[1,2,3,4]])
assert_array_almost_equal(row*M, row*M.todense())
def test_small_multiplication(self):
"""test that A*x works for x with shape () (1,) and (1,1)
"""
A = self.spmatrix([[1],[2],[3]])
assert_(isspmatrix(A * array(1)))
assert_equal((A * array(1)).todense(), [[1],[2],[3]])
assert_equal(A * array([1]), array([1,2,3]))
assert_equal(A * array([[1]]), array([[1],[2],[3]]))
def test_matvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
col = matrix([1,2,3]).T
assert_array_almost_equal(M * col, M.todense() * col)
#check result dimensions (ticket #514)
assert_equal((M * array([1,2,3])).shape,(4,))
assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
#check result type
assert_(isinstance( M * array([1,2,3]), ndarray))
assert_(isinstance( M * matrix([1,2,3]).T, matrix))
#ensure exception is raised for improper dimensions
bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
matrix([1,2,3]), matrix([[1],[2]])]
for x in bad_vecs:
assert_raises(ValueError, M.__mul__, x)
# Should this be supported or not?!
#flat = array([1,2,3])
#assert_array_almost_equal(M*flat, M.todense()*flat)
# Currently numpy dense matrices promote the result to a 1x3 matrix,
# whereas sparse matrices leave the result as a rank-1 array. Which
# is preferable?
# Note: the following command does not work. Both NumPy matrices
# and spmatrices should raise exceptions!
# assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3])
# The current relationship between sparse matrix products and array
# products is as follows:
assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
# Note that the result of M * x is dense if x has a singleton dimension.
# Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
# is rank-2. Is this desirable?
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal( asp*b, a*b)
assert_array_almost_equal( a*bsp, a*b)
assert_array_almost_equal( a2*bsp, a*b)
        # Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix( [[1,2],[3,4]] )
B = self.spmatrix( [[1,2],[3,4],[5,6]] )
assert_raises(ValueError, A.__mul__, B)
def test_matmat_dense(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
asp = self.spmatrix(a)
# check both array and matrix types
bs = [ array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]]) ]
for b in bs:
result = asp*b
assert_( isinstance(result, type(b)) )
assert_equal( result.shape, (4,2) )
assert_equal( result, dot(a,b) )
def test_sparse_format_conversions(self):
A = sparse.kron( [[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]] )
D = A.todense()
A = self.spmatrix(A)
for format in ['bsr','coo','csc','csr','dia','dok','lil']:
a = A.asformat(format)
assert_equal(a.format,format)
assert_array_equal(a.todense(), D)
b = self.spmatrix(D+3j).asformat(format)
assert_equal(b.format,format)
assert_array_equal(b.todense(), D+3j)
c = eval(format + '_matrix')(A)
assert_equal(c.format,format)
assert_array_equal(c.todense(), D)
def test_tobsr(self):
x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
y = array([[0,1,2],[3,0,5]])
A = kron(x,y)
Asp = self.spmatrix(A)
for format in ['bsr']:
fn = getattr(Asp, 'to' + format )
for X in [ 1, 2, 3, 6 ]:
for Y in [ 1, 2, 3, 4, 6, 12]:
assert_equal( fn(blocksize=(X,Y)).todense(), A)
def test_transpose(self):
a = self.datsp.transpose()
b = self.dat.transpose()
assert_array_equal(a.todense(), b)
assert_array_equal(a.transpose().todense(), self.dat)
assert_array_equal( self.spmatrix((3,4)).T.todense(), zeros((4,3)) )
def test_add_dense(self):
""" adding a dense matrix to a sparse matrix
"""
sum1 = self.dat + self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = self.datsp + self.dat
assert_array_equal(sum2, 2*self.dat)
def test_sub_dense(self):
""" subtracting a dense matrix to/from a sparse matrix
"""
sum1 = 3*self.dat - self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = 3*self.datsp - self.dat
assert_array_equal(sum2, 2*self.dat)
def test_copy(self):
""" Check whether the copy=True and copy=False keywords work
"""
A = self.datsp
#check that copy preserves format
assert_equal(A.copy().format, A.format)
assert_equal(A.__class__(A,copy=True).format, A.format)
assert_equal(A.__class__(A,copy=False).format, A.format)
assert_equal(A.copy().todense(), A.todense())
assert_equal(A.__class__(A,copy=True).todense(), A.todense())
assert_equal(A.__class__(A,copy=False).todense(), A.todense())
#check that XXX_matrix.toXXX() works
toself = getattr(A,'to' + A.format)
assert_equal(toself().format, A.format)
assert_equal(toself(copy=True).format, A.format)
assert_equal(toself(copy=False).format, A.format)
assert_equal(toself().todense(), A.todense())
assert_equal(toself(copy=True).todense(), A.todense())
assert_equal(toself(copy=False).todense(), A.todense())
# check whether the data is copied?
# TODO: deal with non-indexable types somehow
B = A.copy()
try:
B[0,0] += 1
assert_(B[0,0] != A[0,0])
except NotImplementedError:
# not all sparse matrices can be indexed
pass
except TypeError:
# not all sparse matrices can be indexed
pass
# Eventually we'd like to allow matrix products between dense
# and sparse matrices using the normal dot() function:
#def test_dense_dot_sparse(self):
# a = array([1.,2.,3.])
# dense_dot_dense = dot(a, self.dat)
# dense_dot_sparse = dot(a, self.datsp)
# assert_array_equal(dense_dot_dense, dense_dot_sparse)
#def test_sparse_dot_dense(self):
# b = array([1.,2.,3.,4.])
# dense_dot_dense = dot(self.dat, b)
# dense_dot_sparse = dot(self.datsp, b)
# assert_array_equal(dense_dot_dense, dense_dot_sparse)
class _TestInplaceArithmetic:
def test_imul_scalar(self):
a = self.datsp.copy()
a *= 2
assert_array_equal(self.dat*2,a.todense())
a = self.datsp.copy()
a *= 17.3
assert_array_equal(self.dat*17.3,a.todense())
def test_idiv_scalar(self):
a = self.datsp.copy()
a /= 2
assert_array_equal(self.dat/2,a.todense())
a = self.datsp.copy()
a /= 17.3
assert_array_equal(self.dat/17.3,a.todense())
class _TestGetSet:
def test_setelement(self):
A = self.spmatrix((3,4))
A[ 0, 0] = 0 # bug 870
A[ 1, 2] = 4.0
A[ 0, 1] = 3
A[ 2, 0] = 2.0
A[ 0,-1] = 8
A[-1,-2] = 7
A[ 0, 1] = 5
assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1,2,3], array([1,2,3])]:
assert_raises(ValueError, A.__setitem__, (0,0), v)
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0,0), v)
def test_getelement(self):
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]])
A = self.spmatrix(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1)]:
assert_raises(IndexError, A.__getitem__, ij)
class _TestSolve:
def test_solve(self):
""" Test whether the lu_solve command segfaults, as reported by Nils
Wagner for a 64-bit machine, 02 March 2005 (EJS)
"""
n = 20
np.random.seed(0) #make tests repeatable
A = zeros((n,n), dtype=complex)
x = np.random.rand(n)
y = np.random.rand(n-1)+1j*np.random.rand(n-1)
r = np.random.rand(n)
for i in range(len(x)):
A[i,i] = x[i]
for i in range(len(y)):
A[i,i+1] = y[i]
A[i+1,i] = conjugate(y[i])
A = self.spmatrix(A)
x = splu(A).solve(r)
assert_almost_equal(A*x,r)
class _TestHorizSlicing:
"""Tests horizontal slicing (e.g. [0, :]). Tests for individual sparse
matrix types that implement this should derive from this class.
"""
def test_get_horiz_slice(self):
"""Test for new slice functionality (EJS)"""
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[1,:], A[1,:].todense())
assert_array_equal(B[1,2:5], A[1,2:5].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1, 1:3], D[1, 1:3].todense())
# Now test slicing when a row contains only zeros
E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1, 1:3], F[1, 1:3].todense())
assert_array_equal(E[2, -2:], F[2, -2:].A)
# The following should raise exceptions:
caught = 0
try:
a = A[:,11]
except IndexError:
caught += 1
try:
a = A[6,3:7]
except IndexError:
caught += 1
assert_(caught == 2)
class _TestVertSlicing:
"""Tests vertical slicing (e.g. [:, 0]). Tests for individual sparse
matrix types that implement this should derive from this class.
"""
def test_get_vert_slice(self):
"""Test for new slice functionality (EJS)"""
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[2:5,0], A[2:5,0].todense())
assert_array_equal(B[:,1], A[:,1].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1:3, 1], D[1:3, 1].todense())
assert_array_equal(C[:, 2], D[:, 2].todense())
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[:, 1], F[:, 1].todense())
assert_array_equal(E[-2:, 2], F[-2:, 2].todense())
# The following should raise exceptions:
caught = 0
try:
a = A[:,11]
except IndexError:
caught += 1
try:
a = A[6,3:7]
except IndexError:
caught += 1
assert_(caught == 2)
class _TestBothSlicing:
"""Tests vertical and horizontal slicing (e.g. [:,0:2]). Tests for
individual sparse matrix types that implement this should derive from this
class.
"""
def test_get_slices(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
assert_array_equal(A[1:,:-1].todense(), B[1:,:-1])
assert_array_equal(A[:-1,1:].todense(), B[:-1,1:])
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense())
assert_array_equal(E[:, 1:], F[:, 1:].todense())
class _TestFancyIndexing:
"""Tests fancy indexing features. The tests for any matrix formats
that implement these features should derive from this class.
"""
def test_fancy_indexing_set(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
A = self.spmatrix((n, m))
A[i, j] = 1
assert_almost_equal(A.sum(), nitems)
assert_almost_equal(A[i, j], 1)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
_test_set(i, j, 1)
# [i,1:2]
for i, j in [(2, slice(m)), (2, slice(5, -2)), (array(2), slice(5, -2))]:
_test_set(i, j, 3)
def test_fancy_indexing(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix( B )
# [i,j]
assert_equal(A[2,3], B[2,3])
assert_equal(A[-1,8], B[-1,8])
assert_equal(A[-1,-2],B[-1,-2])
assert_equal(A[array(-1),-2],B[-1,-2])
assert_equal(A[-1,array(-2)],B[-1,-2])
assert_equal(A[array(-1),array(-2)],B[-1,-2])
# [i,1:2]
assert_equal(A[2,:].todense(), B[2,:])
assert_equal(A[2,5:-2].todense(),B[2,5:-2])
assert_equal(A[array(2),5:-2].todense(),B[2,5:-2])
# [i,[1,2]]
assert_equal(A[3,[1,3]].todense(), B[3,[1,3]])
assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]])
# [1:2,j]
assert_equal(A[:,2].todense(), B[:,2])
assert_equal(A[3:4,9].todense(), B[3:4,9])
assert_equal(A[1:4,-5].todense(),B[1:4,-5])
assert_equal(A[2:-1,3].todense(),B[2:-1,3])
assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3])
# [1:2,1:2]
assert_equal(A[1:2,1:2].todense(),B[1:2,1:2])
assert_equal(A[4:,3:].todense(), B[4:,3:])
assert_equal(A[:4,:5].todense(), B[:4,:5])
assert_equal(A[2:-1,:5].todense(),B[2:-1,:5])
# [1:2,[1,2]]
assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]])
assert_equal(A[3:4,[9]].todense(), B[3:4,[9]])
assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]])
assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]])
# [[1,2],j]
assert_equal(A[[1,3],3].todense(), B[[1,3],3])
assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4])
assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4])
# [[1,2],1:2]
assert_equal(A[[1,3],:].todense(), B[[1,3],:])
assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1])
# [[1,2],[1,2]]
assert_equal(A[[1,3],[2,4]], B[[1,3],[2,4]])
assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]])
assert_equal(A[array([-1,-3]),[2,-4]],B[[-1,-3],[2,-4]])
assert_equal(A[[-1,-3],array([2,-4])],B[[-1,-3],[2,-4]])
assert_equal(A[array([-1,-3]),array([2,-4])],B[[-1,-3],[2,-4]])
# [[[1],[2]],[1,2]]
assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]])
assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
# [i]
assert_equal(A[1,:].todense(), B[1,:])
assert_equal(A[-2,:].todense(),B[-2,:])
assert_equal(A[array(-2),:].todense(),B[-2,:])
# [1:2]
assert_equal(A[1:4].todense(), B[1:4])
assert_equal(A[1:-2].todense(),B[1:-2])
# [[1,2]]
assert_equal(A[[1,3]].todense(), B[[1,3]])
assert_equal(A[[-1,-3]].todense(),B[[-1,-3]])
assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]])
# [[1,2],:][:,[1,2]]
assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]] )
assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]] )
assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]] )
# [:,[1,2]][[1,2],:]
assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:] )
assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:] )
assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:] )
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_fancy_indexing_randomized(self):
random.seed(0) # make runs repeatable
NUM_SAMPLES = 50
M = 6
N = 4
D = np.asmatrix(np.random.rand(M,N))
D = np.multiply(D, D > 0.5)
I = np.random.random_integers(-M + 1, M - 1, size=NUM_SAMPLES)
J = np.random.random_integers(-N + 1, N - 1, size=NUM_SAMPLES)
S = self.spmatrix(D)
assert_equal(S[I,J], D[I,J])
I_bad = I + M
J_bad = J - N
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
class _TestArithmetic:
"""
Test real/complex arithmetic
"""
def arith_init(self):
#these can be represented exactly in FP (so arithmetic should be exact)
self.A = matrix([[ -1.5, 6.5, 0, 2.25, 0, 0],
[ 3.125, -7.875, 0.625, 0, 0, 0],
[ 0, 0, -0.125, 1.0, 0, 0],
[ 0, 0, 8.375, 0, 0, 0]],'float64')
self.B = matrix([[ 0.375, 0, 0, 0, -5, 2.5],
[ 14.25, -3.75, 0, 0, -0.125, 0],
[ 0, 7.25, 0, 0, 0, 0],
[ 18.5, -0.0625, 0, 0, 0, 0]],'complex128')
self.B.imag = matrix([[ 1.25, 0, 0, 0, 6, -3.875],
[ 2.25, 4.125, 0, 0, 0, 2.75],
[ 0, 4.125, 0, 0, 0, 0],
[ -0.0625, 0, 0, 0, 0, 0]],'float64')
#fractions are all x/16ths
assert_array_equal((self.A*16).astype('int32'),16*self.A)
assert_array_equal((self.B.real*16).astype('int32'),16*self.B.real)
assert_array_equal((self.B.imag*16).astype('int32'),16*self.B.imag)
self.Asp = self.spmatrix(self.A)
self.Bsp = self.spmatrix(self.B)
def test_add_sub(self):
self.arith_init()
#basic tests
assert_array_equal((self.Asp+self.Bsp).todense(),self.A+self.B)
#check conversions
for x in supported_dtypes:
A = self.A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
B = self.B.astype(y)
Bsp = self.spmatrix(B)
#addition
D1 = A + B
S1 = Asp + Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp + B,D1) #check sparse + dense
assert_array_equal(A + Bsp,D1) #check dense + sparse
#subtraction
D1 = A - B
S1 = Asp - Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp - B,D1) #check sparse - dense
assert_array_equal(A - Bsp,D1) #check dense - sparse
def test_mu(self):
self.arith_init()
#basic tests
assert_array_equal((self.Asp*self.Bsp.T).todense(),self.A*self.B.T)
for x in supported_dtypes:
A = self.A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
B = self.B.astype(y)
Bsp = self.spmatrix(B)
D1 = A * B.T
S1 = Asp * Bsp.T
assert_array_equal(S1.todense(),D1)
assert_equal(S1.dtype,D1.dtype)
class _Test2DSlicingRegression:
def test_non_unit_stride_2d_indexing_raises_exception(self):
# Regression test -- used to silently ignore the stride.
try:
self.spmatrix((500, 500))[0:100:2, 0:100:2]
except ValueError:
return
assert_(False) # Should not happen.
class TestCSR(_TestCommon, _TestGetSet, _TestSolve,
_TestInplaceArithmetic, _TestArithmetic,
_TestHorizSlicing, _TestVertSlicing, _TestBothSlicing,
_TestFancyIndexing, _Test2DSlicingRegression, TestCase):
spmatrix = csr_matrix
@dec.knownfailureif(True, "Fancy indexing is known to be broken for CSR" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
def test_constructor1(self):
b = matrix([[0,4,0],
[3,0,0],
[0,2,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[4,3,2])
assert_array_equal(bsp.indices,[1,0,1])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_equal(bsp.getnnz(),3)
assert_equal(bsp.getformat(),'csr')
assert_array_equal(bsp.todense(),b)
def test_constructor2(self):
b = zeros((6,6),'d')
b[3,4] = 5
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[4])
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
assert_array_almost_equal(bsp.todense(),b)
def test_constructor3(self):
b = matrix([[1,0],
[0,2],
[3,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,3])
assert_array_equal(bsp.indices,[0,1,0])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_array_almost_equal(bsp.todense(),b)
### currently disabled
## def test_constructor4(self):
## """try using int64 indices"""
## data = arange( 6 ) + 1
## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' )
## ptr = array( [0, 2, 4, 6], dtype='int64' )
##
## a = csr_matrix( (data, col, ptr), shape = (3,3) )
##
## b = matrix([[0,1,2],
## [4,3,0],
## [5,0,6]],'d')
##
## assert_equal(a.indptr.dtype,numpy.dtype('int64'))
## assert_equal(a.indices.dtype,numpy.dtype('int64'))
## assert_array_equal(a.todense(),b)
def test_constructor4(self):
"""using (data, ij) format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csr = csr_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csr.todense())
def test_constructor5(self):
"""infer dimensions from arrays"""
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape,(3,6))
def test_sort_indices(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
bsp = asp.copy()
asp.sort_indices( )
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_eliminate_zeros(self):
data = array( [1, 0, 0, 0, 2, 0, 3, 0] )
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
bsp = asp.copy()
asp.eliminate_zeros( )
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
data = arange( 6 )
indices = array( [8, 1, 5, 7, 2, 4] )
indptr = array( [0, 2, 6] )
bsp = csr_matrix( (data, indices, indptr), shape=(2,10) )
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
class TestCSC(_TestCommon, _TestGetSet, _TestSolve,
_TestInplaceArithmetic, _TestArithmetic,
_TestHorizSlicing, _TestVertSlicing, _TestBothSlicing,
_TestFancyIndexing, _Test2DSlicingRegression, TestCase):
spmatrix = csc_matrix
@dec.knownfailureif(True, "Fancy indexing is known to be broken for CSC" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
def test_constructor1(self):
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.getnnz(),4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.getformat(),'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = matrix([[1,0],[0,0],[0,2]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
"""using (data, ij) format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = csc_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csc.todense())
def test_constructor5(self):
"""infer dimensions from arrays"""
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_eliminate_zeros(self):
data = array( [1, 0, 0, 0, 2, 0, 3, 0] )
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = csc_matrix( (data, indices, indptr), shape=(10,2) )
bsp = asp.copy()
asp.eliminate_zeros( )
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_sort_indices(self):
data = arange( 5 )
row = array( [7, 2, 1, 5, 4] )
ptr = [0, 3, 5]
asp = csc_matrix( (data, row, ptr), shape=(10,2) )
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csc_matrix( (data, indices, indptr), shape=(10,2) )
data = arange( 6 )
indices = array( [8, 1, 5, 7, 2, 4] )
indptr = array( [0, 2, 6] )
bsp = csc_matrix( (data, indices, indptr), shape=(10,2) )
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
class TestDOK(_TestCommon, _TestGetSet, _TestSolve, TestCase):
spmatrix = dok_matrix
def test_mult(self):
A = dok_matrix((10,10))
A[0,3] = 10
A[5,6] = 20
D = A*A.T
E = A*A.H
assert_array_equal(D.A, E.A)
def test_add(self):
A = dok_matrix((3,2))
A[0,1] = -10
A[2,0] = 20
A = A + 10
B = matrix([[10, 0], [10, 10], [30, 10]])
assert_array_equal(A.todense(), B)
def test_convert(self):
"""Test provided by Andrew Straw. Fails in SciPy <= r1477.
"""
(m, n) = (6, 7)
a=dok_matrix((m, n))
# set a few elements, but none in the last column
a[2,1]=1
a[0,2]=2
a[3,1]=3
a[1,5]=4
a[4,3]=5
a[4,2]=6
# assert that the last column is all zeros
assert_array_equal( a.toarray()[:,n-1], zeros(m,) )
# make sure it still works for CSC format
csc=a.tocsc()
assert_array_equal( csc.toarray()[:,n-1], zeros(m,) )
# now test CSR
(m, n) = (n, m)
b = a.transpose()
assert_equal(b.shape, (m, n))
# assert that the last row is all zeros
assert_array_equal( b.toarray()[m-1,:], zeros(n,) )
# make sure it still works for CSR format
csr=b.tocsr()
assert_array_equal( csr.toarray()[m-1,:], zeros(n,))
def test_set_slice(self):
"""Test for slice functionality (EJS)"""
A = dok_matrix((5,10))
B = zeros((5,10), float)
A[:,0] = 1
B[:,0] = 1
assert_array_equal(A.todense(), B)
A[1,:] = 2
B[1,:] = 2
assert_array_equal(A.todense(), B)
A[:,:] = 3
B[:,:] = 3
assert_array_equal(A.todense(), B)
A[1:5, 3] = 4
B[1:5, 3] = 4
assert_array_equal(A.todense(), B)
A[1, 3:6] = 5
B[1, 3:6] = 5
assert_array_equal(A.todense(), B)
A[1:4, 3:6] = 6
B[1:4, 3:6] = 6
assert_array_equal(A.todense(), B)
A[1, 3:10:3] = 7
B[1, 3:10:3] = 7
assert_array_equal(A.todense(), B)
A[1:5, 0] = range(1,5)
B[1:5, 0] = range(1,5)
assert_array_equal(A.todense(), B)
A[0, 1:10:2] = xrange(1,10,2)
B[0, 1:10:2] = xrange(1,10,2)
assert_array_equal(A.todense(), B)
caught = 0
# The next 6 commands should raise exceptions
try:
A[0,0] = range(100)
except ValueError:
caught += 1
try:
A[0,0] = arange(100)
except ValueError:
caught += 1
try:
A[0,:] = range(100)
except ValueError:
caught += 1
try:
A[:,1] = range(100)
except ValueError:
caught += 1
try:
A[:,1] = A.copy()
except:
caught += 1
assert_equal(caught,5)
def test_ctor(self):
caught = 0
# Empty ctor
try:
A = dok_matrix()
except TypeError, e:
caught+=1
assert_equal(caught, 1)
# Dense ctor
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
A = dok_matrix(b)
assert_equal(A.todense(), b)
# Sparse ctor
c = csr_matrix(b)
assert_equal(A.todense(), c.todense())
def test_resize(self):
"""A couple basic tests of the resize() method.
resize(shape) resizes the array in-place.
"""
a = dok_matrix((5,5))
a[:,0] = 1
a.resize((2,2))
expected1 = array([[1,0],[1,0]])
assert_array_equal(a.todense(), expected1)
a.resize((3,2))
expected2 = array([[1,0],[1,0],[0,0]])
assert_array_equal(a.todense(), expected2)
def test_ticket1160(self):
"""Regression test for ticket #1160."""
a = dok_matrix((3,3))
a[0,0] = 0
# This assert would fail, because the above assignment would
# incorrectly call __set_item__ even though the value was 0.
assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
# Slice assignments were also affected.
b = dok_matrix((3,3))
b[:,0] = 0
assert_(len(b.keys())==0, "Unexpected entries in keys")
# The following five tests are duplicates from _TestCommon, so they can be
# marked as knownfail for Python 2.4. Once 2.4 is no longer supported,
# these duplicates can be removed again.
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_add_dense(self):
""" adding a dense matrix to a sparse matrix
"""
sum1 = self.dat + self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = self.datsp + self.dat
assert_array_equal(sum2, 2*self.dat)
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_radd(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = a + b
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_rsub(self):
assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.dat - A),self.dat - A.todense())
assert_array_equal((A - self.dat),A.todense() - self.dat)
assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat)
assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal( asp*b, a*b)
assert_array_almost_equal( a*bsp, a*b)
assert_array_almost_equal( a2*bsp, a*b)
        # Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix( [[1,2],[3,4]] )
B = self.spmatrix( [[1,2],[3,4],[5,6]] )
assert_raises(ValueError, A.__mul__, B)
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_sub_dense(self):
""" subtracting a dense matrix to/from a sparse matrix
"""
sum1 = 3*self.dat - self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = 3*self.datsp - self.dat
assert_array_equal(sum2, 2*self.dat)
class TestLIL( _TestCommon, _TestHorizSlicing, _TestVertSlicing,
_TestBothSlicing, _TestGetSet, _TestSolve,
_TestArithmetic, _TestInplaceArithmetic, _TestFancyIndexing,
TestCase):
spmatrix = lil_matrix
B = lil_matrix((4,3))
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
@dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
@dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \
" matrices")
def test_fancy_indexing_randomized(self):
_TestFancyIndexing.test_fancy_indexing_randomized(self)
def test_dot(self):
A = matrix(zeros((10,10)))
A[0,3] = 10
A[5,6] = 20
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
assert_array_equal(A * A.T, (B * B.T).todense())
assert_array_equal(A * A.H, (B * B.H).todense())
def test_scalar_mul(self):
x = lil_matrix((3,3))
x[0,0] = 2
x = x*2
assert_equal(x[0,0],4)
x = x*0
assert_equal(x[0,0],0)
def test_reshape(self):
x = lil_matrix((4,3))
x[0,0] = 1
x[2,1] = 3
x[3,2] = 5
x[0,2] = 7
for s in [(12,1),(1,12)]:
assert_array_equal(x.reshape(s).todense(),
x.todense().reshape(s))
def test_lil_lil_assignment(self):
""" Tests whether a row of one lil_matrix can be assigned to
another.
"""
B = self.B.copy()
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].A, B[0,:].A)
def test_inplace_ops(self):
A = lil_matrix([[0,2,3],[4,0,6]])
B = lil_matrix([[0,1,0],[0,2,3]])
data = {'add': (B,A + B),
'sub': (B,A - B),
'mul': (3,A * 3)}
for op,(other,expected) in data.iteritems():
result = A.copy()
getattr(result, '__i%s__' % op)(other)
assert_array_equal(result.todense(), expected.todense())
def test_lil_slice_assignment(self):
B = lil_matrix((4,3))
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
B[:,:] = B+B
assert_array_equal(B.todense(),expected)
block = [[1,0],[0,4]]
B[:2,:2] = csc_matrix(array(block))
assert_array_equal(B.todense()[:2,:2],block)
def test_lil_sequence_assignment(self):
A = lil_matrix((4,3))
B = eye(3,4,format='lil')
i0 = [0,1,2]
i1 = (0,1,2)
i2 = array( i0 )
A[0,i0] = B[i0,0]
A[1,i1] = B[i1,1]
A[2,i2] = B[i2,2]
assert_array_equal(A.todense(),B.T.todense())
# column slice
A = lil_matrix((2,3))
A[1,1:3] = [10,20]
assert_array_equal(A.todense(), [[0,0,0],[0,10,20]])
# column slice
A = lil_matrix((3,2))
A[1:3,1] = [[10],[20]]
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]])
def test_lil_iteration(self):
row_data = [[1,2,3],[4,5,6]]
B = lil_matrix(array(row_data))
for r,row in enumerate(B):
assert_array_equal(row.todense(),array(row_data[r],ndmin=2))
def test_lil_from_csr(self):
""" Tests whether a lil_matrix can be constructed from a
csr_matrix.
"""
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
B[8,3] = 30
B[3,8] = 40
B[8,9] = 50
C = B.tocsr()
D = lil_matrix(C)
assert_array_equal(C.A, D.A)
def test_fancy_indexing(self):
M = arange(25).reshape(5,5)
A = lil_matrix( M )
assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3])
def test_point_wise_multiply(self):
l = lil_matrix((4,3))
l[0,0] = 1
l[1,1] = 2
l[2,2] = 3
l[3,1] = 4
m = lil_matrix((4,3))
m[0,0] = 1
m[0,1] = 2
m[2,2] = 3
m[3,1] = 4
m[3,2] = 4
assert_array_equal(l.multiply(m).todense(),
m.multiply(l).todense())
assert_array_equal(l.multiply(m).todense(),
[[1,0,0],
[0,0,0],
[0,0,9],
[0,16,0]])
def test_lil_multiply_removal(self):
"""Ticket #1427."""
a = lil_matrix(np.ones((3,3)))
a *= 2.
a[0, :] = 0
class TestCOO(_TestCommon, TestCase):
spmatrix = coo_matrix
def test_constructor1(self):
"""unsorted triplet format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
coo = coo_matrix((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4,3),coo.todense())
def test_constructor2(self):
"""unsorted triplet format with duplicates (which are summed)"""
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = coo_matrix((data,(row,col)),(3,3))
mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]])
assert_array_equal(mat,coo.todense())
def test_constructor3(self):
"""empty matrix"""
coo = coo_matrix( (4,3) )
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.todense(),zeros((4,3)))
def test_constructor4(self):
"""from dense matrix"""
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat)
#upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat.reshape(1,-1))
class TestDIA(_TestCommon, _TestArithmetic, TestCase):
spmatrix = dia_matrix
def test_constructor1(self):
D = matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
data = np.array([[1,2,3,4]]).repeat(3,axis=0)
offsets = np.array([0,-1,2])
assert_equal(dia_matrix( (data,offsets), shape=(4,4)).todense(), D)
class TestBSR(_TestCommon, _TestArithmetic, _TestInplaceArithmetic, TestCase):
spmatrix = bsr_matrix
def test_constructor1(self):
"""check native BSR format constructor"""
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[ 0, 1, 2],
[ 3, 0, 5]])
data[1] = array([[ 0, 2, 4],
[ 6, 0, 10]])
data[2] = array([[ 0, 4, 8],
[12, 0, 20]])
data[3] = array([[ 0, 5, 10],
[15, 0, 25]])
A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
assert_equal(Asp.todense(),A)
#infer shape from arrays
Asp = bsr_matrix((data,indices,indptr))
assert_equal(Asp.todense(),A)
def test_constructor2(self):
"""construct from dense"""
#test zero mats
for shape in [ (1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(bsr_matrix(A).todense(),A)
A = zeros((4,6))
assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
assert_equal(bsr_matrix(A).todense(),A)
assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
A = kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] )
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = bsr_matrix( (data, indices, indptr), shape=(4,20) )
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.todense(),bsp.todense())
def test_bsr_matvec(self):
A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) )
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A*x, A.todense()*x)
def test_bsr_matvecs(self):
A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) )
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A*x, A.todense()*x)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
| -5,126,287,273,559,635,000
| 34.541757
| 99
| 0.507576
| false
| 2.958471
| true
| false
| false
|
sayoun/pyvac
|
pyvac/views/base.py
|
1
|
7745
|
# -*- coding: utf-8 -*-
import logging
import traceback
from datetime import datetime
from webob import Response
from pyramid.security import authenticated_userid
from pyramid.httpexceptions import HTTPFound
from pyramid.url import route_url
# from pyramid.response import Response
from pyramid.settings import asbool
from pyvac.helpers.sqla import ModelError
from .. import __version__
from ..models import DBSession, User, Request, Sudoer
log = logging.getLogger(__name__)
class ViewBase(object):
"""
Pyvac view base class.
"""
def __init__(self, request):
self.request = request
self.session = DBSession()
login = authenticated_userid(self.request)
if login:
self.login = login
self.user = User.by_login(self.session, login)
else:
self.login = 'anonymous'
self.user = None
def update_response(self, response):
pass
def on_error(self, exception):
return True
def __call__(self):
try:
log.info('dispatch view %s', self.__class__.__name__)
response = self.render()
self.update_response(response)
# if isinstance(response, dict):
# log.info("rendering template with context %r", dict)
self.session.flush()
except Exception as exc:
if self.on_error(exc):
log.error('Error on view %s' % self.__class__.__name__,
exc_info=True)
raise
log.info('view %s dispatched', self.__class__.__name__)
return response
def render(self):
return {}
class View(ViewBase):
"""
Base class of every views.
"""
def update_response(self, response):
# this is a view to render
if isinstance(response, dict):
global_ = {
'pyvac': {
'version': __version__,
'login': self.login,
'user': self.user,
}
}
if self.user:
# if logged, retrieve total requests count for header
req_list = {'requests': []}
requests = []
if self.user.is_admin:
country = self.user.country
requests = Request.all_for_admin_per_country(self.session,
country)
elif self.user.is_super:
requests = Request.by_manager(self.session, self.user)
req_list['requests'] = requests
# always add our requests
for req in Request.by_user(self.session, self.user):
if req not in req_list['requests']:
req_list['requests'].append(req)
# only count next requests
today = datetime.now()
if self.user.is_admin:
# for admin, display request from 1st of month
today = today.replace(day=1)
requests_count = len([req for req in req_list['requests']
if req.date_to >= today])
global_['pyvac']['requests_count'] = requests_count
# retrieve available users for sudo
sudoers = Sudoer.alias(self.session, self.user)
if sudoers:
sudoers.append(self.user)
global_['pyvac']['sudoers'] = sudoers
response.update(global_)
class RedirectView(View):
"""
Base class of every view that redirect after post.
"""
redirect_route = None
redirect_kwargs = {}
def render(self):
return self.redirect()
def redirect(self, redirect_route=None):
settings = self.request.registry.settings
if 'pyvac.force_scheme' in settings:
scheme = settings.get('pyvac.force_scheme')
self.redirect_kwargs['_scheme'] = scheme
route = redirect_route or self.redirect_route
return HTTPFound(location=route_url(route, self.request,
**self.redirect_kwargs))
class CreateView(RedirectView):
"""
Base class of every create view.
"""
model = None
matchdict_key = None
def parse_form(self):
kwargs = {}
prefix = self.model.__tablename__
for k, v in list(self.request.params.items()):
if v and k.startswith(prefix):
kwargs[k.split('.').pop()] = v
return kwargs
def get_model(self):
return self.model()
def update_model(self, model):
"""
trivial implementation for simple data in the form,
using the model prefix.
"""
for k, v in list(self.parse_form().items()):
if k == 'ldap_user':
v = bool(int(v))
setattr(model, k, v)
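        # The form sends 'ldap_user' as a string (presumably '0'/'1'), hence the
        # bool(int(v)) coercion above before it is set on the model.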
def update_view(self, model, view):
"""
        render() initializes the trivial view properties;
        update_view() is the hook subclasses override to customize the view dict before rendering.
"""
def validate(self, model, errors):
return len(errors) == 0
def save_model(self, model):
log.debug('saving %s' % model.__class__.__name__)
log.debug('%r' % model.__dict__)
self.session.add(model)
def render(self):
settings = self.request.registry.settings
ldap = False
if 'pyvac.use_ldap' in settings:
ldap = asbool(settings.get('pyvac.use_ldap'))
if 'form.cancelled' in self.request.params:
return self.redirect()
log.debug('rendering %s' % self.__class__.__name__)
errors = []
model = self.get_model()
if self.user and not self.user.is_admin:
if model.id != self.user.id:
return self.redirect('home')
if 'form.submitted' in self.request.params:
self.validate(model, errors)
if not errors:
try:
self.update_model(model)
model.validate(self.session, ldap=ldap)
except ModelError as err:
errors.extend(err.errors)
if not errors:
self.save_model(model)
return self.redirect()
rv = {'errors': errors,
self.model.__tablename__: model,
'use_ldap': ldap,
'csrf_token': self.request.session.get_csrf_token()}
self.update_view(model, rv)
log.debug(repr(rv))
return rv
class EditView(CreateView):
"""
Base class of every edit view.
"""
def get_model(self):
return self.model.by_id(
self.session, int(self.request.matchdict[self.matchdict_key]))
class DeleteView(RedirectView):
"""
Base class of every delete view.
"""
model = None
matchdict_key = None
redirect_route = None
def delete(self, model):
self.session.delete(model)
def render(self):
model = self.model.by_id(
self.session, int(self.request.matchdict[self.matchdict_key]))
if 'form.submitted' in self.request.params:
self.delete(model)
return self.redirect()
return {self.model.__tablename__: model}
def forbidden_view(request):
return HTTPFound(location=route_url('login', request))
def exception_view(context, request):
log.error("The error was: %s" % context, exc_info=(context))
body = """Oops ! An internal error has occured, maybe this can help ?<br/>
<pre>%s</pre>""" % traceback.format_exc()
return Response(status_int=500, body=body)
|
bsd-3-clause
| -2,335,569,999,331,767,300
| 28.116541
| 78
| 0.541382
| false
| 4.348681
| false
| false
| false
|
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn
|
tests/python/automaton.py
|
1
|
13433
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import vcsn
from test import *
## -------------- ##
## dot: parsing. ##
## -------------- ##
# Check invalid input.
def xfail(a):
XFAIL(lambda: vcsn.automaton(a))
# Syntax error: missing }.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
''')
# Syntax error: string not closed.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b
}
''')
# Syntax error: attributes are assignments.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [attribute]
}
''')
# Syntax error: attributes are assignments.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [attribute =]
}
''')
# Syntax error: comma used after empty attribute.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [,a=a]
}
''')
# Syntax error: semicolon used after empty attribute
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [;a=a]
}
''')
# Syntax error: semicolon used after empty attribute
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [a=a,;]
}
''')
# Invalid label: letter not in alphabet.
xfail(r'''digraph
{
vcsn_context = "lal_char(), b"
0 -> 1 [label = a]
1 -> F1
I0 -> 0
}
''')
# Invalid label: \e is not valid in LAL.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
0 -> 1 [label = "\\e"]
1 -> F1
I0 -> 0
}
''')
# Invalid label: aa is not valid in LAL.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
0 -> 1 [label = "aa"]
1 -> F1
I0 -> 0
}
''')
# Invalid label: missing '>'.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), z"
0 -> 1 [label = "<2"]
1 -> F1
I0 -> 0
}
''')
# No context defined (see the typo in vcsn_context).
xfail(r'''digraph
{
vcsn_contxt = "lal_char(ab), b"
0 -> 1 [label = a]
1 -> F1
I0 -> 0
}
''')
# Invalid context.
xfail(r'''digraph
{
vcsn_context = "unknown"
0 -> 1 [label = a]
1 -> F1
I0 -> 0
}
''')
# Invalid initial label.
xfail(r'''digraph
{
vcsn_context = "lal_char(ab), b"
0 -> 1 [label = a]
1 -> F1
I0 -> 0 [label = a]
}
''')
# Invalid final label.
xfail(r'''digraph
{
vcsn_context = "lal_char(ab), b"
0 -> 1 [label = a]
1 -> F1 [label = a]
I0 -> 0
}
''')
# \e makes no sense when not in lan.
xfail(r'''digraph
{
vcsn_context = "lal_char(\\e), b"
0 -> 1 [label = "\\e"]
}
''')
# An open context (letters are not specified).
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char(abcd), b"
0 -> 0 [label="a, b, c, d"]
}'''),
vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char, b"
0 -> 0 [label="a, b, c, d"]
}'''))
# An open tuple context.
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lat<lal_char,law_char>, b"
0 -> 0 [label="(a|x),(b|xyz),(c|\\e)"]
}'''),
vcsn.automaton(r'''digraph
{
vcsn_context = "lat<lal_char(abc),law_char(xyz)>, b"
0 -> 0 [label="(a,x),(b,xyz),(c,\\e)"]
}'''))
# Coverage: different rarely used features.
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char(), b"
{
node [shape = circle]
0 [color = DimGray]
}
}'''),
vcsn.automaton(r'''digraph "a graph
name"
{
vcsn_context // single line comment
=
/* a
multiline
comment. */
"lal_char(), b"
graph [a = "graph attribute",]
edge [a = "edge attribute";]
node [a = "node attribute"]
0:port:nw [a1 = a1, a2 = a2; a3 = a3 a4 = a4]
}'''))
# A context string with ".
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char(\"\\'), b"
0 -> 0 [label="\", \\'"]
}'''),
vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char, b"
0 -> 0 [label="\", \\'"]
}'''))
# A dot file which uses the HTML strings. And a subgraph.
CHECK_EQ(r'''context = "nullableset<letterset<char_letters(ab)>>, b"
$ -> 0
$ -> 3
0 -> 1 a, b
1 -> $
2 -> 1 a
3 -> 2 b''',
vcsn.automaton(filename=medir+'/html.gv').format('daut'))
## --------------- ##
## automaton.dot. ##
## --------------- ##
# Make sure to check the rendering useful/useless named/nameless
# states, weights, and spontaneous transitions.
c = vcsn.context('lan_char(ab), z')
a = c.expression('<2>a+<2>b').thompson()
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0
1
2
3
4
5
}
I0 -> 0
0 -> 2 [label = "ε"]
0 -> 4 [label = "ε"]
1 -> F1
2 -> 3 [label = "⟨2⟩a"]
3 -> 1 [label = "ε"]
4 -> 5 [label = "⟨2⟩b"]
5 -> 1 [label = "ε"]
}''',
a.dot())
# conjunction: state names, and useless states, etc.
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F11
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0 [label = "0, 0", shape = box, fixedsize = false]
1 [label = "2, 0", shape = box, fixedsize = false]
2 [label = "4, 0", shape = box, fixedsize = false]
3 [label = "2, 2", shape = box, fixedsize = false]
4 [label = "2, 4", shape = box, fixedsize = false, fillcolor = lightgray]
5 [label = "4, 2", shape = box, fixedsize = false, fillcolor = lightgray]
6 [label = "4, 4", shape = box, fixedsize = false]
7 [label = "3, 3", shape = box, fixedsize = false]
8 [label = "5, 5", shape = box, fixedsize = false]
9 [label = "1, 3", shape = box, fixedsize = false]
10 [label = "1, 5", shape = box, fixedsize = false]
11 [label = "1, 1", shape = box, fixedsize = false]
}
I0 -> 0
0 -> 1 [label = "ε"]
0 -> 2 [label = "ε"]
1 -> 3 [label = "ε"]
1 -> 4 [label = "ε", color = DimGray]
2 -> 5 [label = "ε", color = DimGray]
2 -> 6 [label = "ε"]
3 -> 7 [label = "⟨4⟩a"]
6 -> 8 [label = "⟨4⟩b"]
7 -> 9 [label = "ε"]
8 -> 10 [label = "ε"]
9 -> 11 [label = "ε"]
10 -> 11 [label = "ε"]
11 -> F11
}''',
(a&a).dot())
# Tooltip.
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F11
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0 [tooltip = "0, 0"]
1 [tooltip = "2, 0"]
2 [tooltip = "4, 0"]
3 [tooltip = "2, 2"]
4 [tooltip = "2, 4", fillcolor = lightgray]
5 [tooltip = "4, 2", fillcolor = lightgray]
6 [tooltip = "4, 4"]
7 [tooltip = "3, 3"]
8 [tooltip = "5, 5"]
9 [tooltip = "1, 3"]
10 [tooltip = "1, 5"]
11 [tooltip = "1, 1"]
}
I0 -> 0
0 -> 1 [label = "ε"]
0 -> 2 [label = "ε"]
1 -> 3 [label = "ε"]
1 -> 4 [label = "ε", color = DimGray]
2 -> 5 [label = "ε", color = DimGray]
2 -> 6 [label = "ε"]
3 -> 7 [label = "⟨4⟩a"]
6 -> 8 [label = "⟨4⟩b"]
7 -> 9 [label = "ε"]
8 -> 10 [label = "ε"]
9 -> 11 [label = "ε"]
10 -> 11 [label = "ε"]
11 -> F11
}''',
(a&a).dot("tooltip"))
# Transitions.
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F11
}
{
node [shape = point, width = 0]
0 [label = "0, 0"]
1 [label = "2, 0"]
2 [label = "4, 0"]
3 [label = "2, 2"]
4 [label = "2, 4", fillcolor = lightgray]
5 [label = "4, 2", fillcolor = lightgray]
6 [label = "4, 4"]
7 [label = "3, 3"]
8 [label = "5, 5"]
9 [label = "1, 3"]
10 [label = "1, 5"]
11 [label = "1, 1"]
}
I0 -> 0
0 -> 1 [label = "ε"]
0 -> 2 [label = "ε"]
1 -> 3 [label = "ε"]
1 -> 4 [label = "ε", color = DimGray]
2 -> 5 [label = "ε", color = DimGray]
2 -> 6 [label = "ε"]
3 -> 7 [label = "⟨4⟩a"]
6 -> 8 [label = "⟨4⟩b"]
7 -> 9 [label = "ε"]
8 -> 10 [label = "ε"]
9 -> 11 [label = "ε"]
10 -> 11 [label = "ε"]
11 -> F11
}''',
(a&a).dot("transitions"))
# Empty set.
CHECK_EQ('''digraph
{
vcsn_context = "letterset<char_letters()>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0 [label = "∅ᶜ", shape = box, fixedsize = false]
}
I0 -> 0
0 -> F0
}''',
vcsn.context('lal_char, b').expression('\z{c}').derived_term().dot())
## ------------- ##
## dot: simple. ##
## ------------- ##
ctx = vcsn.context('lal<string>, b')
e = ctx.expression("'🍺':'🍾':'☕️':'🍷' & [^]*'🍺'[^]*'☕️'[^]* & ([^]*'🍷''🍾'[^]*){c}")
CHECK_EQ(open(medir + '/drinks-simple.gv').read().strip(),
e.automaton().minimize().dot('simple'))
## ------------------------------- ##
## Output: dot, dot2tex and TikZ. ##
## ------------------------------- ##
import glob
for fn in glob.glob(os.path.join(medir, '*.in.gv')):
print("Checking: ", fn)
a = vcsn.automaton(filename = fn)
exp = open(fn.replace('.in.gv', '.out.gv')).read().strip()
CHECK_EQ(exp, a.format('dot'))
exp = open(fn.replace('.in.gv', '.tex.gv')).read().strip()
CHECK_EQ(exp, a.format('dot,latex'))
exp = open(fn.replace('.in.gv', '.tex')).read().strip()
CHECK_EQ(exp, a.format('tikz'))
# Check state names in TikZ.
a = vcsn.context('lal_char, b').expression('\e+a').derived_term()
exp = open(os.path.join(medir, 'derived-term.tex')).read().strip()
CHECK_EQ(exp, a.format('tikz'))
## ----------- ##
## I/O: Daut. ##
## ----------- ##
for fn in glob.glob(os.path.join(medir, '*.in.gv')):
a = vcsn.automaton(filename=fn)
# Check output.
daut = a.format('daut')
exp = open(fn.replace('.in.gv', '.daut')).read().strip()
CHECK_EQ(exp, daut)
# Check input: make sure we can read it.
CHECK_EQ(a, vcsn.automaton(exp, 'daut'))
CHECK_EQ(a, vcsn.automaton(exp, 'auto'))
CHECK_EQ(a, vcsn.automaton(exp))
# A daut file whose names have quotes: beware of building "Ifoo" and
# "Ffoo", not I"foo" and F"foo".
CHECK_EQ(r'''digraph
{
vcsn_context = "letterset<char_letters()>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
}
{
node [shape = circle, style = rounded, width = 0.5]
0 [label = "foo", shape = box]
}
I0 -> 0
0 -> F0
}''',
vcsn.automaton('''$ -> "foo"
"foo" -> $''', strip = False))
## ----------- ##
## I/O: FAdo. ##
## ----------- ##
try:
import FAdo
has_fado = True
except ImportError:
has_fado = False
def check_fado(aut):
'''Check that FAdo accepts aut.format('fado') as input.'''
if has_fado:
name = "automaton.fado"
from FAdo import fa
# I did not find a means to read from a string...
with open(name, 'w') as f:
f.write(aut.format('fado') + "\n")
fa.readFromFile(name)
os.remove(name)
else:
SKIP("FAdo not installed")
for fn in glob.glob(os.path.join(medir, '*.fado')):
exp = vcsn.automaton(filename = fn.replace('.fado', '.gv'))
# Check that we can read FAdo.
CHECK_EQ(exp, vcsn.automaton(filename = fn, format = 'fado'))
CHECK_EQ(exp, vcsn.automaton(filename = fn, format = 'auto'))
# Check that we can print FAdo.
fado = open(fn).read().strip()
CHECK_EQ(fado, exp.format('fado'))
check_fado(a)
## --------------- ##
## Output: Grail. ##
## --------------- ##
def check_grail(aut):
'''Check that FAdo accepts aut.format('grail') as input.'''
if has_fado:
name = "automaton.grail"
from FAdo import grail
# I did not find a means to read from a string...
with open(name, 'w') as f:
f.write(aut.format('grail') + "\n")
grail.importFromGrailFile(name)
os.remove(name)
else:
SKIP("FAdo not installed")
for fn in glob.glob(os.path.join(medir, '*.grail')):
a = vcsn.automaton(filename = fn.replace('.grail', '.gv'))
# Check that we can print Grail.
grail = open(fn).read().strip()
CHECK_EQ(grail, a.format('grail'))
check_grail(a)
## ------------ ##
## Conversion. ##
## ------------ ##
# Convert an automaton from lal_char, b to law_char, z.
CHECK_EQ(vcsn.automaton('''context = "law_char, z"
$ -> 0
0 -> 1 a, b
1 -> 1 c
1 -> $''', 'daut'),
vcsn.automaton('''context = "lal_char(abc), b"
$ -> 0
0 -> 1 a, b
1 -> 1 c
1 -> $''', 'daut').automaton(vcsn.context("law_char(abc), z")))
# Convert an automaton to a smaller, valid, alphabet.
CHECK_EQ(vcsn.automaton('''context = "law_char(abc), z"
0 -> 1 a, b''', 'daut'),
vcsn.automaton('''context = "lal_char(a-z), b"
0 -> 1 a, b''', 'daut').automaton(vcsn.context("law_char(abc), z")))
# Convert an automaton to a smaller, invalid, alphabet.
XFAIL(lambda: vcsn.automaton('''context = "lal_char(abc), b"
0 -> 1 a, b''', 'daut').automaton(vcsn.context("law_char(xy), z")))
# Convert to an invalid smaller weightset.
XFAIL(lambda: vcsn.automaton('''context = "lal_char(abc), z"
0 -> 1 <3>a, b''', 'daut').automaton(vcsn.context("lal_char(xy), b")))
|
gpl-3.0
| 8,603,536,619,741,621,000
| 22.114385
| 135
| 0.531004
| false
| 2.620751
| false
| false
| false
|
nacl-webkit/chrome_deps
|
tools/telemetry/telemetry/page_unittest.py
|
1
|
1754
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import page
class TestPage(unittest.TestCase):
def testGetUrlBaseDirAndFileForAbsolutePath(self):
apage = page.Page('file:///somedir/otherdir/file.html',
None, # In this test, we don't need a page set.
base_dir='basedir')
dirname, filename = apage.url_base_dir_and_file
self.assertEqual(dirname, 'basedir/somedir/otherdir')
self.assertEqual(filename, 'file.html')
def testGetUrlBaseDirAndFileForRelativePath(self):
apage = page.Page('file:///../../otherdir/file.html',
None, # In this test, we don't need a page set.
base_dir='basedir')
dirname, filename = apage.url_base_dir_and_file
self.assertEqual(dirname, 'basedir/../../otherdir')
self.assertEqual(filename, 'file.html')
def testGetUrlBaseDirAndFileForUrlBaseDir(self):
apage = page.Page('file:///../../somedir/otherdir/file.html',
None, # In this test, we don't need a page set.
base_dir='basedir')
setattr(apage, 'url_base_dir', 'file:///../../somedir/')
dirname, filename = apage.url_base_dir_and_file
self.assertEqual(dirname, 'basedir/../../somedir/')
self.assertEqual(filename, 'otherdir/file.html')
def testDisplayUrlForHttp(self):
self.assertEquals(page.Page('http://www.foo.com/', None).display_url,
'www.foo.com/')
def testDisplayUrlForFile(self):
self.assertEquals(
page.Page('file:///../../otherdir/file.html', None).display_url,
'file.html')
|
bsd-3-clause
| 6,564,029,803,146,257,000
| 41.780488
| 73
| 0.640251
| false
| 3.755889
| true
| false
| false
|
Jean-Simon-Barry/djangoproject
|
djangoproject/settings.py
|
1
|
2131
|
"""
Django settings for djangoproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'stmieloldo55*n#49w!wcsz8sg3e_9bh3_pd2vs1n#(g#mpef6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djangoproject.urls'
WSGI_APPLICATION = 'djangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
gpl-2.0
| -9,102,493,995,731,263,000
| 23.77907
| 71
| 0.727827
| false
| 3.28858
| false
| false
| false
|
chris-ch/lemvi-risk
|
scripts/track-drawdowns.py
|
1
|
5109
|
import argparse
import json
import logging
import os
from datetime import datetime
import tenacity
import gservices
from risklimits import extract_navs, compute_high_watermark, extract_flows
def from_excel_datetime(excel_date):
return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(excel_date) - 2)
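# The "- 2" accounts for Excel's day numbering starting at 1 for 1900-01-01 and for the
# nonexistent 1900-02-29 that Excel counts; e.g. from_excel_date(43831) gives datetime.date(2020, 1, 1).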
def from_excel_date(excel_date):
return from_excel_datetime(excel_date).date()
@tenacity.retry(wait=tenacity.wait_fixed(100), stop=tenacity.stop_after_attempt(5))
def main(args):
full_config_path = os.path.abspath(args.config)
logging.info('using config file "{}"'.format(full_config_path))
with open(full_config_path, 'r') as config_file:
config = json.load(config_file)
secrets_file_path = os.path.abspath(args.file_secret)
logging.info('using secrets file "{}"'.format(secrets_file_path))
with open(secrets_file_path) as json_data:
secrets_content = json.load(json_data)
google_credential = secrets_content['google.credential']
authorized_http, credentials = gservices.authorize_services(google_credential)
svc_sheet = gservices.create_service_sheets(credentials)
google_sheet_flow_id = config['google.sheet.flows.id']
workbook_flows = svc_sheet.open_by_key(google_sheet_flow_id)
flows = workbook_flows.worksheet_by_title('Flows EUR').get_all_records()
google_sheet_nav_id = config['google.sheet.navs.id']
workbook_navs = svc_sheet.open_by_key(google_sheet_nav_id)
navs = dict()
for tab in workbook_navs.worksheets():
navs[tab.title] = tab.get_all_records()
hwms, drawdowns = compute_high_watermark(extract_flows(flows), extract_navs(navs))
google_sheet_risk_limits_id = config['google.sheet.risk_limits.id']
workbook_risk_limits = svc_sheet.open_by_key(google_sheet_risk_limits_id)
sheet_hwm = workbook_risk_limits.worksheet_by_title('Adjusted High Watermarks')
sheet_drawdowns = workbook_risk_limits.worksheet_by_title('Drawdowns')
header_hwms = sheet_hwm.get_row(1, returnas='matrix')
header_drawdowns = sheet_drawdowns.get_row(1, returnas='matrix')
hwm_update_only = False
hwm_last_date_value = sheet_hwm.cell('A2').value
if hwm_last_date_value == '':
hwm_last_date_value = sheet_hwm.cell('A3').value
last_hwm_update = datetime.strptime(hwm_last_date_value, '%Y-%m-%d').date()
dd_update_only = False
dd_last_date_value = sheet_drawdowns.cell('A2').value
if dd_last_date_value == '':
dd_last_date_value = sheet_drawdowns.cell('A3').value
last_drawdown_update = datetime.strptime(dd_last_date_value, '%Y-%m-%d').date()
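    # New rows are inserted just below the header in row 1, so A2 normally holds the most
    # recent date; if A2 is empty (presumably a blank spacer row), the date in A3 is used instead.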
last_hwms = hwms[hwms.index > last_hwm_update].sort_index(ascending=False)
for as_of_date, row in last_hwms.iterrows():
row_data = [as_of_date.strftime('%Y-%m-%d')]
for account_id in header_hwms[1:]:
if account_id in row.to_dict():
value = row.to_dict()[account_id]
row_data.append(float(value))
else:
row_data.append(0.)
if hwm_update_only:
sheet_hwm.update_rows(row=1, number=1, values=[row_data])
else:
sheet_hwm.insert_rows(row=1, number=1, values=[row_data])
last_drawdowns = drawdowns[drawdowns.index > last_drawdown_update].sort_index(ascending=False)
for as_of_date, row in last_drawdowns.iterrows():
row_data = [as_of_date.strftime('%Y-%m-%d')]
for account_id in header_drawdowns[1:]:
if account_id in row.to_dict():
value = row.to_dict()[account_id]
row_data.append(float(value))
else:
row_data.append(0.)
if dd_update_only:
sheet_drawdowns.update_rows(row=1, number=1, values=[row_data])
else:
sheet_drawdowns.insert_rows(row=1, number=1, values=[row_data])
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
logging.getLogger('requests').setLevel(logging.WARNING)
file_handler = logging.FileHandler('update-nav-hist.log', mode='w')
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler.setFormatter(formatter)
logging.getLogger().addHandler(file_handler)
parser = argparse.ArgumentParser(description='NAV history update.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--file-ibrokers-flex', type=str, help='InteractiveBrokers Flex response')
parser.add_argument('--file-secret', type=str, help='file including secret connection data', default='secrets.json')
parser.add_argument('--config', type=str, help='file including secret connection data', default='config.json')
args = parser.parse_args()
main(args)
|
mit
| 385,711,334,942,550,340
| 43.043103
| 120
| 0.633001
| false
| 3.482618
| true
| false
| false
|
aishmittal/Product-Info-Crawler
|
demo/app/views.py
|
1
|
1977
|
from app import app
import os
from flask import render_template
from flask import Flask, redirect, url_for, request, send_from_directory
from flask import json
import sys
import csv
import numpy as np  # MyEncoder below relies on numpy scalar/array types
curfilePath = os.path.abspath(__file__)
curDir = os.path.abspath(os.path.join(curfilePath, os.pardir))
parDir = os.path.abspath(os.path.join(curDir, os.pardir))
tmpDir = os.path.abspath(os.path.join(curDir,'tmp/'))
resultFile=os.path.abspath(os.path.join(parDir,'results.csv'))
crawlerFile=os.path.abspath(os.path.join(curDir, os.pardir,os.pardir,'run_crawler_demo.py'))
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
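# MyEncoder lets json.dumps() serialize numpy scalar and array values (np.integer,
# np.floating, np.ndarray) that the default encoder would reject with a TypeError.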
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html",title='Home')
@app.route('/search_results',methods = ['POST'])
def search_results():
if request.method == 'POST':
os.system('python '+crawlerFile+' '+ request.json['product'])
print 'Crawling Completed'
title=[]
image=[]
price=[]
url=[]
source=[]
with open(resultFile) as f:
records = csv.DictReader(f)
for row in records:
title.append(row['product_name'])
image.append(row['image_url'])
price.append(row['price'])
url.append(row['product_url'])
source.append(row['source'])
data=dict({'product_name':title,'image_url':image,'price':price,'product_url':url,'source':source})
response = app.response_class(
response=json.dumps(data, cls=MyEncoder),
status=200,
mimetype='application/json'
)
return response
|
mit
| -3,283,697,977,506,034,700
| 30.903226
| 109
| 0.607486
| false
| 3.794626
| false
| false
| false
|
Rahveiz/PingCheck
|
main.py
|
1
|
6665
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import time
import socket
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from mainwindow4 import Ui_MainWindow
# Worker thread that handles the ping operations
class pingThread(QThread):
def __init__(self,hostname, timecheck,GUI, report):
QThread.__init__(self)
        # Initialize the instance attributes
self.hostname = hostname
self.timecheck = timecheck
self.ui = GUI
self.report = open(report + "/PyPingCheck_report.txt",'a')
        # Use the thread start time as the time of the last success
self.successtime= int(time.time())
        # Initialize the ping error flag
self.pingerror = False
def __del__(self):
self.wait()
def _ping_check(self, hostname, timecheck):
        # Update the status text before pinging
self.text_result = "PingCheck : Checking ..."
self.ui.label_status.setText(self.text_result)
self.ui.label_status.setStyleSheet("color: rgba(0,0,0,1);")
        # Ping the IP passed as argument
        # Redirect the command output into the ping_var variable
        # creationflags=8 hides the console window when subprocess is called
self.ping_var = str(subprocess.Popen("ping %s" %self.hostname, stdout=subprocess.PIPE, creationflags=8).stdout.read())
        # Check whether the IP answered the ping
if "TTL" in self.ping_var:
self.text_result = "PingCheck : SUCCESS"
            # If the error flag was set, reset the time of the last success
if self.pingerror == True:
self.successtime = int(time.time())
            # Clear the error flag
self.pingerror = False
else:
self.text_result = "PingCheck : FAIL"
            # Set the error flag
self.pingerror = True
            # Log to the report file when the IP does not answer
self.report.write(time.strftime("%d-%m-%Y | %X", time.localtime()) + '\t PingCheck failed (Hostname : %s)\n'%self.hostname)
self.report.flush()
        # Update the status text
self.ui.label_status.setText(self.text_result)
self.ui.label_status.setStyleSheet("color: rgba(255,0,0,1);")
        # Log when the IP has been answering continuously for timecheck seconds
if (int(time.time()) >= (self.successtime + self.timecheck)):
self.report.write(time.strftime("%d-%m-%Y | %X", time.localtime()) + '\t %s secs of SUCCESS '%self.timecheck + '(Hostname : %s)\n'%self.hostname)
self.report.flush()
self.successtime = int(time.time())
def run(self):
while True:
self._ping_check(self.hostname, self.timecheck)
self.sleep(3)
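        # run() re-checks every 3 seconds until the GUI terminates the thread
        # (see ShipHolderApplication.end_thread below).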
#Application
class ShipHolderApplication(QMainWindow):
    # Set up the GUI and wire the start button
def __init__(self):
super (self.__class__, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.input_file.setText("Report directory")
self.ui.button_start.clicked.connect(self.start_thread)
self.ui.button_file.clicked.connect(self.get_file)
def get_file(self):
self.report_file_path = QFileDialog.getExistingDirectory(self)
self.ui.input_file.setText(self.report_file_path)
def check_input(self):
        # Start with all checks passing
self.check = [True,True,True]
        # Get the report directory
self.report = str(self.ui.input_file.text())
if os.path.isdir(self.report) != True:
self.ui.input_file.setText("Error : Please select a directory")
self.check[2] = False
        # Get the host input value
self.host = str(self.ui.input_ip.text())
        # Check that the IP is valid
if valid_ip(self.host) != True:
            # Show an error message if it is not
self.ui.label_iperror.setText("Wrong IP format")
            # Mark this check as failed
self.check[0] = False
else:
self.ui.label_iperror.setText("")
        # Get the time input value
self.period = str(self.ui.input_time.text())
        # Try to convert the string to an integer
try:
int(self.period)
except:
            # Show an error message if needed
self.ui.label_timerror.setText("Wrong time format")
            # Update the check list
self.check[1] = False
else:
self.ui.label_timerror.setText("")
            # If possible, convert the string to an integer
self.period = int(self.period)
        # Return the check list
return self.check
def start_thread(self):
        # Only if all inputs are valid
if self.check_input() == [True,True,True]:
            # Create the worker thread
self.get_thread = pingThread(self.host,self.period,self.ui, self.report)
            # Start it
self.get_thread.start()
            # Enable the stop button
self.ui.button_stop.setEnabled(True)
            # Disable the inputs while the thread is running
self.ui.input_ip.setDisabled(True)
self.ui.input_time.setDisabled(True)
self.ui.input_file.setDisabled(True)
            # Disable the browse button
self.ui.button_file.setEnabled(False)
            # Connect the stop button to the stop handler
self.ui.button_stop.clicked.connect(self.end_thread)
            # Disable the start button so a second thread cannot be launched at the same time
self.ui.button_start.setEnabled(False)
def end_thread(self):
self.get_thread.terminate()
self.ui.button_start.setEnabled(True)
        self.ui.button_file.setEnabled(True)
self.ui.input_ip.setDisabled(False)
self.ui.input_time.setDisabled(False)
self.ui.input_file.setDisabled(False)
self.ui.button_stop.setEnabled(False)
def valid_ip(address):
try:
socket.inet_aton(address)
return True
except:
return False
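# Note: socket.inet_aton() also accepts shorthand addresses such as '127.1', so this check
# is permissive; socket.inet_pton(socket.AF_INET, address) would be stricter
# (a suggestion only, not what the code above uses).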
def exitapp(app):
app.exec_()
def main():
app = QApplication(sys.argv)
myapp = ShipHolderApplication()
myapp.setWindowTitle("PyPingCheck")
myapp.setWindowIcon(QIcon("Icone/ping_icon.png"))
myapp.show()
sys.exit(exitapp(app))
if __name__ == '__main__':
main()
|
gpl-3.0
| -1,618,018,899,325,959,200
| 31.99505
| 157
| 0.613053
| false
| 3.52459
| false
| false
| false
|
mallconnectionorg/openerp
|
rrhh/l10n_cl_hr_payroll/model/hr_family_responsibilities.py
|
1
|
2469
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Pedro Arroyo M <parroyo@mallconnection.com>
# Copyright (C) 2015 Mall Connection(<http://www.mallconnection.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class hr_family_responsibilities(osv.osv):
'''
Open ERP Model
'''
_name = 'hr.family.responsibilities'
_description = 'openerpmodel'
_columns = {
'name':fields.char('Name', size=64, required=True, readonly=False),
'type':fields.selection([
('simple','simple responsibility'),
('maternal','maternal responsibility'),
('invalid','invalid responsibility'),
], 'State', select=True),
'relationship':fields.selection([
('father','father'),
('son','son / daughter'),
('spouse','spouse'),
('Father in law','Father in law / mother in law'),
('second','second'),
('Grandfather','Grandfather / Grandmother'),
('grandchild','grandchild / granddaughter'),
('sister','sister / brother'),
('brother in law','brother in law / sister in law'),
], 'Relationship', select=True, readonly=False),
'vat': fields.char('TIN', size=32, help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by the some of the legal statements."),
'employee_id': fields.many2one('hr.employee', string='Employee'),
}
hr_family_responsibilities()
|
agpl-3.0
| 8,236,057,115,983,737,000
| 42.333333
| 177
| 0.565411
| false
| 4.346831
| false
| false
| false
|
airalcorn2/Deep-Semantic-Similarity-Model
|
deep_semantic_similarity_keras.py
|
1
|
8280
|
# Michael A. Alcorn (malcorn@redhat.com)
# An implementation of the Deep Semantic Similarity Model (DSSM) found in [1].
# [1] Shen, Y., He, X., Gao, J., Deng, L., and Mesnil, G. 2014. A latent semantic model
# with convolutional-pooling structure for information retrieval. In CIKM, pp. 101-110.
# http://research.microsoft.com/pubs/226585/cikm2014_cdssm_final.pdf
# [2] http://research.microsoft.com/en-us/projects/dssm/
# [3] http://research.microsoft.com/pubs/238873/wsdm2015.v3.pdf
import numpy as np
from keras import backend
from keras.layers import Activation, Input
from keras.layers.core import Dense, Lambda, Reshape
from keras.layers.convolutional import Convolution1D
from keras.layers.merge import concatenate, dot
from keras.models import Model
LETTER_GRAM_SIZE = 3 # See section 3.2.
WINDOW_SIZE = 3 # See section 3.2.
TOTAL_LETTER_GRAMS = int(3 * 1e4) # Determined from data. See section 3.2.
WORD_DEPTH = WINDOW_SIZE * TOTAL_LETTER_GRAMS # See equation (1).
K = 300 # Dimensionality of the max-pooling layer. See section 3.4.
L = 128 # Dimensionality of latent semantic space. See section 3.5.
J = 4 # Number of random unclicked documents serving as negative examples for a query. See section 4.
FILTER_LENGTH = 1 # We only consider one time step for convolutions.
# Input tensors holding the query, positive (clicked) document, and negative (unclicked) documents.
# The first dimension is None because the queries and documents can vary in length.
query = Input(shape = (None, WORD_DEPTH))
pos_doc = Input(shape = (None, WORD_DEPTH))
neg_docs = [Input(shape = (None, WORD_DEPTH)) for j in range(J)]
# Query model. The paper uses separate neural nets for queries and documents (see section 5.2).
# In this step, we transform each word vector with WORD_DEPTH dimensions into its
# convolved representation with K dimensions. K is the number of kernels/filters
# being used in the operation. Essentially, the operation is taking the dot product
# of a single weight matrix (W_c) with each of the word vectors (l_t) from the
# query matrix (l_Q), adding a bias vector (b_c), and then applying the tanh activation.
# That is, h_Q = tanh(W_c • l_Q + b_c). With that being said, that's not actually
# how the operation is being calculated here. To tie the weights of the weight
# matrix (W_c) together, we have to use a one-dimensional convolutional layer.
# Further, we have to transpose our query matrix (l_Q) so that time is the first
# dimension rather than the second (as described in the paper). That is, l_Q[0, :]
# represents our first word vector rather than l_Q[:, 0]. We can think of the weight
# matrix (W_c) as being similarly transposed such that each kernel is a column
# of W_c. Therefore, h_Q = tanh(l_Q • W_c + b_c) with l_Q, W_c, and b_c being
# the transposes of the matrices described in the paper. Note: the paper does not
# include bias units.
query_conv = Convolution1D(K, FILTER_LENGTH, padding = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")(query) # See equation (2).
# Next, we apply a max-pooling layer to the convolved query matrix. Keras provides
# its own max-pooling layers, but they cannot handle variable length input (as
# far as I can tell). As a result, I define my own max-pooling layer here. In the
# paper, the operation selects the maximum value for each row of h_Q, but, because
# we're using the transpose, we're selecting the maximum value for each column.
query_max = Lambda(lambda x: backend.max(x, axis = 1), output_shape = (K, ))(query_conv) # See section 3.4.
# In this step, we generate the semantic vector represenation of the query. This
# is a standard neural network dense layer, i.e., y = tanh(W_s • v + b_s). Again,
# the paper does not include bias units.
query_sem = Dense(L, activation = "tanh", input_dim = K)(query_max) # See section 3.5.
# The document equivalent of the above query model.
doc_conv = Convolution1D(K, FILTER_LENGTH, padding = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")
doc_max = Lambda(lambda x: backend.max(x, axis = 1), output_shape = (K, ))
doc_sem = Dense(L, activation = "tanh", input_dim = K)
pos_doc_conv = doc_conv(pos_doc)
neg_doc_convs = [doc_conv(neg_doc) for neg_doc in neg_docs]
pos_doc_max = doc_max(pos_doc_conv)
neg_doc_maxes = [doc_max(neg_doc_conv) for neg_doc_conv in neg_doc_convs]
pos_doc_sem = doc_sem(pos_doc_max)
neg_doc_sems = [doc_sem(neg_doc_max) for neg_doc_max in neg_doc_maxes]
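# Note: doc_conv, doc_max and doc_sem are single layer instances applied to the positive
# document and to every negative document, so the document-side weights are shared.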
# This layer calculates the cosine similarity between the semantic representations of
# a query and a document.
R_Q_D_p = dot([query_sem, pos_doc_sem], axes = 1, normalize = True) # See equation (4).
R_Q_D_ns = [dot([query_sem, neg_doc_sem], axes = 1, normalize = True) for neg_doc_sem in neg_doc_sems] # See equation (4).
concat_Rs = concatenate([R_Q_D_p] + R_Q_D_ns)
concat_Rs = Reshape((J + 1, 1))(concat_Rs)
# In this step, we multiply each R(Q, D) value by gamma. In the paper, gamma is
# described as a smoothing factor for the softmax function, and it's set empirically
# on a held-out data set. We're going to learn gamma's value by pretending it's
# a single 1 x 1 kernel.
weight = np.array([1]).reshape(1, 1, 1)
with_gamma = Convolution1D(1, 1, padding = "same", input_shape = (J + 1, 1), activation = "linear", use_bias = False, weights = [weight])(concat_Rs) # See equation (5).
with_gamma = Reshape((J + 1, ))(with_gamma)
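# The 1 x 1 "convolution" above multiplies each R(Q, D) by a single learnable scalar
# (gamma), initialized to 1 through the weights argument.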
# Finally, we use the softmax function to calculate P(D+|Q).
prob = Activation("softmax")(with_gamma) # See equation (5).
# We now have everything we need to define our model.
model = Model(inputs = [query, pos_doc] + neg_docs, outputs = prob)
model.compile(optimizer = "adadelta", loss = "categorical_crossentropy")
# Build a random data set.
sample_size = 10
l_Qs = []
pos_l_Ds = []
# Variable length input must be handled differently from padded input.
BATCH = True
(query_len, doc_len) = (5, 100)
for i in range(sample_size):
if BATCH:
l_Q = np.random.rand(query_len, WORD_DEPTH)
l_Qs.append(l_Q)
l_D = np.random.rand(doc_len, WORD_DEPTH)
pos_l_Ds.append(l_D)
else:
query_len = np.random.randint(1, 10)
l_Q = np.random.rand(1, query_len, WORD_DEPTH)
l_Qs.append(l_Q)
doc_len = np.random.randint(50, 500)
l_D = np.random.rand(1, doc_len, WORD_DEPTH)
pos_l_Ds.append(l_D)
neg_l_Ds = [[] for j in range(J)]
for i in range(sample_size):
possibilities = list(range(sample_size))
possibilities.remove(i)
negatives = np.random.choice(possibilities, J, replace = False)
for j in range(J):
negative = negatives[j]
neg_l_Ds[j].append(pos_l_Ds[negative])
if BATCH:
y = np.zeros((sample_size, J + 1))
y[:, 0] = 1
l_Qs = np.array(l_Qs)
pos_l_Ds = np.array(pos_l_Ds)
for j in range(J):
neg_l_Ds[j] = np.array(neg_l_Ds[j])
history = model.fit([l_Qs, pos_l_Ds] + [neg_l_Ds[j] for j in range(J)], y, epochs = 1, verbose = 0)
else:
y = np.zeros(J + 1).reshape(1, J + 1)
y[0, 0] = 1
for i in range(sample_size):
history = model.fit([l_Qs[i], pos_l_Ds[i]] + [neg_l_Ds[j][i] for j in range(J)], y, epochs = 1, verbose = 0)
# Here, I walk through how to define a function for calculating output from the
# computational graph. Let's define a function that calculates R(Q, D+) for a given
# query and clicked document. The function depends on two inputs, query and pos_doc.
# That is, if you start at the point in the graph where R(Q, D+) is calculated
# and then work backwards as far as possible, you'll end up at two different starting
# points: query and pos_doc. As a result, we supply those inputs in a list to the
# function. This particular function only calculates a single output, but multiple
# outputs are possible (see the next example).
get_R_Q_D_p = backend.function([query, pos_doc], [R_Q_D_p])
if BATCH:
get_R_Q_D_p([l_Qs, pos_l_Ds])
else:
get_R_Q_D_p([l_Qs[0], pos_l_Ds[0]])
# A slightly more complex function. Notice that both neg_docs and the output are
# lists.
get_R_Q_D_ns = backend.function([query] + neg_docs, R_Q_D_ns)
if BATCH:
get_R_Q_D_ns([l_Qs] + [neg_l_Ds[j] for j in range(J)])
else:
get_R_Q_D_ns([l_Qs[0]] + neg_l_Ds[0])
|
mit
| 1,212,973,223,307,917,600
| 46.551724
| 168
| 0.68685
| false
| 3.012013
| false
| false
| false
|
protonyx/labtronyx-gui
|
labtronyxgui/applets/Resources/VISA.py
|
1
|
2904
|
"""
.. codeauthor:: Kevin Kennedy <kennedy.kevin@gmail.com>
"""
from Base_Applet import Base_Applet
import Tkinter as Tk
from widgets import *
class VISA(Base_Applet):
info = {
# Description
'description': 'Generic view for VISA Resources',
# List of compatible resource types
'validResourceTypes': ['VISA']
}
def run(self):
self.wm_title("VISA Resource")
self.instr = self.getInstrument()
# Driver info
self.w_info = vw_info.vw_DriverInfo(self, self.instr)
self.w_info.grid(row=0, column=0, columnspan=2)
# Send
self.send_val = Tk.StringVar(self)
self.lbl_send = Tk.Label(self, width=20,
text="Send Command",
anchor=Tk.W, justify=Tk.LEFT)
self.lbl_send.grid(row=1, column=0)
self.txt_send = Tk.Entry(self, width=40,
textvariable=self.send_val)
self.txt_send.grid(row=1, column=1)
# Buttons
self.f_buttons = Tk.Frame(self, padx=5, pady=5)
self.btn_write = Tk.Button(self.f_buttons,
text="Write",
command=self.cb_write,
width=10,
padx=3)
self.btn_write.pack(side=Tk.LEFT)
self.btn_query = Tk.Button(self.f_buttons,
text="Query",
command=self.cb_query,
width=10,
padx=3)
        self.btn_query.pack(side=Tk.LEFT)
self.btn_read = Tk.Button(self.f_buttons,
text="Read",
command=self.cb_read,
width=10,
padx=3)
self.btn_read.pack(side=Tk.LEFT)
self.f_buttons.grid(row=2, column=1)
# Receive
self.lbl_receive = Tk.Label(self, width=20,
text="Received Data",
anchor=Tk.W, justify=Tk.LEFT)
self.lbl_receive.grid(row=3, column=0)
self.txt_receive = Tk.Text(self, state=Tk.DISABLED,
width=20, height=10)
self.txt_receive.grid(row=3, column=1,
sticky=Tk.N+Tk.E+Tk.S+Tk.W)
def cb_write(self):
data = self.send_val.get()
self.instr.write(data)
def cb_query(self):
self.cb_write()
self.cb_read()
def cb_read(self):
data = self.instr.read()
self.txt_receive.configure(state=Tk.NORMAL)
        self.txt_receive.delete('1.0', Tk.END)
self.txt_receive.insert(Tk.END, data)
self.txt_receive.configure(state=Tk.DISABLED)
|
mit
| -527,448,498,447,606,460
| 32.37931
| 68
| 0.474518
| false
| 3.851459
| false
| false
| false
|
SetBased/py-kerapu
|
kerapu/command/TestsetShredderCommand.py
|
1
|
11590
|
import csv
import datetime
import os
import random
import shutil
import string
import zipfile
from typing import Iterable, List, Dict
from cleo import Command
from lxml import etree
from kerapu.style.KerapuStyle import KerapuStyle
class TestShredderCommand(Command):
"""
    Converts the XML files of the test set into a CSV file
kerapu:test-shredder
    {testset-zip : ZIP file containing the test set}
    {testset-csv : Path where the CSV file with the tests must be stored}
"""
# ------------------------------------------------------------------------------------------------------------------
def __extract_zip_file(self, zip_filename: str, tmp_dir: str):
"""
        Extracts the ZIP file with the test set into a folder.
        :param str zip_filename: The path to the ZIP file with the test set.
        :param str tmp_dir: Path to the folder.
"""
self.output.writeln('Uitpakken van <fso>{}</fso> in <fso>{}</fso>'.format(zip_filename, tmp_dir))
with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
zip_ref.extractall(tmp_dir)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def ordinal(path: str) -> int:
"""
Geeft het volgnummer van een test.
:param str path: Het path naar het XML-bestand met de test case.
"""
parts = os.path.basename(path).split('_')
return int(parts[6])
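        # Example with a hypothetical filename: 'x_x_x_x_x_x_12_case.xml' -> 12,
        # i.e. the 7th '_'-separated field of the basename.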
# ------------------------------------------------------------------------------------------------------------------
def __lees_test_cases_lijst(self, folder: str) -> List:
"""
        Returns a list with all files in a folder.
        :param str folder: The path to the folder.
"""
entries = os.listdir(folder)
filenames = list()
for entry in entries:
path = os.path.join(folder, entry)
if os.path.isfile(path):
filenames.append(path)
self.output.writeln('Aantal gevonden test cases: {}'.format(len(filenames)))
return sorted(filenames, key=TestShredderCommand.ordinal)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __maak_xpath(parts: Iterable) -> str:
"""
        Builds an xpath string.
        :param tuple parts: The parts of the xpath.
:rtype: str
"""
xpath = ''
for part in parts:
if xpath:
xpath += '/'
xpath += 'xmlns:' + part
return xpath
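        # e.g. ('ControlActProcess', 'subject') -> 'xmlns:ControlActProcess/xmlns:subject'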
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __convert_date(date: str) -> str:
"""
        Converts a date in YYYYMMDD format to YYYY-MM-DD format.
        :param str date: The date in YYYYMMDD format.
:rtype: str
"""
return date[:4] + '-' + date[4:6] + '-' + date[6:8]
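        # e.g. '20190301' -> '2019-03-01'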
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __leeftijd_geboorte_datum(date: str, leeftijd: int) -> str:
"""
        Returns the date of birth given a date and an age (assuming the person's birthday is not on that date).
        :param str date: The given date in YYYY-MM-DD format.
        :param int leeftijd: The age in years.
        :rtype: str
"""
date = datetime.date(int(date[:4]) - leeftijd, int(date[5:7]), int(date[8:10]))
date -= datetime.timedelta(days=1)
return date.isoformat()
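        # e.g. ('2019-06-15', 40) -> '1979-06-14', one day before the date 40 years earlier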
# ------------------------------------------------------------------------------------------------------------------
def __shred_xml_bestand(self, filename: str) -> Dict:
"""
        Reads the relevant data from an XML file with a test case.
        :param str filename: The filename of the XML file.
:rtype: dict
"""
doc = etree.parse(filename)
xpath = '/soapenv:Envelope/soapenv:Body/xmlns:FICR_IN900101NL04'
namespaces = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
'xmlns': 'urn:hl7-org:v3'}
        # Read the declaration code.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'id')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
declaratie_code = elements[0].get('extension')
        # Read the specialism code.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'derivedFrom',
'zorgtraject', 'responsibleParty', 'assignedPerson', 'code')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
specialisme_code = elements[0].get('code')
        # Read the diagnosis code.
parts = (
'ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'pertinentInformation1',
'typerendeDiagnose', 'value')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
diagnose_code = elements[0].get('code')
        # Read the care type code.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'code')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
zorg_type_code = elements[0].get('code') if elements else None
        # Read the care demand code.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'derivedFrom',
'zorgtraject', 'reason', 'zorgvraag', 'value')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
zorg_vraag_code = elements[0].get('code') if elements else None
        # Read the start date.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'effectiveTime', 'low')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
begin_datum = self.__convert_date(elements[0].get('value')) if elements else None
        # Read the patient's date of birth.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'subject', 'patient', 'subjectOf', 'leeftijd',
'value')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
leeftijd = int(elements[0].get('value')) if elements else None
geboorte_datum = self.__leeftijd_geboorte_datum(begin_datum, leeftijd)
        # Read the patient's gender.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'subject', 'patient', 'patientPerson',
'administrativeGenderCode')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
geslacht_code = elements[0].get('code') if elements else None
        # Read the AGB code of the care institution.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'author', 'assignedOrganization', 'id')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
zorg_instelling_code = elements[0].get('extension') if elements else None
        # Read all care activities.
zorg_activiteiten = list()
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'debit',
'zorgactiviteit')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
for element in elements:
path = 'xmlns:code'
sub_elements = element.xpath(path, namespaces=namespaces)
zorg_activiteit_code = sub_elements[0].get('code') if sub_elements else None
path = 'xmlns:repeatNumber'
sub_elements = element.xpath(path, namespaces=namespaces)
aantal = int(sub_elements[0].get('value')) if sub_elements else None
zorg_activiteiten.append((zorg_activiteit_code, aantal))
return {'subtraject_nummer': os.path.basename(filename),
'declaratie_code': declaratie_code,
'specialisme_code': specialisme_code,
'diagnose_code': diagnose_code,
'zorg_type_code': zorg_type_code,
'zorg_vraag_code': zorg_vraag_code,
'begin_datum': begin_datum,
'geboorte_datum': geboorte_datum,
'geslacht_code': geslacht_code,
'zorg_instelling_code': zorg_instelling_code,
'zorg_activiteiten': zorg_activiteiten}
# ----------------------------------------------------------------------------------------------------------------------
@staticmethod
def __write_subtraject(writer, subtraject: Dict) -> None:
"""
        Writes the subtrajectory with all care activities to a CSV file.
        :param writer: The handle to the CSV writer.
        :param dict subtraject: The details of the subtrajectory.
"""
writer.writerow((subtraject['subtraject_nummer'],
subtraject['specialisme_code'],
subtraject['diagnose_code'],
subtraject['zorg_type_code'],
subtraject['zorg_vraag_code'],
subtraject['begin_datum'],
subtraject['geboorte_datum'],
subtraject['geslacht_code'],
subtraject['zorg_instelling_code'],
subtraject['declaratie_code']))
for zorgactiviteit in subtraject['zorg_activiteiten']:
writer.writerow((zorgactiviteit[0], zorgactiviteit[1]))
# ----------------------------------------------------------------------------------------------------------------------
def __extract_files(self, writer, filenames: List) -> None:
"""
        Extracts the data from a list of XML files with test cases and writes this data to a CSV file.
        :param writer: The handle to the CSV writer.
        :param list filenames: The list of filenames of XML files with test cases.
"""
for filename in filenames:
subtraject = self.__shred_xml_bestand(filename)
self.__write_subtraject(writer, subtraject)
# ------------------------------------------------------------------------------------------------------------------
def handle(self) -> int:
"""
Executes the command.
"""
self.output = KerapuStyle(self.input, self.output)
zip_filename = self.argument('testset-zip')
csv_filename = self.argument('testset-csv')
tmp_dir = '.kerapu-' + ''.join(random.choices(string.ascii_lowercase, k=12))
os.mkdir(tmp_dir)
self.__extract_zip_file(zip_filename, tmp_dir)
files = self.__lees_test_cases_lijst(tmp_dir)
with open(csv_filename, 'w', encoding='utf-8') as handle:
csv_writer = csv.writer(handle, dialect=csv.unix_dialect)
self.__extract_files(csv_writer, files)
shutil.rmtree(tmp_dir)
return 0
# ----------------------------------------------------------------------------------------------------------------------
|
mit
| 6,899,941,541,554,615,000
| 42.246269
| 124
| 0.528645
| false
| 3.991047
| true
| false
| false
|
effigies/mne-python
|
examples/export/plot_epochs_to_nitime.py
|
2
|
2043
|
"""
=======================
Export epochs to NiTime
=======================
This script shows how to export Epochs to the NiTime library
for further signal processing and data analysis.
"""
# Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import mne
from mne import io
from mne.datasets import sample
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG gradiometers + EOG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# Export to NiTime
epochs_ts = epochs.to_nitime(picks=np.arange(20), collapse=True)
###############################################################################
# Now use nitime's OO-interface to compute coherence between sensors
from nitime.analysis import MTCoherenceAnalyzer
from nitime.viz import drawmatrix_channels
import matplotlib.pyplot as plt
# setup coherency analyzer
C = MTCoherenceAnalyzer(epochs_ts)
# confine analysis to 10 - 30 Hz
freq_idx = np.where((C.frequencies > 10) * (C.frequencies < 30))[0]
# compute average coherence
coh = np.mean(C.coherence[:, :, freq_idx], -1) # Averaging on last dimension
drawmatrix_channels(coh, epochs.ch_names, color_anchor=0,
title='MEG gradiometer coherence')
plt.show()
|
bsd-3-clause
| -4,635,458,188,188,562,000
| 30.430769
| 79
| 0.625551
| false
| 3.217323
| false
| false
| false
|
Knygar/hwios
|
services/web_ui/models/ws_realm.py
|
1
|
7844
|
'''
Copyright (c) OS-Networks, http://os-networks.net
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the HWIOS Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
import os
import re
import uuid
import random
from twisted.internet import reactor, defer
from twisted.python import failure, log
from django.contrib.sessions.models import Session
from hwios.core.application import HWIOS
from web_ui.models.signal import Signal, SignalPool
from web_ui.models.hwm_queue import HWM_Queue
import web_ui.urls as urls
from web_ui.models.statics import *
from web_ui.models.profiles import Profile
from web_ui.models.client import Client
class WebSocketDispatcher(object):
#each websocket controller now has access to the signal pool
signals = SignalPool()
compiled_ws_patterns = []
valid_routes = {}
def __init__(self):
'''
Initialize all modules that are specified in urls.py
'''
self.pool = WebSocketPool(self.signals)
for pattern in urls.ws_patterns:
p = re.compile(pattern[0])
module = __import__(pattern[1], globals(), locals(), [pattern[2]],-1)
self.compiled_ws_patterns.append((p,module,pattern[2],pattern[3]))
for pattern in self.compiled_ws_patterns:
if pattern[2] not in self.valid_routes:
self.valid_routes[pattern[2]] ={'instance': getattr(pattern[1],pattern[2])(self),'methods':[]}
self.valid_routes[pattern[2]]['methods'].append(pattern[3])
def match(self, url):
'''Compare the url with a list of compiled regex patterns
@return Tuple
'''
for pattern in self.compiled_ws_patterns:
rp = pattern[0].match(url)
if rp != None:
return (pattern[2],pattern[3], rp.groupdict())
return None
def route(self, target):
        '''Routes clientside HWM urls to the appropriate HWIOS function handler'''
cls, method, get_params = self.match(target)
if cls in self.valid_routes:
instance = self.valid_routes[cls]['instance']
if hasattr(instance, method):
return [instance, method, get_params]
else:
return None
return None
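    # Illustrative note (an assumption, not part of the original code): judging by how
    # __init__ and match() unpack urls.ws_patterns above, each entry is a 4-tuple of
    # (regex string, module path, controller class name, method name). A hypothetical
    # entry such as
    #   (r'^/profile/(?P<uuid>[^/]+)/$', 'web_ui.controllers.profiles', 'Profiles', 'view_profile')
    # would make match('/profile/<some-uuid>/') return
    # ('Profiles', 'view_profile', {'uuid': '<some-uuid>'}), which route() then resolves
    # to the registered 'Profiles' controller instance.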
class WebSocketPool(object):
clients = []
subscription = {}
def __init__(self, signals):
self.signals = signals
self.userlist = []
#register signals
self.signals.append(Signal('view_changed'))
self.signals.append(Signal('ws_connect'))
self.signals.append(Signal('ws_disconnect'))
def name_taken(self,name):
for _client in self.clients:
print type(_client)
print _client.profile
            # Skip clients without a profile instead of aborting the scan early
            if _client.profile is not None and _client.profile.username == name:
                return True
        return False
def get_userlist(self):
userlist = []
for _client in self.clients:
print 'CLIENT:%s' % _client.profile.username
if hasattr(_client,'transport'):
userlist.append(_client.profile.username)
return userlist
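    # Assumed structure of self.subscription (inferred from rm_subscription below,
    # not documented in the original): subscription[area][cid] = {'clients': [Client, ...], ...}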
def rm_subscription(self, client):
"""
When a client disconnects, remove all subscription references that may be left
"""
for area in self.subscription:
for cid in self.subscription[area]:
for _client in self.subscription[area][cid]['clients']:
if _client.profile.uuid == client.profile.uuid:
self.subscription[area][cid]['clients'].remove(_client)
def add_client(self, transport):
"""
After bootstrapping the client, the websocket connector needs to be added to the client....
"""
new_client = Client(transport.profile, transport.session, 'nl')
new_client.transport = transport
HWIOS.ws_realm.pool.clients.append(new_client)
log.msg('%s WS/76/HRM' % ('New client added...'),system='%s,IN' % transport.getPeer().host)
#self.clients.append(transport)
self.signals.send('ws_connect', client = new_client)
userlist = self.get_userlist()
for _client in self.clients:
            # only send online update notification to already connected clients; the new client will make its own request
            if _client is not new_client:
_client.remote('/data/modules/messenger/online/update/',{'online':userlist})
return new_client
def rm_client(self, transport):
"""
Remove a client from our clientlist, when the socket connection closes.
"""
self.signals.send('ws_disconnect', client = transport)
try:
for _client in self.clients:
if _client.transport == transport:
#_client.transport = None
self.clients.remove(_client)
self.rm_subscription(_client)
except ValueError: pass
userlist = self.get_userlist()
for _client in self.clients:
_client.remote('/data/modules/messenger/online/update/',{'online':userlist})
def get_clients(self):
return self.clients
def get_client(self, profile_uuid):
for _client in self.clients:
if _client.profile.uuid == profile_uuid:
return _client
return False
def get_anonymous_profile(self, session_id = None, ip = None):
profile = Profile()
profile.is_authenticated = False
while True:
pk = random.randrange(0, 10001, 2)
username = 'anonymous_%s' % pk
if not HWIOS.ws_realm.pool.name_taken(username):
profile.username = username
profile.pk = pk
if session_id != None:
profile.uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(session_id))
elif ip != None:
profile.uuid = uuid.uuid5(uuid.NAMESPACE_DNS, ip)
break
return profile
class WSRealm(object):
def __init__(self):
self.dispatcher = WebSocketDispatcher()
self.pool = self.dispatcher.pool
self.queue = HWM_Queue()
self._t = ws_table
|
bsd-3-clause
| -7,726,291,159,420,705,000
| 37.455882
| 117
| 0.610785
| false
| 4.508046
| false
| false
| false
|
swannapa/erpnext
|
erpnext/accounts/doctype/sales_invoice/sales_invoice.py
|
1
|
35544
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe.utils import cint, flt
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.accounts.doctype.sales_invoice.pos import update_multi_mode_option
from erpnext.controllers.selling_controller import SellingController
from erpnext.accounts.utils import get_account_currency
from erpnext.stock.doctype.delivery_note.delivery_note import update_billed_amount_based_on_so
from erpnext.projects.doctype.timesheet.timesheet import get_projectwise_timesheet_data
from erpnext.accounts.doctype.asset.depreciation \
import get_disposal_account_and_cost_center, get_gl_entries_on_asset_disposal
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos, get_delivery_note_serial_no
from erpnext.setup.doctype.company.company import update_company_current_month_sales
from erpnext.accounts.general_ledger import get_round_off_account_and_cost_center
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class SalesInvoice(SellingController):
def __init__(self, *args, **kwargs):
super(SalesInvoice, self).__init__(*args, **kwargs)
self.status_updater = [{
'source_dt': 'Sales Invoice Item',
'target_field': 'billed_amt',
'target_ref_field': 'amount',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_billed',
'source_field': 'amount',
'join_field': 'so_detail',
'percent_join_field': 'sales_order',
'status_field': 'billing_status',
'keyword': 'Billed',
'overflow_type': 'billing'
}]
def set_indicator(self):
"""Set indicator for portal"""
if self.outstanding_amount > 0:
self.indicator_color = "orange"
self.indicator_title = _("Unpaid")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def validate(self):
super(SalesInvoice, self).validate()
self.validate_auto_set_posting_time()
if not self.is_pos:
self.so_dn_required()
self.validate_proj_cust()
self.validate_with_previous_doc()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.check_close_sales_order("sales_order")
self.validate_debit_to_acc()
self.clear_unallocated_advances("Sales Invoice Advance", "advances")
self.add_remarks()
self.validate_write_off_account()
self.validate_account_for_change_amount()
self.validate_fixed_asset()
self.set_income_account_for_fixed_assets()
if cint(self.is_pos):
self.validate_pos()
if cint(self.update_stock):
self.validate_dropship_item()
self.validate_item_code()
self.validate_warehouse()
self.update_current_stock()
self.validate_delivery_note()
if not self.is_opening:
self.is_opening = 'No'
if self._action != 'submit' and self.update_stock and not self.is_return:
set_batch_nos(self, 'warehouse', True)
self.set_against_income_account()
self.validate_c_form()
self.validate_time_sheets_are_submitted()
self.validate_multiple_billing("Delivery Note", "dn_detail", "amount", "items")
if not self.is_return:
self.validate_serial_numbers()
self.update_packing_list()
self.set_billing_hours_and_amount()
self.update_timesheet_billing_for_project()
self.set_status()
def before_save(self):
set_account_for_mode_of_payment(self)
def on_submit(self):
self.validate_pos_paid_amount()
if not self.subscription:
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total, self)
self.check_prev_docstatus()
if self.is_return:
# NOTE status updating bypassed for is_return
self.status_updater = []
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_in_dn()
self.clear_unallocated_mode_of_payments()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
if self.update_stock == 1:
self.update_stock_ledger()
# this sequence because outstanding may get -ve
self.make_gl_entries()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.check_credit_limit()
self.update_serial_no()
if not cint(self.is_pos) == 1 and not self.is_return:
self.update_against_document_in_jv()
self.update_time_sheet(self.name)
self.update_current_month_sales()
def validate_pos_paid_amount(self):
if len(self.payments) == 0 and self.is_pos:
frappe.throw(_("At least one mode of payment is required for POS invoice."))
def before_cancel(self):
self.update_time_sheet(None)
def on_cancel(self):
self.check_close_sales_order("sales_order")
from erpnext.accounts.utils import unlink_ref_doc_from_payment_entries
if frappe.db.get_single_value('Accounts Settings', 'unlink_payment_on_cancellation_of_invoice'):
unlink_ref_doc_from_payment_entries(self)
if self.is_return:
# NOTE status updating bypassed for is_return
self.status_updater = []
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_in_dn()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.update_serial_no(in_cancel=True)
self.validate_c_form_on_cancel()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
if self.update_stock == 1:
self.update_stock_ledger()
self.make_gl_entries_on_cancel()
frappe.db.set(self, 'status', 'Cancelled')
self.update_current_month_sales()
def update_current_month_sales(self):
if frappe.flags.in_test:
update_company_current_month_sales(self.company)
else:
frappe.enqueue('erpnext.setup.doctype.company.company.update_company_current_month_sales',
company=self.company)
def update_status_updater_args(self):
if cint(self.update_stock):
self.status_updater.extend([{
'source_dt':'Sales Invoice Item',
'target_dt':'Sales Order Item',
'target_parent_dt':'Sales Order',
'target_parent_field':'per_delivered',
'target_field':'delivered_qty',
'target_ref_field':'qty',
'source_field':'qty',
'join_field':'so_detail',
'percent_join_field':'sales_order',
'status_field':'delivery_status',
'keyword':'Delivered',
'second_source_dt': 'Delivery Note Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
'extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Sales Invoice Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Sales Order',
# 'target_parent_field': 'per_delivered',
# 'target_ref_field': 'qty',
'source_field': '-1 * qty',
# 'percent_join_field': 'sales_order',
# 'overflow_type': 'delivery',
'extra_cond': """ and exists (select name from `tabSales Invoice` where name=`tabSales Invoice Item`.parent and update_stock=1 and is_return=1)"""
}
])
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
validate_against_credit_limit = False
for d in self.get("items"):
if not (d.sales_order or d.delivery_note):
validate_against_credit_limit = True
break
if validate_against_credit_limit:
check_credit_limit(self.customer, self.company)
def set_missing_values(self, for_validate=False):
pos = self.set_pos_fields(for_validate)
if not self.debit_to:
self.debit_to = get_party_account("Customer", self.customer, self.company)
if not self.due_date and self.customer:
self.due_date = get_due_date(self.posting_date, "Customer", self.customer, self.company)
super(SalesInvoice, self).set_missing_values(for_validate)
if pos:
return {"print_format": pos.get("print_format") }
def update_time_sheet(self, sales_invoice):
for d in self.timesheets:
if d.time_sheet:
timesheet = frappe.get_doc("Timesheet", d.time_sheet)
self.update_time_sheet_detail(timesheet, d, sales_invoice)
timesheet.calculate_total_amounts()
timesheet.calculate_percentage_billed()
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def update_time_sheet_detail(self, timesheet, args, sales_invoice):
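		# Links (or unlinks, when sales_invoice is None) the matching time log rows:
		# for project invoices the specific timesheet detail row, otherwise any row
		# not yet billed, or rows currently pointing at this invoice when unlinking.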
for data in timesheet.time_logs:
if (self.project and args.timesheet_detail == data.name) or \
(not self.project and not data.sales_invoice) or \
(not sales_invoice and data.sales_invoice == self.name):
data.sales_invoice = sales_invoice
def on_update(self):
self.set_paid_amount()
def set_paid_amount(self):
paid_amount = 0.0
base_paid_amount = 0.0
for data in self.payments:
data.base_amount = flt(data.amount*self.conversion_rate, self.precision("base_paid_amount"))
paid_amount += data.amount
base_paid_amount += data.base_amount
self.paid_amount = paid_amount
self.base_paid_amount = base_paid_amount
def validate_time_sheets_are_submitted(self):
for data in self.timesheets:
if data.time_sheet:
status = frappe.db.get_value("Timesheet", data.time_sheet, "status")
if status not in ['Submitted', 'Payslip']:
frappe.throw(_("Timesheet {0} is already completed or cancelled").format(data.time_sheet))
def set_pos_fields(self, for_validate=False):
"""Set retail related fields from POS Profiles"""
if cint(self.is_pos) != 1:
return
from erpnext.stock.get_item_details import get_pos_profile_item_details, get_pos_profile
pos = get_pos_profile(self.company)
if not self.get('payments') and not for_validate:
pos_profile = frappe.get_doc('POS Profile', pos.name) if pos else None
update_multi_mode_option(self, pos_profile)
if not self.account_for_change_amount:
self.account_for_change_amount = frappe.db.get_value('Company', self.company, 'default_cash_account')
if pos:
if not for_validate and not self.customer:
self.customer = pos.customer
self.mode_of_payment = pos.mode_of_payment
# self.set_customer_defaults()
if pos.get('account_for_change_amount'):
self.account_for_change_amount = pos.get('account_for_change_amount')
for fieldname in ('territory', 'naming_series', 'currency', 'taxes_and_charges', 'letter_head', 'tc_name',
'selling_price_list', 'company', 'select_print_heading', 'cash_bank_account',
'write_off_account', 'write_off_cost_center', 'apply_discount_on'):
if (not for_validate) or (for_validate and not self.get(fieldname)):
self.set(fieldname, pos.get(fieldname))
if not for_validate:
self.update_stock = cint(pos.get("update_stock"))
# set pos values in items
for item in self.get("items"):
if item.get('item_code'):
for fname, val in get_pos_profile_item_details(pos,
frappe._dict(item.as_dict()), pos).items():
if (not for_validate) or (for_validate and not item.get(fname)):
item.set(fname, val)
# fetch terms
if self.tc_name and not self.terms:
self.terms = frappe.db.get_value("Terms and Conditions", self.tc_name, "terms")
# fetch charges
if self.taxes_and_charges and not len(self.get("taxes")):
self.set_taxes()
return pos
def get_company_abbr(self):
return frappe.db.sql("select abbr from tabCompany where name=%s", self.company)[0][0]
def validate_debit_to_acc(self):
account = frappe.db.get_value("Account", self.debit_to,
["account_type", "report_type", "account_currency"], as_dict=True)
if not account:
frappe.throw(_("Debit To is required"))
if account.report_type != "Balance Sheet":
frappe.throw(_("Debit To account must be a Balance Sheet account"))
if self.customer and account.account_type != "Receivable":
frappe.throw(_("Debit To account must be a Receivable account"))
self.party_account_currency = account.account_currency
def clear_unallocated_mode_of_payments(self):
self.set("payments", self.get("payments", {"amount": ["not in", [0, None, ""]]}))
frappe.db.sql("""delete from `tabSales Invoice Payment` where parent = %s
and amount = 0""", self.name)
def validate_with_previous_doc(self):
super(SalesInvoice, self).validate_with_previous_doc({
"Sales Order": {
"ref_dn_field": "sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Order Item": {
"ref_dn_field": "so_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Delivery Note": {
"ref_dn_field": "delivery_note",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Delivery Note Item": {
"ref_dn_field": "dn_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([
["Sales Order", "sales_order", "so_detail"],
["Delivery Note", "delivery_note", "dn_detail"]
])
def set_against_income_account(self):
"""Set against account for debit to account"""
against_acc = []
for d in self.get('items'):
if d.income_account not in against_acc:
against_acc.append(d.income_account)
self.against_income_account = ','.join(against_acc)
def add_remarks(self):
if not self.remarks: self.remarks = 'No Remarks'
def validate_auto_set_posting_time(self):
# Don't auto set the posting date and time if invoice is amended
if self.is_new() and self.amended_from:
self.set_posting_time = 1
self.validate_posting_time()
	def so_dn_required(self):
		"""Check in Selling Settings whether a Sales Order / Delivery Note is required or not."""
dic = {'Sales Order':['so_required', 'is_pos'],'Delivery Note':['dn_required', 'update_stock']}
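		# Mapping used below: doctype -> [Selling Settings flag that makes the reference
		# mandatory, invoice field (is_pos / update_stock) that exempts the check]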
for i in dic:
if frappe.db.get_value('Selling Settings', None, dic[i][0]) == 'Yes':
for d in self.get('items'):
if frappe.db.get_value('Item', d.item_code, 'is_stock_item') == 1 \
and not d.get(i.lower().replace(' ','_')) and not self.get(dic[i][1]):
msgprint(_("{0} is mandatory for Item {1}").format(i,d.item_code), raise_exception=1)
	def validate_proj_cust(self):
		"""Check whether the customer belongs to the same project as the one entered."""
if self.project and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or customer is null or customer = '')""",
(self.project, self.customer))
if not res:
throw(_("Customer {0} does not belong to project {1}").format(self.customer,self.project))
def validate_pos(self):
if self.is_return:
if flt(self.paid_amount) + flt(self.write_off_amount) - flt(self.grand_total) < \
1/(10**(self.precision("grand_total") + 1)):
frappe.throw(_("Paid amount + Write Off Amount can not be greater than Grand Total"))
def validate_item_code(self):
for d in self.get('items'):
if not d.item_code:
msgprint(_("Item Code required at Row No {0}").format(d.idx), raise_exception=True)
def validate_warehouse(self):
super(SalesInvoice, self).validate_warehouse()
for d in self.get_item_list():
if not d.warehouse and frappe.db.get_value("Item", d.item_code, "is_stock_item"):
frappe.throw(_("Warehouse required for stock Item {0}").format(d.item_code))
def validate_delivery_note(self):
for d in self.get("items"):
if d.delivery_note:
msgprint(_("Stock cannot be updated against Delivery Note {0}").format(d.delivery_note), raise_exception=1)
def validate_write_off_account(self):
if flt(self.write_off_amount) and not self.write_off_account:
self.write_off_account = frappe.db.get_value('Company', self.company, 'write_off_account')
if flt(self.write_off_amount) and not self.write_off_account:
msgprint(_("Please enter Write Off Account"), raise_exception=1)
def validate_account_for_change_amount(self):
if flt(self.change_amount) and not self.account_for_change_amount:
msgprint(_("Please enter Account for Change Amount"), raise_exception=1)
def validate_c_form(self):
""" Blank C-form no if C-form applicable marked as 'No'"""
if self.amended_from and self.c_form_applicable == 'No' and self.c_form_no:
frappe.db.sql("""delete from `tabC-Form Invoice Detail` where invoice_no = %s
and parent = %s""", (self.amended_from, self.c_form_no))
frappe.db.set(self, 'c_form_no', '')
def validate_c_form_on_cancel(self):
""" Display message if C-Form no exists on cancellation of Sales Invoice"""
if self.c_form_applicable == 'Yes' and self.c_form_no:
msgprint(_("Please remove this Invoice {0} from C-Form {1}")
.format(self.name, self.c_form_no), raise_exception = 1)
def validate_dropship_item(self):
for item in self.items:
if item.sales_order:
if frappe.db.get_value("Sales Order Item", item.so_detail, "delivered_by_supplier"):
frappe.throw(_("Could not update stock, invoice contains drop shipping item."))
def update_current_stock(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
for d in self.get('packed_items'):
bin = frappe.db.sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
d.projected_qty = bin and flt(bin[0]['projected_qty']) or 0
def update_packing_list(self):
if cint(self.update_stock) == 1:
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
else:
self.set('packed_items', [])
def set_billing_hours_and_amount(self):
if not self.project:
for timesheet in self.timesheets:
ts_doc = frappe.get_doc('Timesheet', timesheet.time_sheet)
if not timesheet.billing_hours and ts_doc.total_billable_hours:
timesheet.billing_hours = ts_doc.total_billable_hours
if not timesheet.billing_amount and ts_doc.total_billable_amount:
timesheet.billing_amount = ts_doc.total_billable_amount
def update_timesheet_billing_for_project(self):
if not self.timesheets and self.project:
self.add_timesheet_data()
else:
self.calculate_billing_amount_for_timesheet()
def add_timesheet_data(self):
self.set('timesheets', [])
if self.project:
for data in get_projectwise_timesheet_data(self.project):
self.append('timesheets', {
'time_sheet': data.parent,
'billing_hours': data.billing_hours,
'billing_amount': data.billing_amt,
'timesheet_detail': data.name
})
self.calculate_billing_amount_for_timesheet()
def calculate_billing_amount_for_timesheet(self):
total_billing_amount = 0.0
for data in self.timesheets:
if data.billing_amount:
total_billing_amount += data.billing_amount
self.total_billing_amount = total_billing_amount
def get_warehouse(self):
user_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
where ifnull(user,'') = %s and company = %s""", (frappe.session['user'], self.company))
warehouse = user_pos_profile[0][1] if user_pos_profile else None
if not warehouse:
global_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
where (user is null or user = '') and company = %s""", self.company)
if global_pos_profile:
warehouse = global_pos_profile[0][1]
elif not user_pos_profile:
msgprint(_("POS Profile required to make POS Entry"), raise_exception=True)
return warehouse
def set_income_account_for_fixed_assets(self):
disposal_account = depreciation_cost_center = None
for d in self.get("items"):
if d.is_fixed_asset:
if not disposal_account:
disposal_account, depreciation_cost_center = get_disposal_account_and_cost_center(self.company)
d.income_account = disposal_account
if not d.cost_center:
d.cost_center = depreciation_cost_center
def check_prev_docstatus(self):
for d in self.get('items'):
if d.sales_order and frappe.db.get_value("Sales Order", d.sales_order, "docstatus") != 1:
frappe.throw(_("Sales Order {0} is not submitted").format(d.sales_order))
if d.delivery_note and frappe.db.get_value("Delivery Note", d.delivery_note, "docstatus") != 1:
throw(_("Delivery Note {0} is not submitted").format(d.delivery_note))
def make_gl_entries(self, gl_entries=None, repost_future_gle=True, from_repost=False):
auto_accounting_for_stock = erpnext.is_perpetual_inventory_enabled(self.company)
if not self.grand_total:
return
if not gl_entries:
gl_entries = self.get_gl_entries()
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
# if POS and amount is written off, updating outstanding amt after posting all gl entries
update_outstanding = "No" if (cint(self.is_pos) or self.write_off_account) else "Yes"
make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
update_outstanding=update_outstanding, merge_entries=False)
if update_outstanding == "No":
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
update_outstanding_amt(self.debit_to, "Customer", self.customer,
self.doctype, self.return_against if cint(self.is_return) else self.name)
if repost_future_gle and cint(self.update_stock) \
and cint(auto_accounting_for_stock):
items, warehouses = self.get_items_and_warehouses()
update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items)
elif self.docstatus == 2 and cint(self.update_stock) \
and cint(auto_accounting_for_stock):
from erpnext.accounts.general_ledger import delete_gl_entries
delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def get_gl_entries(self, warehouse_account=None):
from erpnext.accounts.general_ledger import merge_similar_entries
gl_entries = []
self.make_customer_gl_entry(gl_entries)
self.make_tax_gl_entries(gl_entries)
self.make_item_gl_entries(gl_entries)
# merge gl entries before adding pos entries
gl_entries = merge_similar_entries(gl_entries)
self.make_pos_gl_entries(gl_entries)
self.make_gle_for_change_amount(gl_entries)
self.make_write_off_gl_entry(gl_entries)
self.make_gle_for_rounding_adjustment(gl_entries)
return gl_entries
def make_customer_gl_entry(self, gl_entries):
if self.grand_total:
			# Did not use base_grand_total to book the rounding loss GL entry
grand_total_in_company_currency = flt(self.grand_total * self.conversion_rate,
self.precision("grand_total"))
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.against_income_account,
"debit": grand_total_in_company_currency,
"debit_in_account_currency": grand_total_in_company_currency \
if self.party_account_currency==self.company_currency else self.grand_total,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
def make_tax_gl_entries(self, gl_entries):
for tax in self.get("taxes"):
if flt(tax.base_tax_amount_after_discount_amount):
account_currency = get_account_currency(tax.account_head)
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.customer,
"credit": flt(tax.base_tax_amount_after_discount_amount),
"credit_in_account_currency": flt(tax.base_tax_amount_after_discount_amount) \
if account_currency==self.company_currency else flt(tax.tax_amount_after_discount_amount),
"cost_center": tax.cost_center
}, account_currency)
)
def make_item_gl_entries(self, gl_entries):
# income account gl entries
for item in self.get("items"):
if flt(item.base_net_amount):
if item.is_fixed_asset:
asset = frappe.get_doc("Asset", item.asset)
fixed_asset_gl_entries = get_gl_entries_on_asset_disposal(asset, item.base_net_amount)
for gle in fixed_asset_gl_entries:
gle["against"] = self.customer
gl_entries.append(self.get_gl_dict(gle))
asset.db_set("disposal_date", self.posting_date)
asset.set_status("Sold" if self.docstatus==1 else None)
else:
account_currency = get_account_currency(item.income_account)
gl_entries.append(
self.get_gl_dict({
"account": item.income_account,
"against": self.customer,
"credit": item.base_net_amount,
"credit_in_account_currency": item.base_net_amount \
if account_currency==self.company_currency else item.net_amount,
"cost_center": item.cost_center
}, account_currency)
)
# expense account gl entries
if cint(self.update_stock) and \
erpnext.is_perpetual_inventory_enabled(self.company):
gl_entries += super(SalesInvoice, self).get_gl_entries()
def make_pos_gl_entries(self, gl_entries):
if cint(self.is_pos):
for payment_mode in self.payments:
if payment_mode.amount:
# POS, make payment entries
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": payment_mode.account,
"credit": payment_mode.base_amount,
"credit_in_account_currency": payment_mode.base_amount \
if self.party_account_currency==self.company_currency \
else payment_mode.amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
payment_mode_account_currency = get_account_currency(payment_mode.account)
gl_entries.append(
self.get_gl_dict({
"account": payment_mode.account,
"against": self.customer,
"debit": payment_mode.base_amount,
"debit_in_account_currency": payment_mode.base_amount \
if payment_mode_account_currency==self.company_currency \
else payment_mode.amount
}, payment_mode_account_currency)
)
def make_gle_for_change_amount(self, gl_entries):
if cint(self.is_pos) and self.change_amount:
if self.account_for_change_amount:
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.account_for_change_amount,
"debit": flt(self.base_change_amount),
"debit_in_account_currency": flt(self.base_change_amount) \
if self.party_account_currency==self.company_currency else flt(self.change_amount),
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.account_for_change_amount,
"against": self.customer,
"credit": self.base_change_amount
})
)
else:
frappe.throw(_("Select change amount account"), title="Mandatory Field")
def make_write_off_gl_entry(self, gl_entries):
		# write off entries, applicable only if POS
if self.write_off_account and self.write_off_amount:
write_off_account_currency = get_account_currency(self.write_off_account)
default_cost_center = frappe.db.get_value('Company', self.company, 'cost_center')
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.write_off_account,
"credit": self.base_write_off_amount,
"credit_in_account_currency": self.base_write_off_amount \
if self.party_account_currency==self.company_currency else self.write_off_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.customer,
"debit": self.base_write_off_amount,
"debit_in_account_currency": self.base_write_off_amount \
if write_off_account_currency==self.company_currency else self.write_off_amount,
"cost_center": self.write_off_cost_center or default_cost_center
}, write_off_account_currency)
)
def make_gle_for_rounding_adjustment(self, gl_entries):
if self.rounding_adjustment:
round_off_account, round_off_cost_center = \
get_round_off_account_and_cost_center(self.company)
gl_entries.append(
self.get_gl_dict({
"account": round_off_account,
"against": self.customer,
"credit_in_account_currency": self.rounding_adjustment,
"credit": self.base_rounding_adjustment,
"cost_center": round_off_cost_center,
}
))
def update_billing_status_in_dn(self, update_modified=True):
updated_delivery_notes = []
for d in self.get("items"):
if d.dn_detail:
billed_amt = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where dn_detail=%s and docstatus=1""", d.dn_detail)
billed_amt = billed_amt and billed_amt[0][0] or 0
frappe.db.set_value("Delivery Note Item", d.dn_detail, "billed_amt", billed_amt, update_modified=update_modified)
updated_delivery_notes.append(d.delivery_note)
elif d.so_detail:
updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified)
for dn in set(updated_delivery_notes):
frappe.get_doc("Delivery Note", dn).update_billing_percentage(update_modified=update_modified)
def on_recurring(self, reference_doc, subscription_doc):
for fieldname in ("c_form_applicable", "c_form_no", "write_off_amount"):
self.set(fieldname, reference_doc.get(fieldname))
self.due_date = None
	def update_serial_no(self, in_cancel=False):
		""" update Sales Invoice reference in Serial No """
invoice = None if (in_cancel or self.is_return) else self.name
if in_cancel and self.is_return:
invoice = self.return_against
for item in self.items:
if not item.serial_no:
continue
for serial_no in item.serial_no.split("\n"):
if serial_no and frappe.db.exists('Serial No', serial_no):
sno = frappe.get_doc('Serial No', serial_no)
sno.sales_invoice = invoice
sno.db_update()
def validate_serial_numbers(self):
"""
		validate serial number against Delivery Note and Sales Invoice
"""
self.set_serial_no_against_delivery_note()
self.validate_serial_against_delivery_note()
self.validate_serial_against_sales_invoice()
def set_serial_no_against_delivery_note(self):
for item in self.items:
if item.serial_no and item.delivery_note and \
item.qty != len(get_serial_nos(item.serial_no)):
item.serial_no = get_delivery_note_serial_no(item.item_code, item.qty, item.delivery_note)
def validate_serial_against_delivery_note(self):
"""
		validate that the serial numbers in Sales Invoice Items are the same as in
		Delivery Note Item
"""
for item in self.items:
if not item.delivery_note or not item.dn_detail:
continue
serial_nos = frappe.db.get_value("Delivery Note Item", item.dn_detail, "serial_no") or ""
dn_serial_nos = set(get_serial_nos(serial_nos))
serial_nos = item.serial_no or ""
si_serial_nos = set(get_serial_nos(serial_nos))
if si_serial_nos - dn_serial_nos:
frappe.throw(_("Serial Numbers in row {0} does not match with Delivery Note".format(item.idx)))
if item.serial_no and cint(item.qty) != len(si_serial_nos):
frappe.throw(_("Row {0}: {1} Serial numbers required for Item {2}. You have provided {3}.".format(
item.idx, item.qty, item.item_code, len(si_serial_nos))))
def validate_serial_against_sales_invoice(self):
""" check if serial number is already used in other sales invoice """
for item in self.items:
if not item.serial_no:
continue
for serial_no in item.serial_no.split("\n"):
sales_invoice = frappe.db.get_value("Serial No", serial_no, "sales_invoice")
if sales_invoice and self.name != sales_invoice:
frappe.throw(_("Serial Number: {0} is already referenced in Sales Invoice: {1}".format(
serial_no, sales_invoice
)))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Invoices'),
})
return list_context
@frappe.whitelist()
def get_bank_cash_account(mode_of_payment, company):
account = frappe.db.get_value("Mode of Payment Account",
{"parent": mode_of_payment, "company": company}, "default_account")
if not account:
frappe.throw(_("Please set default Cash or Bank account in Mode of Payment {0}")
.format(mode_of_payment))
return {
"account": account
}
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = flt(source_doc.qty) - flt(source_doc.delivered_qty)
target_doc.stock_qty = target_doc.qty * flt(source_doc.conversion_factor)
target_doc.base_amount = target_doc.qty * flt(source_doc.base_rate)
target_doc.amount = target_doc.qty * flt(source_doc.rate)
doclist = get_mapped_doc("Sales Invoice", source_name, {
"Sales Invoice": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Invoice Item": {
"doctype": "Delivery Note Item",
"field_map": {
"name": "si_detail",
"parent": "against_sales_invoice",
"serial_no": "serial_no",
"sales_order": "against_sales_order",
"so_detail": "so_detail",
"cost_center": "cost_center"
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Sales Invoice", source_name, target_doc)
def set_account_for_mode_of_payment(self):
for data in self.payments:
if not data.account:
data.account = get_bank_cash_account(data.mode_of_payment, self.company).get("account")
|
gpl-3.0
| -4,458,028,978,484,492,300
| 35.605561
| 153
| 0.692044
| false
| 3.099137
| false
| false
| false
|
tavisrudd/eventlet
|
eventlet/convenience.py
|
1
|
4364
|
import sys
from eventlet import greenio
from eventlet import greenthread
from eventlet import greenpool
from eventlet.green import socket
from eventlet.support import greenlets as greenlet
def connect(addr, family=socket.AF_INET, bind=None):
"""Convenience function for opening client sockets.
:param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param bind: Local address to bind to, optional.
:return: The connected green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
if bind is not None:
sock.bind(bind)
sock.connect(addr)
return sock
def listen(addr, family=socket.AF_INET, backlog=50):
"""Convenience function for opening server sockets. This
socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
Sets SO_REUSEADDR on the socket to save on annoyance.
:param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param backlog: The maximum number of queued connections. Should be at least 1; the maximum value is system-dependent.
:return: The listening green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(backlog)
return sock
class StopServe(Exception):
"""Exception class used for quitting :func:`~eventlet.serve` gracefully."""
pass
def _stop_checker(t, server_gt, conn):
try:
try:
t.wait()
finally:
conn.close()
except greenlet.GreenletExit:
pass
except Exception:
greenthread.kill(server_gt, *sys.exc_info())
def serve(sock, handle, concurrency=1000):
"""Runs a server on the supplied socket. Calls the function *handle* in a
separate greenthread for every incoming client connection. *handle* takes
two arguments: the client socket object, and the client address::
def myhandle(client_sock, client_addr):
print "client connected", client_addr
eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)
Returning from *handle* closes the client socket.
:func:`serve` blocks the calling greenthread; it won't return until
the server completes. If you desire an immediate return,
spawn a new greenthread for :func:`serve`.
Any uncaught exceptions raised in *handle* are raised as exceptions
from :func:`serve`, terminating the server, so be sure to be aware of the
exceptions your application can raise. The return value of *handle* is
ignored.
Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
    server -- that's the only way to get the serve() function to return rather
than raise.
The value in *concurrency* controls the maximum number of
greenthreads that will be open at any time handling requests. When
the server hits the concurrency limit, it stops accepting new
connections until the existing ones complete.
"""
pool = greenpool.GreenPool(concurrency)
server_gt = greenthread.getcurrent()
while True:
try:
conn, addr = sock.accept()
gt = pool.spawn(handle, conn, addr)
gt.link(_stop_checker, server_gt, conn)
conn, addr, gt = None, None, None
except StopServe:
return
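# Illustrative sketch (not part of the original module): a minimal echo server that
# wires listen() and serve() together. The helper below is hypothetical and never
# called here; it only demonstrates the intended usage of the two functions.
def _example_echo_server(port=6000):
    def handle(client_sock, client_addr):
        # echo one chunk back to the client; returning closes the client socket
        data = client_sock.recv(1024)
        if data:
            client_sock.sendall(data)
    serve(listen(('127.0.0.1', port)), handle)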
def wrap_ssl(sock, keyfile=None, certfile=None, server_side=False,
cert_reqs=None, ssl_version=None, ca_certs=None,
do_handshake_on_connect=True, suppress_ragged_eofs=True):
"""Convenience function for converting a regular socket into an SSL
socket. Has the same interface as :func:`ssl.wrap_socket`, but
works on 2.5 or earlier, using PyOpenSSL.
The preferred idiom is to call wrap_ssl directly on the creation
method, e.g., ``wrap_ssl(connect(addr))`` or
``wrap_ssl(listen(addr), server_side=True)``. This way there is
no "naked" socket sitting around to accidentally corrupt the SSL
session.
    :return: Green SSL object.
"""
pass
|
mit
| 2,061,297,895,791,826,400
| 37.280702
| 122
| 0.683089
| false
| 4.037003
| false
| false
| false
|
zstars/weblabdeusto
|
server/src/weblab/core/coordinator/redis/priority_queue_scheduler.py
|
1
|
33851
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import time
import datetime
import random
import json
from voodoo.log import logged
import voodoo.log as log
from voodoo.typechecker import typecheck
from voodoo.gen import CoordAddress
import voodoo.sessions.session_id as SessionId
from voodoo.override import Override
from weblab.core.coordinator.exc import ExpiredSessionError
from weblab.core.coordinator.scheduler_transactions_synchronizer import SchedulerTransactionsSynchronizer
from weblab.core.coordinator.scheduler import Scheduler
import weblab.core.coordinator.status as WSS
from weblab.core.coordinator.resource import Resource
from weblab.data.experiments import ExperimentInstanceId, ExperimentId
from weblab.core.coordinator.redis.constants import (
WEBLAB_RESOURCE_RESERVATION_PQUEUE,
WEBLAB_RESOURCE_SLOTS,
WEBLAB_RESOURCE_RESERVATIONS,
WEBLAB_RESOURCE_PQUEUE_RESERVATIONS,
WEBLAB_RESOURCE_PQUEUE_POSITIONS,
WEBLAB_RESOURCE_PQUEUE_MAP,
WEBLAB_RESOURCE_PQUEUE_SORTED,
WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS,
LAB_COORD,
CLIENT_INITIAL_DATA,
REQUEST_INFO,
EXPERIMENT_TYPE,
EXPERIMENT_INSTANCE,
START_TIME,
TIME,
INITIALIZATION_IN_ACCOUNTING,
PRIORITY,
TIMESTAMP_BEFORE,
TIMESTAMP_AFTER,
LAB_SESSION_ID,
EXP_INFO,
INITIAL_CONFIGURATION,
RESOURCE_INSTANCE,
ACTIVE_STATUS,
STATUS_RESERVED,
STATUS_WAITING_CONFIRMATION,
)
EXPIRATION_TIME = 3600 # seconds
DEBUG = False
###########################################################
#
# TODO write some documentation
#
def exc_checker(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
if DEBUG:
import traceback
traceback.print_exc()
log.log(
PriorityQueueScheduler, log.level.Error,
"Unexpected exception while running %s" % func.__name__ )
log.log_exc(PriorityQueueScheduler, log.level.Warning)
raise
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
TIME_ANTI_RACE_CONDITIONS = 0.1
class PriorityQueueScheduler(Scheduler):
def __init__(self, generic_scheduler_arguments, randomize_instances = True, **kwargs):
super(PriorityQueueScheduler, self).__init__(generic_scheduler_arguments, **kwargs)
self.randomize_instances = randomize_instances
self._synchronizer = SchedulerTransactionsSynchronizer(self)
self._synchronizer.start()
@Override(Scheduler)
def stop(self):
self._synchronizer.stop()
@Override(Scheduler)
def is_remote(self):
return False
@exc_checker
@logged()
@Override(Scheduler)
@typecheck(typecheck.ANY, typecheck.ANY, Resource)
def removing_current_resource_slot(self, client, resource):
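        # When a resource slot is removed, detach any reservation currently bound to
        # that instance: downgrade its confirmation, release the resource, strip the
        # "active" fields from its stored reservation data and re-queue it with
        # priority -1 so it returns to the front of the queue.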
weblab_resource_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (resource.resource_type, resource.resource_instance)
current_reservation_ids = client.smembers(weblab_resource_instance_reservations)
if len(current_reservation_ids) > 0:
current_reservation_id = list(current_reservation_ids)[0]
if client.srem(weblab_resource_instance_reservations, current_reservation_id):
self.reservations_manager.downgrade_confirmation(current_reservation_id)
self.resources_manager.release_resource(resource)
# Remove data that was added when confirmed
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, current_reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
reservation_data = json.loads(reservation_data_str)
reservation_data.pop(ACTIVE_STATUS, None)
reservation_data.pop(TIMESTAMP_BEFORE, None)
reservation_data.pop(TIMESTAMP_AFTER, None)
reservation_data.pop(LAB_SESSION_ID, None)
reservation_data.pop(EXP_INFO, None)
reservation_data_str = json.dumps(reservation_data)
reservation_data = client.set(weblab_reservation_pqueue, reservation_data_str)
# Add back to the queue
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
filled_reservation_id = client.hget(weblab_resource_pqueue_map, current_reservation_id)
client.zadd(weblab_resource_pqueue_sorted, filled_reservation_id, -1)
return True
return False
@exc_checker
@logged()
@Override(Scheduler)
def reserve_experiment(self, reservation_id, experiment_id, time, priority, initialization_in_accounting, client_initial_data, request_info):
"""
        priority: the lower the value, the higher the priority
"""
client = self.redis_maker()
# For indexing purposes
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
weblab_resource_reservations = WEBLAB_RESOURCE_RESERVATIONS % self.resource_type_name
# Queue management
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
weblab_resource_pqueue_positions = WEBLAB_RESOURCE_PQUEUE_POSITIONS % self.resource_type_name
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
        # Within the same priority, we want to sort all the requests by the order in which they came.
        # To support this, we increment a sufficiently long counter and prepend it to the reservation_id.
current_position = client.incr(weblab_resource_pqueue_positions)
filled_reservation_id = "%s_%s" % (str(current_position).zfill(100), reservation_id)
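        # e.g. position 42 with a hypothetical reservation id 'abc' becomes
        # '0...042_abc' with the counter zero-padded to 100 digits, so lexicographic
        # ordering within the same priority preserves arrival order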
pipeline = client.pipeline()
pipeline.hset(weblab_resource_pqueue_map, reservation_id, filled_reservation_id)
pipeline.zadd(weblab_resource_pqueue_sorted, filled_reservation_id, priority)
pipeline.sadd(weblab_resource_reservations, reservation_id)
pipeline.sadd(weblab_resource_pqueue_reservations, reservation_id)
generic_data = {
TIME : time,
INITIALIZATION_IN_ACCOUNTING : initialization_in_accounting,
PRIORITY : priority,
}
pipeline.set(weblab_reservation_pqueue, json.dumps(generic_data))
pipeline.execute()
return self.get_reservation_status(reservation_id)
#######################################################################
#
# Given a reservation_id, it returns in which state the reservation is
#
@exc_checker
@logged()
@Override(Scheduler)
def get_reservation_status(self, reservation_id):
self._remove_expired_reservations()
expired = self.reservations_manager.update(reservation_id)
if expired:
self._delete_reservation(reservation_id)
raise ExpiredSessionError("Expired reservation")
self._synchronizer.request_and_wait()
reservation_id_with_route = '%s;%s.%s' % (reservation_id, reservation_id, self.core_server_route)
client = self.redis_maker()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
if reservation_data_str is None:
log.log(
PriorityQueueScheduler, log.level.Error,
"get_reservation_status called with a reservation_id that is not registered (not found on weblab_reservation_pqueue). Returning a WaitingInstanceStatus")
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, 50)
reservation_data = json.loads(reservation_data_str)
if ACTIVE_STATUS in reservation_data:
# Reserved or Waiting reservation
status = reservation_data[ACTIVE_STATUS]
# It may be just waiting for the experiment server to respond
if status == STATUS_WAITING_CONFIRMATION:
return WSS.WaitingConfirmationQueueStatus(reservation_id_with_route, self.core_server_url)
# Or the experiment server already responded and therefore we have all this data
str_lab_coord_address = reservation_data[LAB_COORD]
obtained_time = reservation_data[TIME]
initialization_in_accounting = reservation_data[INITIALIZATION_IN_ACCOUNTING]
lab_session_id = reservation_data[LAB_SESSION_ID]
initial_configuration = reservation_data[INITIAL_CONFIGURATION]
timestamp_before_tstamp = reservation_data[TIMESTAMP_BEFORE]
timestamp_after_tstamp = reservation_data[TIMESTAMP_AFTER]
if EXP_INFO in reservation_data and reservation_data[EXP_INFO]:
exp_info = json.loads(reservation_data[EXP_INFO])
else:
exp_info = {}
timestamp_before = datetime.datetime.fromtimestamp(timestamp_before_tstamp)
timestamp_after = datetime.datetime.fromtimestamp(timestamp_after_tstamp)
lab_coord_address = CoordAddress.translate(str_lab_coord_address)
if initialization_in_accounting:
before = timestamp_before_tstamp
else:
before = timestamp_after_tstamp
if before is not None:
remaining = (before + obtained_time) - self.time_provider.get_time()
else:
remaining = obtained_time
return WSS.LocalReservedStatus(reservation_id_with_route, lab_coord_address, SessionId.SessionId(lab_session_id), exp_info, obtained_time, initial_configuration, timestamp_before, timestamp_after, initialization_in_accounting, remaining, self.core_server_url)
# else it's waiting
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
filled_reservation_id = client.hget(weblab_resource_pqueue_map, reservation_id)
if filled_reservation_id is None:
log.log(
PriorityQueueScheduler, log.level.Error,
"get_reservation_status called with a reservation_id that is not registered (not found on the reservations map). Returning a WaitingInstanceStatus")
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, 50)
position = client.zrank(weblab_resource_pqueue_sorted, filled_reservation_id)
if position is None: # It's not in the queue now
time.sleep(TIME_ANTI_RACE_CONDITIONS * random.random())
return self.get_reservation_status(reservation_id)
if self.resources_manager.are_resource_instances_working(self.resource_type_name):
return WSS.WaitingQueueStatus(reservation_id_with_route, position)
else:
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, position)
################################################################
#
# Called when it is confirmed by the Laboratory Server.
#
@exc_checker
@logged()
@Override(Scheduler)
def confirm_experiment(self, reservation_id, lab_session_id, initial_configuration, exp_info):
self._remove_expired_reservations()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
client = self.redis_maker()
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
return
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
resource_instance_str = pqueue_reservation_data.get(RESOURCE_INSTANCE)
if resource_instance_str is not None:
resource_instance = Resource.parse(resource_instance_str)
if not self.resources_manager.check_working(resource_instance):
# TODO: if the experiment is broken and the student is ACTIVE_STATUS, something should be done
#
return
pqueue_reservation_data[LAB_SESSION_ID] = lab_session_id.id
pqueue_reservation_data[INITIAL_CONFIGURATION] = initial_configuration
pqueue_reservation_data[TIMESTAMP_AFTER] = self.time_provider.get_time()
pqueue_reservation_data[ACTIVE_STATUS] = STATUS_RESERVED
pqueue_reservation_data[EXP_INFO] = json.dumps(exp_info)
pqueue_reservation_data_str = json.dumps(pqueue_reservation_data)
client.set(weblab_reservation_pqueue, pqueue_reservation_data_str)
################################################################
#
# Called when the user disconnects or finishes the resource.
#
@exc_checker
@logged()
@Override(Scheduler)
def finish_reservation(self, reservation_id):
self._remove_expired_reservations()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
client = self.redis_maker()
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
return
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
if ACTIVE_STATUS in pqueue_reservation_data:
enqueue_free_experiment_args = self._clean_current_reservation(reservation_id)
else:
enqueue_free_experiment_args = None
self._delete_reservation(reservation_id)
if enqueue_free_experiment_args is not None:
self.confirmer.enqueue_free_experiment(*enqueue_free_experiment_args)
def _clean_current_reservation(self, reservation_id):
client = self.redis_maker()
enqueue_free_experiment_args = None
if reservation_id is not None:
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
if reservation_data_str is not None:
downgraded = self.reservations_manager.downgrade_confirmation(reservation_id)
if downgraded:
reservation_data = json.loads(reservation_data_str)
resource_instance_str = reservation_data.get(RESOURCE_INSTANCE)
if resource_instance_str is not None:
resource_instance = Resource.parse(resource_instance_str)
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (resource_instance.resource_type, resource_instance.resource_instance)
client.srem(weblab_resource_pqueue_instance_reservations, reservation_id)
# print "RELEASING AT _clean_current_reservation. SHOULD NEVER HAPPEN."
# self.resources_manager.release_resource(resource_instance)
lab_session_id = reservation_data.get(LAB_SESSION_ID)
experiment_instance_str = reservation_data.get(EXPERIMENT_INSTANCE)
experiment_instance_id = ExperimentInstanceId.parse(experiment_instance_str)
if experiment_instance_id is not None:
# If the experiment instance doesn't exist, there is no need to call the free_experiment method
lab_coord_address = reservation_data.get(LAB_COORD)
enqueue_free_experiment_args = (lab_coord_address, reservation_id, lab_session_id, experiment_instance_id)
# otherwise the student has been removed
return enqueue_free_experiment_args
def update(self):
self._update_queues()
#############################################################
#
# Take the queue of a given Resource Type and update it
#
@exc_checker
def _update_queues(self):
###########################################################
# There are reasons why a waiting reservation may not be
# able to be promoted while the next one is. For instance,
# a user may be waiting for "pld boards", but only for
# instances of "pld boards" which have a "ud-binary@Binary
# experiments" server running. If only a "ud-pld@PLD
# Experiments" instance is available, then this user will
# not be promoted, while another user who is waiting for a
# "ud-pld@PLD Experiments" can be promoted.
#
# Therefore, we keep a list of the IDs of the waiting
# reservations that we already determined could not be
# promoted in this iteration. They will get another
# chance in the next run of _update_queues.
#
previously_waiting_reservation_ids = []
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
weblab_resource_slots = WEBLAB_RESOURCE_SLOTS % self.resource_type_name
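# Keys used below: the *_map hash maps a reservation id to its "filled" id,
# the *_sorted zset orders those filled ids by priority, and the *_slots set
# holds the names of the currently free resource instances of this type.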
###########################################################
# While there are free instances and waiting reservations,
# take the first waiting reservation and make it the current
# reservation. Do this one reservation at a time because we
# want to commit each change
#
while True:
client = self.redis_maker()
filled_waiting_reservation_ids = client.zrangebyscore(weblab_resource_pqueue_sorted, -10000, +10000, start=0, num=len(previously_waiting_reservation_ids) + 1)
first_waiting_reservation_id = None
for filled_waiting_reservation_id in filled_waiting_reservation_ids:
waiting_reservation_id = filled_waiting_reservation_id[filled_waiting_reservation_id.find('_')+1:]
if waiting_reservation_id not in previously_waiting_reservation_ids:
first_waiting_reservation_id = waiting_reservation_id
break
if first_waiting_reservation_id is None:
return # There is no waiting reservation for this resource that we haven't already tried
previously_waiting_reservation_ids.append(first_waiting_reservation_id)
#
# For the current resource_type, ask for
# all the available resource instances (i.e. those
# that are members of weblab:resource:%s:slots)
#
free_instances = [ Resource(self.resource_type_name, resource_instance)
for resource_instance in client.smembers(weblab_resource_slots) ]
if len(free_instances) == 0:
# If there is no free instance, just return
return
#
# Select the correct free_instance for the current student among
# all the free_instances
#
if self.randomize_instances:
randomized_free_instances = [ free_instance for free_instance in free_instances ]
random.shuffle(randomized_free_instances)
else:
randomized_free_instances = sorted(free_instances, cmp=lambda r1, r2: cmp(r1.resource_type, r2.resource_type) or cmp(r1.resource_instance, r2.resource_instance))
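# With randomize_instances enabled the candidate instances are shuffled;
# otherwise they are tried in a deterministic (resource_type, resource_instance) order.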
for free_instance in randomized_free_instances:
#
# IMPORTANT: from here on every "continue" should first revoke the
# reservations_manager and resources_manager confirmations
#
working = self.resources_manager.check_working(free_instance)
if not working:
# The instance is not working
continue
confirmed = self.reservations_manager.confirm(first_waiting_reservation_id)
if not confirmed:
# the student has already been confirmed somewhere else, so don't try other
# instances; move on to another student instead
break
acquired = self.resources_manager.acquire_resource(free_instance)
# print "ACQUIRED", free_instance, acquired, time.time()
if not acquired:
# the instance has been acquired by someone else. Unconfirm the student and
# try again with another free_instance
self.reservations_manager.downgrade_confirmation(first_waiting_reservation_id)
continue
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, free_instance.resource_instance)
client.sadd(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, first_waiting_reservation_id)
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
reservation_data = self.reservations_manager.get_reservation_data(first_waiting_reservation_id)
if pqueue_reservation_data_str is None or reservation_data is None:
# the student is not here anymore; downgrading the confirmation is not required,
# but releasing the resource is; then skip the rest of the free instances
self.resources_manager.release_resource(free_instance)
client.srem(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
break
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
start_time = self.time_provider.get_time()
total_time = pqueue_reservation_data[TIME]
pqueue_reservation_data[START_TIME] = start_time
pqueue_reservation_data[TIMESTAMP_BEFORE] = start_time
pqueue_reservation_data[ACTIVE_STATUS] = STATUS_WAITING_CONFIRMATION
pqueue_reservation_data[RESOURCE_INSTANCE] = free_instance.to_weblab_str()
initialization_in_accounting = pqueue_reservation_data[INITIALIZATION_IN_ACCOUNTING]
client_initial_data = reservation_data[CLIENT_INITIAL_DATA]
request_info = json.loads(reservation_data[REQUEST_INFO])
username = request_info.get('username')
locale = request_info.get('locale')
requested_experiment_type = ExperimentId.parse(reservation_data[EXPERIMENT_TYPE])
selected_experiment_instance = None
experiment_instances = self.resources_manager.list_experiment_instance_ids_by_resource(free_instance)
for experiment_instance in experiment_instances:
if experiment_instance.to_experiment_id() == requested_experiment_type:
selected_experiment_instance = experiment_instance
if selected_experiment_instance is None:
# This resource is not valid for this user; another free_instance should be
# selected. Try the next one, but first release the acquired resources
self.reservations_manager.downgrade_confirmation(first_waiting_reservation_id)
self.resources_manager.release_resource(free_instance)
client.srem(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
continue
pqueue_reservation_data[EXPERIMENT_INSTANCE] = selected_experiment_instance.to_weblab_str()
laboratory_coord_address = self.resources_manager.get_laboratory_coordaddress_by_experiment_instance_id(selected_experiment_instance)
pqueue_reservation_data[LAB_COORD] = laboratory_coord_address
client.set(weblab_reservation_pqueue, json.dumps(pqueue_reservation_data))
filled_reservation_id = client.hget(weblab_resource_pqueue_map, first_waiting_reservation_id)
client.zrem(weblab_resource_pqueue_sorted, filled_reservation_id)
#
# Enqueue the confirmation, since it might take a long time
# (for instance, if the laboratory server does not reply because
# of a network problem, or it simply takes too long to reply),
# so this method might take too long. That's why we enqueue these
# requests and run them in other threads.
#
deserialized_server_initial_data = {
'priority.queue.slot.length' : '%s' % total_time,
'priority.queue.slot.start' : '%s' % datetime.datetime.fromtimestamp(start_time),
'priority.queue.slot.initialization_in_accounting' : initialization_in_accounting,
'request.experiment_id.experiment_name' : selected_experiment_instance.exp_name,
'request.experiment_id.category_name' : selected_experiment_instance.cat_name,
'request.username' : username,
'request.full_name' : username,
'request.locale' : locale,
# TODO: add the username and user full name here
}
server_initial_data = json.dumps(deserialized_server_initial_data)
# server_initial_data will contain information such as "what was the last experiment used?".
# If a single resource was used by a binary experiment, then the next time may not require reprogramming the device
self.confirmer.enqueue_confirmation(laboratory_coord_address, first_waiting_reservation_id, selected_experiment_instance, client_initial_data, server_initial_data, self.resource_type_name)
#
# After it, keep in the while True in order to add the next
# reservation
#
break
################################################
#
# Remove all reservations whose session has expired
#
@exc_checker
def _remove_expired_reservations(self):
now = self.time_provider.get_time()
enqueue_free_experiment_args_retrieved = []
client = self.redis_maker()
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
reservations = [ reservation_id for reservation_id in client.smembers(weblab_resource_pqueue_reservations) ]
# Since there might be a lot of reservations, create a pipeline and retrieve
# every reservation's data in a single round trip
pipeline = client.pipeline()
for reservation_id in reservations:
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
pipeline.get(weblab_reservation_pqueue)
results = pipeline.execute()
for reservation_id, reservation_data in zip(reservations, results):
if reservation_data is not None:
data = json.loads(reservation_data)
if ACTIVE_STATUS in data:
total_time = data[TIME]
timestamp_before = data[TIMESTAMP_BEFORE]
timestamp_after = data.get(TIMESTAMP_AFTER)
initialization_in_accounting = data[INITIALIZATION_IN_ACCOUNTING]
# If timestamp_after is None and initialization is not counted in the accounting,
# then we cannot tell whether the time has expired, so we skip it here (it will
# eventually be treated as expired for lack of LATEST_ACCESS).
if timestamp_after is not None or initialization_in_accounting:
timestamp = timestamp_before if initialization_in_accounting else timestamp_after
if now >= timestamp + total_time: # Expired
enqueue_free_experiment_args = self._clean_current_reservation(reservation_id)
enqueue_free_experiment_args_retrieved.append(enqueue_free_experiment_args)
self._delete_reservation(reservation_id)
self.reservations_manager.delete(reservation_id)
# Anybody whose latest_access is earlier than this point is considered expired
current_expiration_time = datetime.datetime.utcfromtimestamp(now - EXPIRATION_TIME)
for expired_reservation_id in self.reservations_manager.list_expired_reservations(current_expiration_time):
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, expired_reservation_id)
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
continue
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
if ACTIVE_STATUS in pqueue_reservation_data:
enqueue_free_experiment_args = self._clean_current_reservation(expired_reservation_id)
enqueue_free_experiment_args_retrieved.append(enqueue_free_experiment_args)
self._delete_reservation(expired_reservation_id)
self.reservations_manager.delete(expired_reservation_id)
for enqueue_free_experiment_args in enqueue_free_experiment_args_retrieved:
if enqueue_free_experiment_args is not None:
self.confirmer.enqueue_free_experiment(*enqueue_free_experiment_args)
def _delete_reservation(self, reservation_id):
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
resource_instances = self.resources_manager.list_resource_instances_by_type(self.resource_type_name)
client = self.redis_maker()
pipeline = client.pipeline()
for resource_instance in resource_instances:
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, resource_instance.resource_instance)
pipeline.srem(weblab_resource_pqueue_instance_reservations, reservation_id)
pipeline.srem(weblab_resource_pqueue_reservations, reservation_id)
pipeline.delete(weblab_reservation_pqueue)
pipeline.execute()
filled_reservation_id = client.hget(weblab_resource_pqueue_map, reservation_id)
client.hdel(weblab_resource_pqueue_map, reservation_id)
client.zrem(weblab_resource_pqueue_sorted, filled_reservation_id)
##############################################################
#
# ONLY FOR TESTING: It completely removes the whole database
#
@Override(Scheduler)
def _clean(self):
client = self.redis_maker()
for reservation_id in self.reservations_manager.list_all_reservations():
client.delete(WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id))
client.delete(WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, '*'))
for resource_instance in self.resources_manager.list_resource_instances_by_type(self.resource_type_name):
client.delete(WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, resource_instance.resource_instance))
client.delete(WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_POSITIONS % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name)
|
bsd-2-clause
| 1,431,921,179,492,637,200
| 49.902256
| 271
| 0.629483
| false
| 4.337519
| false
| false
| false
|
cloudify-cosmo/softlayer-python
|
SoftLayer/CLI/mq/queue_add.py
|
1
|
1390
|
"""Create a queue."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import mq
import click
@click.command()
@click.argument('account-id')
@click.argument('queue-name')
@click.option('--datacenter', help="Datacenter, E.G.: dal05")
@click.option('--network',
type=click.Choice(['public', 'private']),
help="Network type")
@click.option('--visibility-interval',
type=click.INT,
default=30,
help="Time in seconds that messages will re-appear after being "
"popped")
@click.option('--expiration',
type=click.INT,
default=604800,
help="Time in seconds that messages will live")
@click.option('--tag', '-g', multiple=True, help="Tags to add to the queue")
@environment.pass_env
def cli(env, account_id, queue_name, datacenter, network, visibility_interval,
expiration, tag):
"""Create a queue."""
manager = SoftLayer.MessagingManager(env.client)
mq_client = manager.get_connection(account_id,
datacenter=datacenter, network=network)
queue = mq_client.create_queue(
queue_name,
visibility_interval=visibility_interval,
expiration=expiration,
tags=tag,
)
return mq.queue_table(queue)
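# Example (hypothetical values; assuming the standard 'slcli' entry point
# registers this module as 'mq queue-add'):
#   slcli mq queue-add 12345 myqueue --datacenter dal05 --visibility-interval 30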
|
mit
| 7,730,772,881,855,012,000
| 31.325581
| 78
| 0.621583
| false
| 4.064327
| false
| false
| false
|
cloudendpoints/endpoints-tools
|
auth/generate-jwt.py
|
1
|
2375
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python script generates a signed JWT token based on the input payload"""
import argparse
import time
import oauth2client.crypt
from oauth2client.service_account import ServiceAccountCredentials
def main(args):
"""Generates a signed JSON Web Token using a Google API Service Account."""
credentials = ServiceAccountCredentials.from_json_keyfile_name(
args.service_account_file)
now = int(time.time())
payload = {
"exp": now + credentials.MAX_TOKEN_LIFETIME_SECS,
"iat": now,
"aud": args.aud,
}
if args.email:
payload["email"] = args.email
if args.groupId:
payload["groupId"] = args.groupId
if args.issuer:
payload["iss"] = args.issuer
payload["sub"] = args.issuer
else:
payload["iss"] = credentials.service_account_email
payload["sub"] = credentials.service_account_email
signed_jwt = oauth2client.crypt.make_signed_jwt(
credentials._signer, payload, key_id=credentials._private_key_id)
return signed_jwt
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"aud",
help="Audience . This must match 'audience' in the security configuration"
" in the swagger spec. It can be any string")
parser.add_argument(
'service_account_file',
help='The path to your service account json file.')
#optional arguments
parser.add_argument("-e", "--email", help="Email claim in JWT")
parser.add_argument("-g", "--groupId", help="GroupId claim in JWT")
parser.add_argument("-iss", "--issuer", help="Issuer claim. This will also be used for sub claim")
print main(parser.parse_args())
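# Example usage (all values below are placeholders):
#   python generate-jwt.py https://your-api.example.com service-account.json \
#       -e user@example.com -iss issuer@your-project.iam.gserviceaccount.com
# The signed JWT is printed to stdout.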
|
apache-2.0
| 7,548,041,424,393,917,000
| 32.450704
| 100
| 0.703579
| false
| 3.912685
| false
| false
| false
|
xjw1001001/IGCexpansion
|
test/Ancestral_reconstruction/IGCcluster_analysis.py
|
1
|
46187
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 22:00:31 2017
@author: xjw1001001
"""
import numpy as np
from IGCexpansion.CodonGeneconFunc import *
llpath = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/test/Ancestral_reconstruction/'
path = llpath + 'matrix/sitewise_IGC_statmatrix/'
paralog_list = [['YLR406C', 'YDL075W'],#pair#TODO: other data
['YER131W', 'YGL189C'], ['YML026C', 'YDR450W'], ['YNL301C', 'YOL120C'], ['YNL069C', 'YIL133C'],
['YMR143W', 'YDL083C'], ['YJL177W', 'YKL180W'], ['YBR191W', 'YPL079W'], ['YER074W', 'YIL069C'],
['YDR418W', 'YEL054C'], ['YBL087C', 'YER117W'], ['YLR333C', 'YGR027C'], ['YMR142C', 'YDL082W'],
['YER102W', 'YBL072C'], ['EDN', 'ECP'],['ERa', 'ERb'],['ARa', 'ERa'],['AR', 'MR'],['AR', 'GR'],['AR', 'PR'],
['MR', 'GR'],['MR', 'PR'],['PR', 'GR'] ]
ARMRGRPR_list = [['AR', 'MR'],['AR', 'GR'],['AR', 'PR'],['MR', 'GR'],['MR', 'PR'],['PR', 'GR']]
Yeast_list = [['YLR406C', 'YDL075W'], ['YER131W', 'YGL189C'],['YML026C', 'YDR450W'], ['YNL301C', 'YOL120C'], ['YNL069C', 'YIL133C'],
['YMR143W', 'YDL083C'], ['YJL177W', 'YKL180W'], ['YBR191W', 'YPL079W'], ['YER074W', 'YIL069C'], ['YDR418W', 'YEL054C'], ['YBL087C', 'YER117W'],
['YLR333C', 'YGR027C'],['YMR142C', 'YDL082W'], ['YER102W', 'YBL072C']]
EDNECP_newicktree ='/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/Zhang2002_data ECPEDN/from gene bank/primate_EDN_ECP.newick'
Yeast_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/YeastTree.newick'
ERa_ERb_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/SR_Thornton/ER/species.newick'
ARa_ERa_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/SR_Thornton/ARa_ERa/ERa_ARa_species.newick'
ARMRGRPR_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/SR_Thornton/AR_MR_GR_PR/species_common/species_common.newick'
bases = 'tcag'.upper()
codons = [a+b+c for a in bases for b in bases for c in bases]
amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
codon_table = dict(zip(codons, amino_acids))
codon_nonstop = [a for a in codon_table.keys() if not codon_table[a]=='*']
codon_to_state = {a.upper() : i for (i, a) in enumerate(codon_nonstop)}
state_to_codon = {i : a.upper() for (i, a) in enumerate(codon_nonstop)}
state_to_codon[61] = 'xxx'
pair_to_state = {pair:i for i, pair in enumerate(product(codon_nonstop, repeat = 2))}
Yeast_node = 13
EDNECP_node = 25
Model_list = ['IGC','tau=0']#model
#'arg_' +'_'.join(pair) + '_MG94_' + model + '.npy'
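# A composite state encodes an ordered pair of codon states in base 62
# (61 sense codons plus state 61 = 'xxx' for a gap): state = 62*state_1 + state_2.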
def state_to_compositecodons(state):
state_1, state_2 = divmod(state, 62)
state_1 = int(state_1)
state_2 = int(state_2)
return (state_to_codon[state_1],state_to_codon[state_2])
# read posterior probabilities and expected IGC/mutation statistics for each pair
Expected_tau = {} #paralog array
ExpectedIGC = {}
ExpectedIGC['num'] = {}
ExpectedIGC['1to2'] = {}
ExpectedIGC['2to1'] = {}
model = {}
posterior = {}
posterior['1to2'] = {}
posterior['2to1'] = {}
posterior['IGC'] = {}
ExpectedIGC['point'] = {}
ExpectedIGC['proportion'] = {}
#'_'.join(pair)
for pair in paralog_list:
model['_'.join(pair)] = {}
model['_'.join(pair)]['IGC'] = np.loadtxt(open(llpath + 'model_likelihood/ancestral_reconstruction_' + '_'.join(pair) + '_MG94_IGCBFGS.txt','r'))
model['_'.join(pair)]['tau=0'] = np.loadtxt(open(llpath + 'model_likelihood/ancestral_reconstruction_' + '_'.join(pair) + '_MG94_tau=0BFGS.txt','r'))
model['_'.join(pair)]['PAML'] = np.loadtxt(open(llpath + 'PAML/output/summary/' + '_'.join(pair) + '.txt','r'))
Expected_tau['_'.join(pair)] = np.loadtxt(open(path + 'Expected_tau/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['num']['_'.join(pair)] = np.loadtxt(open(path + 'ExpectedIGCnum/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['1to2']['_'.join(pair)] = np.loadtxt(open(path + 'ExpectedIGCnum1_2/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['2to1']['_'.join(pair)] = np.loadtxt(open(path + 'ExpectedIGCnum2_1/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['point']['_'.join(pair)] = np.loadtxt(open(path + 'SitewiseExpectedpointMutation/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['proportion']['_'.join(pair)] = np.loadtxt(open(path + 'Sitewiseporpotion/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
posterior['1to2']['_'.join(pair)] = np.loadtxt(open(path + 'posterior/' + '_'.join(pair) + '_MG94_IGC_1to2.txt','r'))
posterior['2to1']['_'.join(pair)] = np.loadtxt(open(path + 'posterior/' + '_'.join(pair) + '_MG94_IGC_2to1.txt','r'))
posterior['IGC']['_'.join(pair)] = np.loadtxt(open(path + 'posterior/' + '_'.join(pair) + '_MG94_IGC_IGC.txt','r'))
# generate a 0/1/X/x assignment sequence for each branch of each pair
reconstruct_path = 'matrix/reconstruction_likelihood/npy/'
dict_all = {}
difference_threshold_begin = 0.5 #threshold for difference
difference_threshold_end = 0.5
point_mutation_threshold = 0.2
IGC_high_threshold = 0.5
IGC_low_threshold = 0.1
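# Per-site assignment symbols used below:
#   'x' - paralogs essentially identical at both ends of the branch and few
#         expected point mutations, so the site is uninformative
#   '0' - no IGC support, '1' - posterior above IGC_high_threshold,
#   'X' - posterior between IGC_low_threshold and IGC_high_threshold.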
for pair in paralog_list:
# read data
dict_all['_'.join(pair)]={}
for model in Model_list:
dict_all['_'.join(pair)][model]={}
dict_all['_'.join(pair)][model]['arg'] = np.load(llpath+reconstruct_path+'arg_' +'_'.join(pair) + '_MG94_' + model + '.npy')
dict_all['_'.join(pair)][model]['likelihood'] = np.load(llpath+reconstruct_path+'likelihood_' +'_'.join(pair) + '_MG94_' + model + '.npy')
branchwise_information = {}#1.how about begin difference near 0.5
branchwise_assign_1to2 = {}
branchwise_assign_2to1 = {}
branchwise_assign_IGC = {}
branchwise_display = {}
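# The five blocks below run the same per-branch, per-site clustering procedure
# for each dataset (yeast paralogs, EDN/ECP, ERa/ERb, ARa/ERa and AR/MR/GR/PR),
# each with its own newick tree and outgroup.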
##Yeast
plist = Yeast_list
tree = Yeast_newicktree
outgroup = 'kluyveri'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
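# Flag 5-codon windows containing at least two sites with strong IGC support ('1');
# every site in such a window is marked for display as part of a putative IGC cluster.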
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
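# For each flagged branch, write the cluster site indices followed by the
# reconstructed codons and amino acids of both paralogs at the parent node
# (branch[0]) and at the child node (branch[1]).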
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##EDN
plist = [['EDN','ECP']]
tree = EDNECP_newicktree
outgroup = 'Saguinus_oedipus'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##ERaERb
plist = [['ERa', 'ERb']]
tree = ERa_ERb_newicktree
outgroup = 'Branchiostoma_floridae'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##ARaERa
plist = [['ARa', 'ERa']]
tree = ARa_ERa_newicktree
outgroup = 'Mus_musculus'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##ARMRGRPR
plist =ARMRGRPR_list
tree = ARMRGRPR_newicktree
outgroup = 'Aplysia_californica'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
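            # Sliding-window smoothing: flag a 5-site window for display (and this branch for
            # printing) whenever it contains at least two high-confidence ('1') IGC calls.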
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
|
gpl-3.0
| 2,687,871,542,146,271,000
| 60.666222
| 219
| 0.559486
| false
| 3.296717
| false
| false
| false
|
docusign/docusign-python-client
|
docusign_esign/models/workspace_settings.py
|
1
|
3403
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WorkspaceSettings(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comments_allowed': 'str'
}
attribute_map = {
'comments_allowed': 'commentsAllowed'
}
def __init__(self, comments_allowed=None): # noqa: E501
"""WorkspaceSettings - a model defined in Swagger""" # noqa: E501
self._comments_allowed = None
self.discriminator = None
if comments_allowed is not None:
self.comments_allowed = comments_allowed
@property
def comments_allowed(self):
"""Gets the comments_allowed of this WorkspaceSettings. # noqa: E501
# noqa: E501
:return: The comments_allowed of this WorkspaceSettings. # noqa: E501
:rtype: str
"""
return self._comments_allowed
@comments_allowed.setter
def comments_allowed(self, comments_allowed):
"""Sets the comments_allowed of this WorkspaceSettings.
# noqa: E501
:param comments_allowed: The comments_allowed of this WorkspaceSettings. # noqa: E501
:type: str
"""
self._comments_allowed = comments_allowed
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkspaceSettings, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkspaceSettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
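# Minimal usage sketch (illustrative, not part of the generated client): the value set
# through the comments_allowed property round-trips via to_dict().
if __name__ == "__main__":
    example_settings = WorkspaceSettings(comments_allowed="true")
    print(example_settings.to_dict())  # expected: {'comments_allowed': 'true'}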
|
mit
| -6,446,043,402,609,773,000
| 28.08547
| 140
| 0.566559
| false
| 4.307595
| false
| false
| false
|
FEniCS/fiat
|
FIAT/orthopoly.py
|
1
|
10895
|
"""
orthopoly.py - A suite of functions for generating orthogonal polynomials
and quadrature rules.
Copyright (c) 2014 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Last updated on Wed Jan 1 14:29:25 MST 2014
Modified by David A. Ham (david.ham@imperial.ac.uk), 2016
"""
import numpy as np
from functools import reduce
from math import gamma
def gauss(alpha, beta):
"""
Compute the Gauss nodes and weights from the recursion
coefficients associated with a set of orthogonal polynomials
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
Outputs:
x - quadrature nodes
w - quadrature weights
Adapted from the MATLAB code by Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m
"""
from numpy.linalg import eigh
A = np.diag(np.sqrt(beta)[1:], 1) + np.diag(alpha)
x, V = eigh(A, "U")
w = beta[0] * np.real(np.power(V[0, :], 2))
return x, w
def lobatto(alpha, beta, xl1, xl2):
"""
Compute the Lobatto nodes and weights with the preassigned
    nodes xl1, xl2
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
xl1 - assigned node location
xl2 - assigned node location
Outputs:
x - quadrature nodes
w - quadrature weights
    Based on Section 7 of the paper
"Some modified matrix eigenvalue problems"
by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
"""
from numpy.linalg import solve
n = len(alpha) - 1
en = np.zeros(n)
en[-1] = 1
A1 = np.vstack((np.sqrt(beta), alpha - xl1))
J1 = np.diag(A1[0, 1:-1], 1) + np.diag(A1[1, 1:]) + np.diag(A1[0, 1:-1], -1)
A2 = np.vstack((np.sqrt(beta), alpha - xl2))
J2 = np.diag(A2[0, 1:-1], 1) + np.diag(A2[1, 1:]) + np.diag(A2[0, 1:-1], -1)
g1 = solve(J1, en)
g2 = solve(J2, en)
C = np.array(((1, -g1[-1]), (1, -g2[-1])))
xl = np.array((xl1, xl2))
ab = solve(C, xl)
alphal = alpha
alphal[-1] = ab[0]
betal = beta
betal[-1] = ab[1]
x, w = gauss(alphal, betal)
return x, w
def rec_jacobi(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m
"""
nu = (b - a) / float(a + b + 2)
mu = 2 ** (a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)
if N == 1:
alpha = nu
beta = mu
else:
n = np.arange(1.0, N)
nab = 2 * n + a + b
alpha = np.hstack((nu, (b ** 2 - a ** 2) / (nab * (nab + 2))))
n = n[1:]
nab = nab[1:]
B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2) ** 2 * (a + b + 3))
B = 4 * (n + a) * (n + b) * n * (n + a + b) / \
(nab ** 2 * (nab + 1) * (nab - 1))
beta = np.hstack((mu, B1, B))
return alpha, beta
def rec_jacobi01(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
for the Jacobi polynomials which are orthogonal on [0,1]
See rec_jacobi for the recursion coefficients on [-1,1]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m
"""
if a <= -1 or b <= -1:
raise ValueError('''Jacobi coefficients are defined only
for alpha,beta > -1''')
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
c, d = rec_jacobi(N, a, b)
alpha = (1 + c) / 2
beta = d / 4
beta[0] = d[0] / 2 ** (a + b + 1)
return alpha, beta
def polyval(alpha, beta, x):
"""
Evaluate polynomials on x given the recursion coefficients alpha and beta
"""
N = len(alpha)
m = len(x)
P = np.zeros((m, N + 1))
P[:, 0] = 1
P[:, 1] = (x - alpha[0]) * P[:, 0]
for k in range(1, N):
P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]
return P
def jacobi(N, a, b, x, NOPT=1):
"""
JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns the
L2-normalized polynomials
"""
m = len(x)
P = np.zeros((m, N + 1))
apb = a + b
a1 = a - 1
b1 = b - 1
c = apb * (a - b)
P[:, 0] = 1
if N > 0:
P[:, 1] = 0.5 * (a - b + (apb + 2) * x)
if N > 1:
for k in range(2, N + 1):
k2 = 2 * k
g = k2 + apb
g1 = g - 1
g2 = g - 2
d = 2.0 * (k + a1) * (k + b1) * g
P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -
d * P[:, k - 2]) / (k2 * (k + apb) * g2)
if NOPT == 2:
        k = np.arange(N + 1)
        # math.gamma accepts only scalar arguments, so evaluate the normalisation factors elementwise
        pnorm = np.array([2 ** (apb + 1) * gamma(kk + a + 1) * gamma(kk + b + 1) /
                          ((2 * kk + a + b + 1) * (gamma(kk + 1) * gamma(kk + a + b + 1)))
                          for kk in k])
P *= 1 / np.sqrt(pnorm)
return P
def jacobiD(N, a, b, x, NOPT=1):
"""
JACOBID computes the first derivatives of the normalized Jacobi
polynomials which are orthogonal on [-1,1] with respect
to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns
the derivatives of the L2-normalized polynomials
"""
z = np.zeros((len(x), 1))
if N == 0:
Px = z
else:
Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *
((a + b + 2 + np.arange(N)))))
return Px
def mm_log(N, a):
"""
MM_LOG Modified moments for a logarithmic weight function.
The call mm=MM_LOG(n,a) computes the first n modified moments of the
logarithmic weight function w(t)=t^a log(1/t) on [0,1] relative to
shifted Legendre polynomials.
REFERENCE: Walter Gautschi,``On the preceding paper `A Legendre
polynomial integral' by James L. Blue'',
Math. Comp. 33 (1979), 742-743.
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m
"""
if a <= -1:
raise ValueError('Parameter a must be greater than -1')
prod = lambda z: reduce(lambda x, y: x * y, z, 1)
mm = np.zeros(N)
c = 1
for n in range(N):
if isinstance(a, int) and a < n:
p = range(n - a, n + a + 2)
mm[n] = (-1) ** (n - a) / prod(p)
mm[n] *= gamma(a + 1) ** 2
else:
if n == 0:
mm[0] = 1 / (a + 1) ** 2
else:
k = np.arange(1, n + 1)
s = 1 / (a + 1 + k) - 1 / (a + 1 - k)
p = (a + 1 - k) / (a + 1 + k)
mm[n] = (1 / (a + 1) + sum(s)) * prod(p) / (a + 1)
mm[n] *= c
c *= 0.5 * (n + 1) / (2 * n + 1)
return mm
def mod_chebyshev(N, mom, alpham, betam):
"""
    Calculate the recursion coefficients for the polynomials that are
    orthogonal with respect to a weight function represented in terms of
    its modified moments, which are obtained by integrating the monic
    polynomials against the weight function.
References
----------
John C. Wheeler, "Modified moments and Gaussian quadratures"
Rocky Mountain Journal of Mathematics, Vol. 4, Num. 2 (1974), 287--296
Walter Gautschi, "Orthogonal Polynomials (in Matlab)
Journal of Computational and Applied Mathematics, Vol. 178 (2005) 215--234
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m
"""
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
N = min(N, int(len(mom) / 2))
alpha = np.zeros(N)
beta = np.zeros(N)
normsq = np.zeros(N)
sig = np.zeros((N + 1, 2 * N))
alpha[0] = alpham[0] + mom[1] / mom[0]
beta[0] = mom[0]
sig[1, :] = mom
for n in range(2, N + 1):
for m in range(n - 1, 2 * N - n + 1):
sig[n, m] = sig[n - 1, m + 1] - (alpha[n - 2] - alpham[m]) * sig[n - 1, m] - \
beta[n - 2] * sig[n - 2, m] + betam[m] * sig[n - 1, m - 1]
alpha[n - 1] = alpham[n - 1] + sig[n, n] / sig[n, n - 1] - sig[n - 1, n - 1] / \
sig[n - 1, n - 2]
beta[n - 1] = sig[n, n - 1] / sig[n - 1, n - 2]
normsq = np.diagonal(sig, -1)
return alpha, beta, normsq
def rec_jaclog(N, a):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the monic polynomials which are orthogonal on [0,1]
with respect to the weight w(x)=x^a*log(1/x)
Inputs:
N - polynomial order
a - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
    Adapted from the MATLAB code:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m
"""
alphaj, betaj = rec_jacobi01(2 * N, 0, 0)
mom = mm_log(2 * N, a)
alpha, beta, _ = mod_chebyshev(N, mom, alphaj, betaj)
return alpha, beta
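# Minimal usage sketch (illustrative only, not part of the module's public interface):
# rec_jacobi with a = b = 0 followed by gauss yields an N-point Gauss-Legendre rule,
# which integrates polynomials of degree up to 2N - 1 exactly on [-1, 1].
if __name__ == "__main__":
    alpha_demo, beta_demo = rec_jacobi(5, 0, 0)
    x_demo, w_demo = gauss(alpha_demo, beta_demo)
    print(np.dot(w_demo, x_demo ** 4))  # ~0.4, the exact value of the integral of x^4 over [-1, 1]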
|
lgpl-3.0
| -129,732,180,367,428,380
| 27.372396
| 90
| 0.553281
| false
| 3.116419
| false
| false
| false
|
slremy/testingpubsub
|
myBallPlate/remote.py
|
1
|
7502
|
'''
Copyright (c) 2013 Sekou Remy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
'''
This program takes nine parameters as command line arguments:
the duration of the test,
the step size,
    the 3 PID constants for the position of the ball,
    and the 3 PID constants for the angle of the beam.
It produces a "fitness value" (higher is better), and provides this response on stdout.
The value is derived from the step response error for the closed-loop system.
python evaluatePID.py http://IPADDRESS:PORT/ duration STEPSIZE KPball KIball KDball KPbeam KIbeam KDbeam
'''
import web
import timeit
from numpy import sign, power, cos, sin
from collections import deque
import signal
from sys import exit, exc_info, argv
from time import sleep
ref0=[]
ref1=[]
try:
client = argv[1]
network = argv[2]
host = argv[3]
port = argv[4]
suffix = argv[5]
clientport = 0;
duration= float(argv[6]) if len(argv) > 6 else 200;
h= float(argv[7]) if len(argv) > 7 else .02;
KpR= float(argv[8]) if len(argv) > 8 else 6;
KiR= float(argv[9]) if len(argv) > 9 else 0; #0;
KdR= float(argv[10]) if len(argv) > 10 else -5.18;
KpM= float(argv[11]) if len(argv) > 11 else -12.08;
KiM= float(argv[12]) if len(argv) > 12 else 0;# 0;
KdM= float(argv[13]) if len(argv) > 13 else -0.4;
localport = argv[14] if len(argv) > 14 else str(int(port)+1000);
except:
print exc_info()[0]
print "syntax is " + argv[0] + " [client] [network] [host] [port] [suffix] duration STEPSIZE KPball KIball KDball KPbeam KIbeam KDbeam"
exit(0)
#Select process method from the correct client
if client == 'tcp':
from tcpclient import *
print "Importing process from tcpclient"
elif client == 'pycurl':
from pycurlclient import *
print "Importing process from pycurlclient"
elif client == 'httplib':
from httplibclient import *
print "Importing process from httplibclient"
elif client == 'urllib':
from urllibclient import *
print "Importing process from urllibclient"
elif client == 'udp':
from udpclient import *
print "Importing process from udpclient"
print "Host: %s:%s/" % (host,port)
#strip off trailing slash and http, if present.
host = host[len('http://'):] if host.startswith('http://') else host
host = host.rstrip('/');
#set up the best clock that can be accessed on this machine
clock = timeit.default_timer;
#get the current time (time the remote was started).
t0 = clock();
t=0
def closeprocess():
#process(host, port, "/stop?", clientport);
process(host, port, "/init?", clientport);
def catcher(signum, _):
#Sekou, you or someone, should convert this to a PID controller (11/8/2014)
global X, THETA, Y, PHI, t, StateTime, u_x, u_y
global tcrash, crashed, iteration, mse_x, mse_y
if ref0==[]:return
# Update the time and iteration number
iteration += 1
t1 = clock()-t0
url = "/u?&value0=%.4f&value1=%.4f&time=%.6f&stime=%.6f&access=8783392" % (u_x,u_y,t,StateTime);
response=process(host,port,url,clientport);
tr = clock() - t0;
if response != "" and ref0 != []:
X.appendleft( float(response.split()[0]));
THETA.appendleft( float(response.split()[2]));
Y.appendleft( float(response.split()[1]));
PHI.appendleft( float(response.split()[3]));
StateTime = float(response.split()[4])
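            # Cascaded PD-style control: the outer loop maps ball-position error to a desired
            # beam angle (clamped to +/-angle_max); the inner loop maps the beam-angle error
            # to the motor commands u_x and u_y.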
e_x = ref0 - X[0];
angle_d = AR * (e_x) + BR * (X[0]-X[1]);
if angle_d > angle_max: angle_d=angle_max;
elif angle_d < -angle_max: angle_d=-angle_max;
u_x = AM*(angle_d*16 - THETA[0]) + BM * (THETA[0] - THETA[1])
e_y = ref1 - Y[0];
angle_d1 = AR * (e_y) + BR * (Y[0]-Y[1]);
if angle_d1 > angle_max: angle_d1=angle_max;
elif angle_d1 < -angle_max: angle_d1=-angle_max;
u_y = AM*(angle_d1*16 - PHI[0]) + BM * (PHI[0] - PHI[1])
#Update the performance parameters
mse_x = (mse_x * iteration + e_x**2)/(iteration + 1)
mse_y = (mse_y * iteration + e_y**2)/(iteration + 1)
else:
print "Communication timed out! ", clock() - t0
print "(",ref0, ref1,")", X[-1], Y[-1]
web.config.debug = False;
urls = (
'/a','remotecontroller',
'/reset','reset',
'/stop','closecontroller'
)
app = web.application(urls, globals())
wsgifunc = app.wsgifunc()
wsgifunc = web.httpserver.StaticMiddleware(wsgifunc)
server = web.httpserver.WSGIServer(("0.0.0.0", int(localport)),wsgifunc)
print "http://%s:%s/" % ("0.0.0.0", localport)
class remotecontroller:
def GET(self):
return self.process();
def POST(self):
return self.process();
def process(self):
global ref0, ref1
i = web.input();#print i
(ref0, ref1) = (( float((i.ref0).replace(" ","+")) if hasattr(i, 'ref0') else 0 ), ( float((i.ref1).replace(" ","+")) if hasattr(i, 'ref1') else 0 ))
#print ref0, ref1 , "<<=== desired"
f = "%.4f %.4f %.4f %.4f %s" % (X[-1], Y[-1], THETA[-1], PHI[-1], repr(clock()));
web.header("Content-Type", "text/plain") # Set the Header
web.header("Access-Control-Allow-Origin", "*") # Set the Header
web.header("Access-Control-Allow-Credentials", "true") # Set the Header
return f
class reset:
def GET(self):
return self.process();
def POST(self):
return self.process();
def process(self):
global ref0, ref1
i = web.input();#print i
(ref0, ref1) = ([],[])
print ref0, ref1 , "<<=== desired"
f = "%.4f %.4f %.4f %.4f %s" % (X[-1], Y[-1], THETA[-1], PHI[-1], repr(clock()));
web.header("Content-Type", "text/plain") # Set the Header
web.header("Access-Control-Allow-Origin", "*") # Set the Header
web.header("Access-Control-Allow-Credentials", "true") # Set the Header
return f
def stopper():
server.stop()
exit(0);
if __name__ == "__main__":
(mysignal,myitimer)=(signal.SIGALRM,signal.ITIMER_REAL)
'''
(mysignal,myitimer)=(signal.SIGPROF,signal.ITIMER_PROF)
(mysignal,myitimer)=(signal.SIGVTALRM,signal.ITIMER_VIRTUAL)
'''
if h < duration/3.0 and h > 0.001:
signal.signal(mysignal, catcher)
signal.setitimer(myitimer, h, h)
try:
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
print exc_info()[0],"Shutting down service"
#closeprocess();
#return value
|
mit
| 1,301,263,015,616,686,600
| 34.220657
| 157
| 0.624367
| false
| 3.283151
| false
| false
| false
|
RyanWHowe/SunStat
|
SunStat/SunStat.py
|
1
|
9538
|
from __future__ import division
import datetime
import math
__author__ = 'Ryan W. Howe'
class SunStat:
def __init__(self, latitude, longitude, year, month, day, utcoffset=0):
"""SunStat class is able to provide the sunset, sunrise, or noon time for the Sun at a provided date.
DISCLAIMER These calculations are theoretically accurate to the minute for locations between +/- 72 degrees
        latitude, and within 10 minutes outside those latitudes. However, due to variations in atmospheric composition,
        temperature, pressure, and conditions, observed values may vary from the calculations.
The calculations (other than the Julian Day) are all taken from the NOAA, which came from Astronomical
Algorithms written by Jean Meeus.
:param latitude: Observation Latitude (+ North)
:param longitude: Observation Longitude (+ East)
:param year: Year of the Date for Observation
:param month: Month of the Date for Observation
:param day: Day of the Date for Observation
:param utcoffset: current UTC offset (else time objects are in UTC)
:return:
"""
try:
self.__date = datetime.date(year, month, day)
except:
raise SunStatException(self, "An invalid calendar date was entered")
if (latitude >= 90) | (latitude <= -90):
raise SunStatException(self, "Latitude range is 89.9999 to -89.9999")
else:
self.__latitude = latitude
if (longitude >= 180) | (longitude <= -180):
raise SunStatException(self, "Longitude range is 179.9999 to -179.9999")
else:
self.__longitude = longitude
if (utcoffset > 14) | (utcoffset < -14):
raise SunStatException(self, "UTC offsets are only valid between 14 and -14")
else:
self.__utcoffset = utcoffset
def julian_day(self):
""" This converts the current Gregorian calendar date into a Julian Day Number.
        which means how many days have passed since noon on November 24th, 4714 BC (Gregorian calendar) at Greenwich.
Once you understand that the math makes more sense.
all math taken from http://en.wikipedia.org/wiki/Julian_day
a is calculated so that it will be a 1 for January and February and 0 for all other months
y is the number of years since March 1st, -4800 (with a correction if we are in either Jan or Feb)
        m is the current month since March (i.e. April = 4, so a = 0 and m = 4 + 12*0 - 3 ==> 1)
        For the return calculation, the first term is simply the day of the month.
        The second is some math magic that comes from integer division: using the m calculated above and dropping the
        remainder gives the appropriate number of whole days since March 1, so for April (m = 1)
        (153 * 1 + 2) // 5 = 31 (there are 31 days in March and that is how many have passed by April 1), and for May 1
        (m = 2), (153 * 2 + 2) // 5 = 61 (31 days for March and 30 for April). Give it a try, it is kinda cool!
        The third term is simply the days in a standard calendar year (given the years as computed above).
        The fourth, fifth, and sixth terms all correct for leap years:
        you add a day for every year that is divisible by 4, subtract one back for the years that are also divisible
        by 100, then add back in the years that are also divisible by 400 (you can check here
        http://en.wikipedia.org/wiki/Leap_year , I had to). In theory (365 * y) could have just been replaced by
        (365.2425 * y), but there is enough going on that I stuck with the wiki calculations.
        Lastly there is the number of days that are over-counted by going back to 4800 BC; since we are only supposed to go to
        4714 BC, and the length of the year was calculated differently before the Gregorian calendar existed, this factor
        gets us to the correct whole date from March 1st, 4801 BC (or March 1st, -4800).
:return: float Julian Day of the passed Gregorian calendar date
"""
a = (14 - self.__date.month) // 12
y = self.__date.year + 4800 - a
m = self.__date.month + 12 * a - 3
return self.__date.day + ((153 * m + 2) // 5) + (365 * y) + (y // 4) - (y // 100) + (y // 400) - 32045
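    # Worked example: for 2000-01-01, a = 1, y = 6799 and m = 10, so the return value is
    # 1 + 306 + 2481635 + 1699 - 67 + 16 - 32045 = 2451545, the Julian Day used as the
    # epoch of the Julian Century below.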
def julian_century(self):
"""Compute the current Julian Century
        The Julian Century starts from the Julian Day of 2000/01/01 at 12:00 UTC, which is Julian Day 2451545.0.
        A Julian year is exactly 365.25 days long, therefore the Julian century is 36525 days long.
        Starting with the current Julian Day, the calculation is straightforward.
:return:float Julian Century
"""
return (self.julian_day() - 2451545) / 36525
def geom_mean_long_sun(self):
"""Calculate the mean longitude Solar Coordinate
:return:float degrees
"""
return 280.46646 + self.julian_century() * (36000.76983 + self.julian_century() * 0.0003032) % 360
def geom_mean_anom_sun(self):
        Calculate the mean anomaly Solar Coordinate
:return:float degrees
"""
return 357.52911 + self.julian_century() * (35999.05029 - 0.0001537 * self.julian_century())
def sun_eq_of_cent(self):
        Calculate the equation of center (off-center correction) Solar Coordinate
:return:float degree
"""
return math.sin(math.radians(self.geom_mean_anom_sun())) * (
1.914602 - self.julian_century() * (0.004817 + 0.000014 * self.julian_century())) + math.sin(
math.radians(2 * self.geom_mean_anom_sun())) * (0.019993 - 0.000101 * self.julian_century()) + math.sin(
math.radians(3 * self.geom_mean_anom_sun())) * 0.000289
def sun_true_long(self):
return self.geom_mean_long_sun() + self.sun_eq_of_cent()
def sun_app_long(self):
return self.sun_true_long() - 0.00569 - 0.00478 * math.sin(
math.radians(125.04 - 1934.136 * self.julian_century()))
def mean_obliq_ecliptic(self):
"""
        Calculate the mean obliquity of the ecliptic, used to convert ecliptic longitude to right ascension and declination.
:return:
"""
return 23 + (26 + ((21.448 - self.julian_century() * (
46.815 + self.julian_century() * (0.00059 - self.julian_century() * 0.001813)))) / 60) / 60
def obliq_corr(self):
return self.mean_obliq_ecliptic() + 0.00256 * math.cos(math.radians(125.04 - 1934.136 * self.julian_century()))
def sun_declin(self):
return math.degrees(
math.asin(math.sin(math.radians(self.obliq_corr())) * math.sin(math.radians(self.sun_app_long()))))
def ha_sunrise(self):
return math.degrees(math.acos(math.cos(math.radians(90.833)) / (
math.cos(math.radians(self.__latitude)) * math.cos(math.radians(self.sun_declin()))) - math.tan(
math.radians(self.__latitude)) * math.tan(math.radians(self.sun_declin()))))
def eccent_earth_orbit(self):
return 0.016708634 - self.julian_century() * (0.000042037 + 0.0000001267 * self.julian_century())
def var_y(self):
return math.tan(math.radians(self.obliq_corr() / 2)) * math.tan(math.radians(self.obliq_corr() / 2))
def eq_of_time(self):
return 4 * math.degrees(
self.var_y() * math.sin(2 * math.radians(self.geom_mean_long_sun()))
- 2 * self.eccent_earth_orbit() * math.sin(math.radians(self.geom_mean_anom_sun()))
+ 4 * self.eccent_earth_orbit() * self.var_y() * math.sin(
math.radians(self.geom_mean_anom_sun())) * math.cos(2 * math.radians(self.geom_mean_long_sun()))
- 0.5 * self.var_y() * self.var_y() * math.sin(4 * math.radians(self.geom_mean_long_sun()))
- 1.25 * self.eccent_earth_orbit() * self.eccent_earth_orbit() * math.sin(
2 * math.radians(self.geom_mean_anom_sun()))
)
def __solar_noon_since_midnight(self):
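        # Minutes after local midnight: 720 (clock noon) shifted by 4 minutes per degree of
        # longitude, corrected by the equation of time and converted by the UTC offset.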
return 720 - 4 * self.__longitude - self.eq_of_time() + self.__utcoffset * 60
def __sun_rise_since_midnight(self):
return self.__solar_noon_since_midnight() - self.ha_sunrise() * 4
def __sun_set_since_midnight(self):
return self.__solar_noon_since_midnight() + self.ha_sunrise() * 4
def sunrise(self):
_hour = self.__sun_rise_since_midnight() // 60
_minute = self.__sun_rise_since_midnight() - (60 * _hour)
_second, _minute = math.modf(_minute)
_second *= 60
_millisecond, _second = math.modf(_second)
return datetime.time(int(_hour), int(_minute), int(_second), int(_millisecond * 1000000))
def sunset(self):
_hour = self.__sun_set_since_midnight() // 60
_minute = self.__sun_set_since_midnight() - (60 * _hour)
_second, _minute = math.modf(_minute)
_second *= 60
_millisecond, _second = math.modf(_second)
return datetime.time(int(_hour), int(_minute), int(_second), int(_millisecond * 1000000))
def noon(self):
_hour = self.__solar_noon_since_midnight() // 60
_minute = self.__solar_noon_since_midnight() - (60 * _hour)
_second, _minute = math.modf(_minute)
_second *= 60
_millisecond, _second = math.modf(_second)
return datetime.time(int(_hour), int(_minute), int(_second), int(_millisecond * 1000000))
class SunStatException(Exception):
def __init__(self, parent, msg):
self.message = msg
def __str__(self):
return self.message
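# Minimal usage sketch (illustrative coordinates and date, not part of the class):
# sunrise, solar noon and sunset for an observer at 40 N, 75 W on 2014-06-21, UTC-4.
if __name__ == "__main__":
    demo = SunStat(40.0, -75.0, 2014, 6, 21, utcoffset=-4)
    print("sunrise %s  noon %s  sunset %s" % (demo.sunrise(), demo.noon(), demo.sunset()))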
|
mit
| 2,214,814,354,007,430,400
| 49.739362
| 120
| 0.622458
| false
| 3.535211
| false
| false
| false
|
xmendez/wfuzz
|
src/wfuzz/plugins/payloads/permutation.py
|
1
|
1709
|
from wfuzz.externals.moduleman.plugin import moduleman_plugin
from wfuzz.plugin_api.base import BasePayload
from wfuzz.exception import FuzzExceptBadOptions
from wfuzz.fuzzobjects import FuzzWordType
@moduleman_plugin
class permutation(BasePayload):
name = "permutation"
author = ("Xavi Mendez (@xmendez)",)
version = "0.1"
description = ()
summary = "Returns permutations of the given charset and length."
category = ["default"]
priority = 99
parameters = (("ch", "", True, "Charset and len to permute in the form of abc-2."),)
default_parameter = "ch"
def __init__(self, params):
BasePayload.__init__(self, params)
self.charset = []
try:
ran = self.params["ch"].split("-")
self.charset = ran[0]
self.width = int(ran[1])
        except (ValueError, IndexError):
raise FuzzExceptBadOptions('Bad range format (eg. "0-ffa")')
pset = []
for x in self.charset:
pset.append(x)
words = self.xcombinations(pset, self.width)
self.lista = []
for x in words:
self.lista.append("".join(x))
self.__count = len(self.lista)
def count(self):
return self.__count
def get_type(self):
return FuzzWordType.WORD
def get_next(self):
if self.lista != []:
payl = self.lista.pop()
return payl
else:
raise StopIteration
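    # Note: items[:i] + items[i:] reproduces the full item list, so this recursion yields
    # every length-n word over the charset with repetition allowed (len(charset) ** n in total).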
def xcombinations(self, items, n):
if n == 0:
yield []
else:
for i in range(len(items)):
for cc in self.xcombinations(items[:i] + items[i:], n - 1):
yield [items[i]] + cc
|
gpl-2.0
| -24,799,782,009,886,480
| 26.564516
| 88
| 0.564073
| false
| 3.797778
| false
| false
| false
|
jankeromnes/depot_tools
|
gcl.py
|
1
|
49489
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Wrapper script around Rietveld's upload.py that simplifies working with groups
of files.
"""
import json
import optparse
import os
import random
import re
import string
import sys
import tempfile
import time
import urllib2
import breakpad # pylint: disable=W0611
import fix_encoding
import gclient_utils
import git_cl
import presubmit_support
import rietveld
from scm import SVN
import subprocess2
from third_party import upload
__version__ = '1.2.1'
CODEREVIEW_SETTINGS = {
# To make gcl send reviews to a server, check in a file named
# "codereview.settings" (see |CODEREVIEW_SETTINGS_FILE| below) to your
# project's base directory and add the following line to codereview.settings:
# CODE_REVIEW_SERVER: codereview.yourserver.org
}
# globals that store the root of the current repository and the directory where
# we store information about changelists.
REPOSITORY_ROOT = ""
# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
CODEREVIEW_SETTINGS_FILE_NOT_FOUND = (
'No %s file found. Please add one.' % CODEREVIEW_SETTINGS_FILE)
# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
# Global cache of files cached in GetCacheDir().
FILES_CACHE = {}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"
def CheckHomeForFile(filename):
"""Checks the users home dir for the existence of the given file. Returns
the path to the file if it's there, or None if it is not.
"""
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
full_path = os.path.join(home, filename)
if os.path.exists(full_path):
return full_path
return None
def UnknownFiles():
"""Runs svn status and returns unknown files."""
return [
item[1] for item in SVN.CaptureStatus([], GetRepositoryRoot())
if item[0][0] == '?'
]
def GetRepositoryRoot():
"""Returns the top level directory of the current repository.
The directory is returned as an absolute path.
"""
global REPOSITORY_ROOT
if not REPOSITORY_ROOT:
REPOSITORY_ROOT = SVN.GetCheckoutRoot(os.getcwd())
if not REPOSITORY_ROOT:
raise gclient_utils.Error("gcl run outside of repository")
return REPOSITORY_ROOT
def GetInfoDir():
"""Returns the directory where gcl info files are stored."""
return os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')
def GetChangesDir():
"""Returns the directory where gcl change files are stored."""
return os.path.join(GetInfoDir(), 'changes')
def GetCacheDir():
"""Returns the directory where gcl change files are stored."""
return os.path.join(GetInfoDir(), 'cache')
def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
"""Retrieves a file from the repository and caches it in GetCacheDir() for
max_age seconds.
  use_root: If False, walk up the directory tree for the first match; otherwise go
  directly to the root of the repository.
Note: The cache will be inconsistent if the same file is retrieved with both
use_root=True and use_root=False. Don't be stupid.
"""
if filename not in FILES_CACHE:
# Don't try to look up twice.
FILES_CACHE[filename] = None
# First we check if we have a cached version.
try:
cached_file = os.path.join(GetCacheDir(), filename)
except (gclient_utils.Error, subprocess2.CalledProcessError):
return None
if (not os.path.exists(cached_file) or
(time.time() - os.stat(cached_file).st_mtime) > max_age):
dir_info = SVN.CaptureLocalInfo([], '.')
repo_root = dir_info['Repository Root']
if use_root:
url_path = repo_root
else:
url_path = dir_info['URL']
while True:
# Look in the repository at the current level for the file.
for _ in range(5):
content = None
try:
# Take advantage of the fact that svn won't output to stderr in case
# of success but will do in case of failure so don't mind putting
# stderr into content_array.
content_array = []
svn_path = url_path + '/' + filename
args = ['svn', 'cat', svn_path]
if sys.platform != 'darwin':
# MacOSX 10.5.2 has a bug with svn 1.4.4 that will trigger the
# 'Can\'t get username or password' and can be fixed easily.
# The fix doesn't work if the user upgraded to svn 1.6.x. Bleh.
# I don't have time to fix their broken stuff.
args.append('--non-interactive')
gclient_utils.CheckCallAndFilter(
args, cwd='.', filter_fn=content_array.append)
# Exit the loop if the file was found. Override content.
content = '\n'.join(content_array)
break
except (gclient_utils.Error, subprocess2.CalledProcessError):
if content_array[0].startswith(
'svn: Can\'t get username or password'):
ErrorExit('Your svn credentials expired. Please run svn update '
'to fix the cached credentials')
if content_array[0].startswith('svn: Can\'t get password'):
          ErrorExit('If you are using a Mac and svn --version shows 1.4.x, '
              'please hack gcl.py to remove --non-interactive usage, it\'s '
              'a bug on your installed copy')
if (content_array[0].startswith('svn: File not found:') or
content_array[0].endswith('path not found')):
break
# Otherwise, fall through to trying again.
if content:
break
if url_path == repo_root:
# Reached the root. Abandoning search.
break
# Go up one level to try again.
url_path = os.path.dirname(url_path)
if content is not None or filename != CODEREVIEW_SETTINGS_FILE:
# Write a cached version even if there isn't a file, so we don't try to
# fetch it each time. codereview.settings must always be present so do
# not cache negative.
gclient_utils.FileWrite(cached_file, content or '')
else:
content = gclient_utils.FileRead(cached_file, 'r')
# Keep the content cached in memory.
FILES_CACHE[filename] = content
return FILES_CACHE[filename]
def GetCodeReviewSetting(key):
"""Returns a value for the given key for this repository."""
# Use '__just_initialized' as a flag to determine if the settings were
# already initialized.
if '__just_initialized' not in CODEREVIEW_SETTINGS:
settings_file = GetCachedFile(CODEREVIEW_SETTINGS_FILE)
if settings_file:
CODEREVIEW_SETTINGS.update(
gclient_utils.ParseCodereviewSettingsContent(settings_file))
CODEREVIEW_SETTINGS.setdefault('__just_initialized', None)
return CODEREVIEW_SETTINGS.get(key, "")
def Warn(msg):
print >> sys.stderr, msg
def ErrorExit(msg):
print >> sys.stderr, msg
sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
"""Executes a command and returns the output and the return code."""
p = subprocess2.Popen(
command,
cwd=GetRepositoryRoot(),
stdout=subprocess2.PIPE,
stderr=subprocess2.STDOUT,
universal_newlines=True)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
p.stdout.close()
return output, p.returncode
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
return RunShellWithReturnCode(command, print_output)[0]
def FilterFlag(args, flag):
"""Returns True if the flag is present in args list.
The flag is removed from args if present.
"""
if flag in args:
args.remove(flag)
return True
return False
class ChangeInfo(object):
"""Holds information about a changelist.
name: change name.
issue: the Rietveld issue number or 0 if it hasn't been uploaded yet.
patchset: the Rietveld latest patchset number or 0.
description: the description.
files: a list of 2 tuple containing (status, filename) of changed files,
with paths being relative to the top repository directory.
local_root: Local root directory
rietveld: rietveld server for this change
"""
# Kept for unit test support. This is for the old format, it's deprecated.
SEPARATOR = "\n-----\n"
def __init__(self, name, issue, patchset, description, files, local_root,
rietveld_url, needs_upload):
# Defer the description processing to git_cl.ChangeDescription.
self._desc = git_cl.ChangeDescription(description)
self.name = name
self.issue = int(issue)
self.patchset = int(patchset)
self._files = files or []
self.patch = None
self._local_root = local_root
self.needs_upload = needs_upload
self.rietveld = gclient_utils.UpgradeToHttps(
rietveld_url or GetCodeReviewSetting('CODE_REVIEW_SERVER'))
self._rpc_server = None
@property
def description(self):
return self._desc.description
def force_description(self, new_description):
self._desc = git_cl.ChangeDescription(new_description)
self.needs_upload = True
def append_footer(self, line):
self._desc.append_footer(line)
def get_reviewers(self):
return self._desc.get_reviewers()
def NeedsUpload(self):
return self.needs_upload
def GetFileNames(self):
"""Returns the list of file names included in this change."""
return [f[1] for f in self._files]
def GetFiles(self):
"""Returns the list of files included in this change with their status."""
return self._files
def GetLocalRoot(self):
"""Returns the local repository checkout root directory."""
return self._local_root
def Exists(self):
"""Returns True if this change already exists (i.e., is not new)."""
return (self.issue or self.description or self._files)
def _NonDeletedFileList(self):
"""Returns a list of files in this change, not including deleted files."""
return [f[1] for f in self.GetFiles()
if not f[0].startswith("D")]
def _AddedFileList(self):
"""Returns a list of files added in this change."""
return [f[1] for f in self.GetFiles() if f[0].startswith("A")]
def Save(self):
"""Writes the changelist information to disk."""
data = json.dumps({
'issue': self.issue,
'patchset': self.patchset,
'needs_upload': self.NeedsUpload(),
'files': self.GetFiles(),
'description': self.description,
'rietveld': self.rietveld,
}, sort_keys=True, indent=2)
gclient_utils.FileWrite(GetChangelistInfoFile(self.name), data)
def Delete(self):
"""Removes the changelist information from disk."""
os.remove(GetChangelistInfoFile(self.name))
def RpcServer(self):
if not self._rpc_server:
if not self.rietveld:
ErrorExit(CODEREVIEW_SETTINGS_FILE_NOT_FOUND)
self._rpc_server = rietveld.CachingRietveld(self.rietveld, None, None)
return self._rpc_server
def CloseIssue(self):
"""Closes the Rietveld issue for this changelist."""
# Newer versions of Rietveld require us to pass an XSRF token to POST, so
# we fetch it from the server.
xsrf_token = self.SendToRietveld(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
# You cannot close an issue with a GET.
# We pass an empty string for the data so it is a POST rather than a GET.
data = [("description", self.description),
("xsrf_token", xsrf_token)]
ctype, body = upload.EncodeMultipartFormData(data, [])
self.SendToRietveld('/%d/close' % self.issue, payload=body,
content_type=ctype)
def UpdateRietveldDescription(self):
"""Sets the description for an issue on Rietveld."""
data = [("description", self.description),]
ctype, body = upload.EncodeMultipartFormData(data, [])
self.SendToRietveld('/%d/description' % self.issue, payload=body,
content_type=ctype)
self.needs_upload = False
def GetIssueDescription(self):
"""Returns the issue description from Rietveld."""
return self.SendToRietveld('/%d/description' % self.issue)
def UpdateDescriptionFromIssue(self):
"""Updates self.description with the issue description from Rietveld."""
self._desc = git_cl.ChangeDescription(
self.SendToRietveld('/%d/description' % self.issue))
def AddComment(self, comment):
"""Adds a comment for an issue on Rietveld.
As a side effect, this will email everyone associated with the issue."""
return self.RpcServer().add_comment(self.issue, comment)
def PrimeLint(self):
"""Do background work on Rietveld to lint the file so that the results are
ready when the issue is viewed."""
if self.issue and self.patchset:
self.SendToRietveld('/lint/issue%s_%s' % (self.issue, self.patchset),
timeout=10)
def SendToRietveld(self, request_path, timeout=None, **kwargs):
"""Send a POST/GET to Rietveld. Returns the response body."""
try:
return self.RpcServer().Send(request_path, timeout=timeout, **kwargs)
except urllib2.URLError:
if timeout is None:
ErrorExit('Error accessing url %s' % request_path)
else:
return None
def MissingTests(self):
"""Returns True if the change looks like it needs unit tests but has none.
A change needs unit tests if it contains any new source files or methods.
"""
SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
# Ignore third_party entirely.
files = [f for f in self._NonDeletedFileList()
if f.find("third_party") == -1]
added_files = [f for f in self._AddedFileList()
if f.find("third_party") == -1]
# If the change is entirely in third_party, we're done.
if len(files) == 0:
return False
# Any new or modified test files?
# A test file's name ends with "test.*" or "tests.*".
test_files = [test for test in files
if os.path.splitext(test)[0].rstrip("s").endswith("test")]
if len(test_files) > 0:
return False
# Any new source files?
source_files = [item for item in added_files
if os.path.splitext(item)[1] in SOURCE_SUFFIXES]
if len(source_files) > 0:
return True
# Do the long test, checking the files for new methods.
return self._HasNewMethod()
def _HasNewMethod(self):
"""Returns True if the changeset contains any new functions, or if a
function signature has been changed.
A function is identified by starting flush left, containing a "(" before
the next flush-left line, and either ending with "{" before the next
flush-left line or being followed by an unindented "{".
Currently this returns True for new methods, new static functions, and
methods or functions whose signatures have been changed.
Inline methods added to header files won't be detected by this. That's
acceptable for purposes of determining if a unit test is needed, since
inline methods should be trivial.
"""
# To check for methods added to source or header files, we need the diffs.
# We'll generate them all, since there aren't likely to be many files
# apart from source and headers; besides, we'll want them all if we're
# uploading anyway.
if self.patch is None:
self.patch = GenerateDiff(self.GetFileNames())
definition = ""
for line in self.patch.splitlines():
if not line.startswith("+"):
continue
line = line.strip("+").rstrip(" \t")
# Skip empty lines, comments, and preprocessor directives.
# TODO(pamg): Handle multiline comments if it turns out to be a problem.
if line == "" or line.startswith("/") or line.startswith("#"):
continue
# A possible definition ending with "{" is complete, so check it.
if definition.endswith("{"):
if definition.find("(") != -1:
return True
definition = ""
# A { or an indented line, when we're in a definition, continues it.
if (definition != "" and
(line == "{" or line.startswith(" ") or line.startswith("\t"))):
definition += line
# A flush-left line starts a new possible function definition.
elif not line.startswith(" ") and not line.startswith("\t"):
definition = line
return False
@staticmethod
def Load(changename, local_root, fail_on_not_found, update_status):
"""Gets information about a changelist.
Args:
fail_on_not_found: if True, this function will quit the program if the
changelist doesn't exist.
update_status: if True, the svn status will be updated for all the files
and unchanged files will be removed.
Returns: a ChangeInfo object.
"""
info_file = GetChangelistInfoFile(changename)
if not os.path.exists(info_file):
if fail_on_not_found:
ErrorExit("Changelist " + changename + " not found.")
return ChangeInfo(changename, 0, 0, '', None, local_root, None, False)
content = gclient_utils.FileRead(info_file)
save = False
try:
values = ChangeInfo._LoadNewFormat(content)
except ValueError:
try:
values = ChangeInfo._LoadOldFormat(content)
save = True
except ValueError:
ErrorExit(
('Changelist file %s is corrupt.\n'
'Either run "gcl delete %s" or manually edit the file') % (
info_file, changename))
files = values['files']
if update_status:
for item in files[:]:
status_result = SVN.CaptureStatus(item[1], local_root)
if not status_result or not status_result[0][0]:
# File has been reverted.
save = True
files.remove(item)
continue
status = status_result[0][0]
if status != item[0]:
save = True
files[files.index(item)] = (status, item[1])
change_info = ChangeInfo(
changename,
values['issue'],
values['patchset'],
values['description'],
files,
local_root,
values.get('rietveld'),
values['needs_upload'])
if save:
change_info.Save()
return change_info
@staticmethod
def _LoadOldFormat(content):
# The info files have the following format:
# issue_id, patchset\n (, patchset is optional)
# SEPARATOR\n
# filepath1\n
# filepath2\n
# .
# .
# filepathn\n
# SEPARATOR\n
# description
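    # For example (hypothetical values), an issue 42 at patchset 3 with one modified file:
    #   42, 3, dirty
    #   -----
    #   M      chrome/foo.cc
    #   -----
    #   Fix the foo widget.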
split_data = content.split(ChangeInfo.SEPARATOR, 2)
if len(split_data) != 3:
raise ValueError('Bad change format')
values = {
'issue': 0,
'patchset': 0,
'needs_upload': False,
'files': [],
}
items = split_data[0].split(', ')
if items[0]:
values['issue'] = int(items[0])
if len(items) > 1:
values['patchset'] = int(items[1])
if len(items) > 2:
values['needs_upload'] = (items[2] == "dirty")
for line in split_data[1].splitlines():
status = line[:7]
filename = line[7:]
values['files'].append((status, filename))
values['description'] = split_data[2]
return values
@staticmethod
def _LoadNewFormat(content):
return json.loads(content)
def __str__(self):
out = ['%s:' % self.__class__.__name__]
for k in dir(self):
if k.startswith('__'):
continue
v = getattr(self, k)
if v is self or callable(getattr(self, k)):
continue
out.append(' %s: %r' % (k, v))
return '\n'.join(out)
def GetChangelistInfoFile(changename):
"""Returns the file that stores information about a changelist."""
if not changename or re.search(r'[^\w-]', changename):
ErrorExit("Invalid changelist name: " + changename)
return os.path.join(GetChangesDir(), changename)
def LoadChangelistInfoForMultiple(changenames, local_root, fail_on_not_found,
update_status):
"""Loads many changes and merge their files list into one pseudo change.
  This is mainly useful for concatenating many changes into one for a 'gcl try'.
"""
changes = changenames.split(',')
aggregate_change_info = ChangeInfo(
changenames, 0, 0, '', None, local_root, None, False)
for change in changes:
aggregate_change_info._files += ChangeInfo.Load(
change, local_root, fail_on_not_found, update_status).GetFiles()
return aggregate_change_info
def GetCLs():
"""Returns a list of all the changelists in this repository."""
cls = os.listdir(GetChangesDir())
if CODEREVIEW_SETTINGS_FILE in cls:
cls.remove(CODEREVIEW_SETTINGS_FILE)
return cls
def GenerateChangeName():
"""Generate a random changelist name."""
random.seed()
current_cl_names = GetCLs()
while True:
cl_name = (random.choice(string.ascii_lowercase) +
random.choice(string.digits) +
random.choice(string.ascii_lowercase) +
random.choice(string.digits))
if cl_name not in current_cl_names:
return cl_name
def GetModifiedFiles():
"""Returns a set that maps from changelist name to (status,filename) tuples.
Files not in a changelist have an empty changelist name. Filenames are in
relation to the top level directory of the current repository. Note that
only the current directory and subdirectories are scanned, in order to
improve performance while still being flexible.
"""
files = {}
  # Since the files are normalized to the root folder of the repository, figure
# out what we need to add to the paths.
dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)
# Get a list of all files in changelists.
files_in_cl = {}
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(),
fail_on_not_found=True, update_status=False)
for status, filename in change_info.GetFiles():
files_in_cl[filename] = change_info.name
# Get all the modified files down the current directory.
for line in SVN.CaptureStatus(None, os.getcwd()):
status = line[0]
filename = line[1]
if status[0] == "?":
continue
if dir_prefix:
filename = os.path.join(dir_prefix, filename)
change_list_name = ""
if filename in files_in_cl:
change_list_name = files_in_cl[filename]
files.setdefault(change_list_name, []).append((status, filename))
return files
def GetFilesNotInCL():
"""Returns a list of tuples (status,filename) that aren't in any changelists.
See docstring of GetModifiedFiles for information about path of files and
which directories are scanned.
"""
modified_files = GetModifiedFiles()
if "" not in modified_files:
return []
return modified_files[""]
def ListFiles(show_unknown_files):
files = GetModifiedFiles()
cl_keys = files.keys()
cl_keys.sort()
for cl_name in cl_keys:
if not cl_name:
continue
note = ""
change_info = ChangeInfo.Load(cl_name, GetRepositoryRoot(),
fail_on_not_found=True, update_status=False)
if len(change_info.GetFiles()) != len(files[cl_name]):
note = " (Note: this changelist contains files outside this directory)"
print "\n--- Changelist " + cl_name + note + ":"
for filename in files[cl_name]:
print "".join(filename)
if show_unknown_files:
unknown_files = UnknownFiles()
if (files.get('') or (show_unknown_files and len(unknown_files))):
print "\n--- Not in any changelist:"
for item in files.get('', []):
print "".join(item)
if show_unknown_files:
for filename in unknown_files:
print "? %s" % filename
return 0
def GenerateDiff(files):
return SVN.GenerateDiff(
files, GetRepositoryRoot(), full_move=False, revision=None)
def OptionallyDoPresubmitChecks(change_info, committing, args):
if FilterFlag(args, "--no_presubmit") or FilterFlag(args, "--force"):
breakpad.SendStack(
breakpad.DEFAULT_URL + '/breakpad',
'GclHooksBypassedCommit',
'Issue %s/%s bypassed hook when committing' %
(change_info.rietveld, change_info.issue),
verbose=False)
return presubmit_support.PresubmitOutput()
return DoPresubmitChecks(change_info, committing, True)
def defer_attributes(a, b):
"""Copy attributes from an object (like a function) to another."""
for x in dir(a):
if not getattr(b, x, None):
setattr(b, x, getattr(a, x))
def need_change(function):
"""Converts args -> change_info."""
# pylint: disable=W0612,W0621
def hook(args):
if not len(args) == 1:
ErrorExit("You need to pass a change list name")
change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(), True, True)
return function(change_info)
defer_attributes(function, hook)
hook.need_change = True
hook.no_args = True
return hook
def need_change_and_args(function):
"""Converts args -> change_info."""
# pylint: disable=W0612,W0621
def hook(args):
if not args:
ErrorExit("You need to pass a change list name")
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
return function(change_info, args)
defer_attributes(function, hook)
hook.need_change = True
return hook
def no_args(function):
"""Make sure no args are passed."""
# pylint: disable=W0612,W0621
def hook(args):
if args:
ErrorExit("Doesn't support arguments")
return function()
defer_attributes(function, hook)
hook.no_args = True
return hook
def attrs(**kwargs):
"""Decorate a function with new attributes."""
def decorate(function):
for k in kwargs:
setattr(function, k, kwargs[k])
return function
return decorate
@no_args
def CMDopened():
"""Lists modified files in the current directory down."""
return ListFiles(False)
@no_args
def CMDstatus():
"""Lists modified and unknown files in the current directory down."""
return ListFiles(True)
@need_change_and_args
@attrs(usage='[--no_presubmit] [--no_watchlists]')
def CMDupload(change_info, args):
"""Uploads the changelist to the server for review.
This does not submit a try job; use gcl try to submit a try job.
"""
if '-s' in args or '--server' in args:
ErrorExit('Don\'t use the -s flag, fix codereview.settings instead')
if not change_info.GetFiles():
print "Nothing to upload, changelist is empty."
return 0
output = OptionallyDoPresubmitChecks(change_info, False, args)
if not output.should_continue():
return 1
no_watchlists = (FilterFlag(args, "--no_watchlists") or
FilterFlag(args, "--no-watchlists"))
# Map --send-mail to --send_mail
if FilterFlag(args, "--send-mail"):
args.append("--send_mail")
# Replace -m with -t and --message with --title, but make sure to
# preserve anything after the -m/--message.
found_deprecated_arg = [False]
def replace_message(a):
if a.startswith('-m'):
found_deprecated_arg[0] = True
return '-t' + a[2:]
elif a.startswith('--message'):
found_deprecated_arg[0] = True
return '--title' + a[9:]
return a
args = map(replace_message, args)
if found_deprecated_arg[0]:
print >> sys.stderr, (
'\nWARNING: Use -t or --title to set the title of the patchset.\n'
'In the near future, -m or --message will send a message instead.\n'
'See http://goo.gl/JGg0Z for details.\n')
upload_arg = ["upload.py", "-y"]
upload_arg.append("--server=%s" % change_info.rietveld)
reviewers = change_info.get_reviewers() or output.reviewers
if (reviewers and
not any(arg.startswith('-r') or arg.startswith('--reviewer') for
arg in args)):
upload_arg.append('--reviewers=%s' % ','.join(reviewers))
upload_arg.extend(args)
desc_file = None
try:
if change_info.issue:
# Uploading a new patchset.
upload_arg.append("--issue=%d" % change_info.issue)
if not any(i.startswith('--title') or i.startswith('-t') for i in args):
upload_arg.append('--title= ')
else:
# First time we upload.
handle, desc_file = tempfile.mkstemp(text=True)
os.write(handle, change_info.description)
os.close(handle)
# Watchlist processing -- CC people interested in this changeset
# http://dev.chromium.org/developers/contributing-code/watchlists
if not no_watchlists:
import watchlists
watchlist = watchlists.Watchlists(change_info.GetLocalRoot())
watchers = watchlist.GetWatchersForPaths(change_info.GetFileNames())
cc_list = GetCodeReviewSetting("CC_LIST")
if not no_watchlists and watchers:
# Filter out all empty elements and join by ','
cc_list = ','.join(filter(None, [cc_list] + watchers))
if cc_list:
upload_arg.append("--cc=" + cc_list)
upload_arg.append("--file=%s" % desc_file)
if GetCodeReviewSetting("PRIVATE") == "True":
upload_arg.append("--private")
# If we have a lot of files with long paths, then we won't be able to fit
# the command to "svn diff". Instead, we generate the diff manually for
# each file and concatenate them before passing it to upload.py.
if change_info.patch is None:
change_info.patch = GenerateDiff(change_info.GetFileNames())
# Change the current working directory before calling upload.py so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(change_info.GetLocalRoot())
try:
try:
issue, patchset = upload.RealMain(upload_arg, change_info.patch)
except KeyboardInterrupt:
sys.exit(1)
if issue and patchset:
change_info.issue = int(issue)
change_info.patchset = int(patchset)
change_info.Save()
change_info.PrimeLint()
finally:
os.chdir(previous_cwd)
finally:
if desc_file:
os.remove(desc_file)
print "*** Upload does not submit a try; use gcl try to submit a try. ***"
return 0
@need_change_and_args
@attrs(usage='[--upload]')
def CMDpresubmit(change_info, args):
"""Runs presubmit checks on the change.
The actual presubmit code is implemented in presubmit_support.py and looks
for PRESUBMIT.py files."""
if not change_info.GetFiles():
print('Nothing to presubmit check, changelist is empty.')
return 0
parser = optparse.OptionParser()
parser.add_option('--upload', action='store_true')
options, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % args)
if options.upload:
print('*** Presubmit checks for UPLOAD would report: ***')
return not DoPresubmitChecks(change_info, False, False)
else:
print('*** Presubmit checks for COMMIT would report: ***')
return not DoPresubmitChecks(change_info, True, False)
def TryChange(change_info, args, swallow_exception):
"""Create a diff file of change_info and send it to the try server."""
try:
import trychange
except ImportError:
if swallow_exception:
return 1
ErrorExit("You need to install trychange.py to use the try server.")
trychange_args = []
if change_info:
trychange_args.extend(['--name', change_info.name])
if change_info.issue:
trychange_args.extend(["--issue", str(change_info.issue)])
if change_info.patchset:
trychange_args.extend(["--patchset", str(change_info.patchset)])
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
else:
change = None
trychange_args.extend(args)
return trychange.TryChange(
trychange_args,
change=change,
swallow_exception=swallow_exception,
prog='gcl try',
extra_epilog='\n'
'When called from gcl, use the format gcl try <change_name>.\n')
@need_change_and_args
@attrs(usage='[--no_presubmit]')
def CMDcommit(change_info, args):
"""Commits the changelist to the repository."""
if not change_info.GetFiles():
print "Nothing to commit, changelist is empty."
return 1
# OptionallyDoPresubmitChecks has a side-effect which eats these flags.
bypassed = '--no_presubmit' in args or '--force' in args
output = OptionallyDoPresubmitChecks(change_info, True, args)
if not output.should_continue():
return 1
# We face a problem with svn here: Let's say change 'bleh' modifies
# svn:ignore on dir1\. but another unrelated change 'pouet' modifies
# dir1\foo.cc. When the user `gcl commit bleh`, foo.cc is *also committed*.
# The only fix is to use --non-recursive but that has its issues too:
# Let's say if dir1 is deleted, --non-recursive must *not* be used otherwise
# you'll get "svn: Cannot non-recursively commit a directory deletion of a
# directory with child nodes". Yay...
commit_cmd = ["svn", "commit"]
if change_info.issue:
# Get the latest description from Rietveld.
change_info.UpdateDescriptionFromIssue()
commit_desc = git_cl.ChangeDescription(change_info.description)
if change_info.issue:
server = change_info.rietveld
if not server.startswith("http://") and not server.startswith("https://"):
server = "http://" + server
commit_desc.append_footer('Review URL: %s/%d' % (server, change_info.issue))
handle, commit_filename = tempfile.mkstemp(text=True)
os.write(handle, commit_desc.description)
os.close(handle)
try:
handle, targets_filename = tempfile.mkstemp(text=True)
os.write(handle, "\n".join(change_info.GetFileNames()))
os.close(handle)
try:
commit_cmd += ['--file=' + commit_filename]
commit_cmd += ['--targets=' + targets_filename]
# Change the current working directory before calling commit.
output = ''
try:
output = RunShell(commit_cmd, True)
except subprocess2.CalledProcessError, e:
ErrorExit('Commit failed.\n%s' % e)
finally:
os.remove(commit_filename)
finally:
os.remove(targets_filename)
if output.find("Committed revision") != -1:
change_info.Delete()
if change_info.issue:
revision = re.compile(".*?\nCommitted revision (\d+)",
re.DOTALL).match(output).group(1)
viewvc_url = GetCodeReviewSetting('VIEW_VC')
if viewvc_url and revision:
change_info.append_footer('Committed: ' + viewvc_url + revision)
elif revision:
change_info.append_footer('Committed: ' + revision)
change_info.CloseIssue()
props = change_info.RpcServer().get_issue_properties(
change_info.issue, False)
patch_num = len(props['patchsets'])
comment = "Committed patchset #%d manually as r%s" % (patch_num, revision)
comment += ' (presubmit successful).' if not bypassed else '.'
change_info.AddComment(comment)
return 0
def CMDchange(args):
"""Creates or edits a changelist.
Only scans the current directory and subdirectories.
"""
# Verify the user is running the change command from a read-write checkout.
svn_info = SVN.CaptureLocalInfo([], '.')
if not svn_info:
ErrorExit("Current checkout is unversioned. Please retry with a versioned "
"directory.")
if len(args) == 0:
# Generate a random changelist name.
changename = GenerateChangeName()
elif args[0] == '--force':
changename = GenerateChangeName()
else:
changename = args[0]
change_info = ChangeInfo.Load(changename, GetRepositoryRoot(), False, True)
if len(args) == 2:
if not os.path.isfile(args[1]):
ErrorExit('The change "%s" doesn\'t exist.' % args[1])
f = open(args[1], 'rU')
override_description = f.read()
f.close()
else:
override_description = None
if change_info.issue and not change_info.NeedsUpload():
try:
description = change_info.GetIssueDescription()
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = 0
change_info.Save()
else:
        ErrorExit("Error getting the description from Rietveld: " + str(err))
else:
if override_description:
description = override_description
else:
description = change_info.description
other_files = GetFilesNotInCL()
# Edited files (as opposed to files with only changed properties) will have
# a letter for the first character in the status string.
file_re = re.compile(r"^[a-z].+\Z", re.IGNORECASE)
affected_files = [x for x in other_files if file_re.match(x[0])]
unaffected_files = [x for x in other_files if not file_re.match(x[0])]
description = description.rstrip() + '\n'
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + change_info.GetLocalRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.GetFiles()]))
if change_info.Exists():
text += (separator2 +
'\n'.join([f[0] + f[1] for f in affected_files]) + '\n')
else:
text += ('\n'.join([f[0] + f[1] for f in affected_files]) + '\n' +
separator2)
text += '\n'.join([f[0] + f[1] for f in unaffected_files]) + '\n'
result = gclient_utils.RunEditor(text, False)
if not result:
ErrorExit('Running editor failed')
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n%r" % result)
# Update the CL description if it has changed.
new_description = split_result[0]
cl_files_text = split_result[1]
if new_description != description or override_description:
change_info.force_description(new_description)
new_cl_files = []
for line in cl_files_text.splitlines():
if not len(line):
continue
if line.startswith("---"):
break
status = line[:7]
filename = line[7:]
new_cl_files.append((status, filename))
if (not len(change_info.GetFiles()) and not change_info.issue and
not len(new_description) and not new_cl_files):
ErrorExit("Empty changelist not saved")
change_info._files = new_cl_files
change_info.Save()
if svn_info.get('URL', '').startswith('http:'):
Warn("WARNING: Creating CL in a read-only checkout. You will need to "
"commit using a commit queue!")
print change_info.name + " changelist saved."
if change_info.MissingTests():
Warn("WARNING: " + MISSING_TEST_MSG)
# Update the Rietveld issue.
if change_info.issue and change_info.NeedsUpload():
change_info.UpdateRietveldDescription()
change_info.Save()
return 0
@need_change_and_args
def CMDlint(change_info, args):
"""Runs cpplint.py on all the files in the change list.
Checks all the files in the changelist for possible style violations.
"""
# Access to a protected member _XX of a client class
# pylint: disable=W0212
try:
import cpplint
import cpplint_chromium
except ImportError:
ErrorExit("You need to install cpplint.py to lint C++ files.")
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(change_info.GetLocalRoot())
try:
# Process cpplints arguments if any.
filenames = cpplint.ParseArguments(args + change_info.GetFileNames())
white_list = GetCodeReviewSetting("LINT_REGEX")
if not white_list:
white_list = DEFAULT_LINT_REGEX
white_regex = re.compile(white_list)
black_list = GetCodeReviewSetting("LINT_IGNORE_REGEX")
if not black_list:
black_list = DEFAULT_LINT_IGNORE_REGEX
black_regex = re.compile(black_list)
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print "Ignoring file %s" % filename
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print "Skipping file %s" % filename
finally:
os.chdir(previous_cwd)
print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
return 1
def DoPresubmitChecks(change_info, committing, may_prompt):
"""Imports presubmit, then calls presubmit.DoPresubmitChecks."""
root_presubmit = GetCachedFile('PRESUBMIT.py', use_root=True)
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
output = presubmit_support.DoPresubmitChecks(
change=change,
committing=committing,
verbose=False,
output_stream=sys.stdout,
input_stream=sys.stdin,
default_presubmit=root_presubmit,
may_prompt=may_prompt,
rietveld_obj=change_info.RpcServer())
if not output.should_continue() and may_prompt:
# TODO(dpranke): move into DoPresubmitChecks(), unify cmd line args.
print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
return output
@no_args
def CMDchanges():
"""Lists all the changelists and their files."""
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
print "\n--- Changelist " + change_info.name + ":"
for filename in change_info.GetFiles():
print "".join(filename)
return 0
@no_args
def CMDdeleteempties():
"""Delete all changelists that have no files."""
print "\n--- Deleting:"
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
if not len(change_info.GetFiles()):
print change_info.name
change_info.Delete()
return 0
@no_args
def CMDnothave():
"""Lists files unknown to Subversion."""
for filename in UnknownFiles():
print "? " + "".join(filename)
return 0
@attrs(usage='<svn options>')
def CMDdiff(args):
"""Diffs all files in the changelist or all files that aren't in a CL."""
files = None
if args:
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
files = change_info.GetFileNames()
else:
files = [f[1] for f in GetFilesNotInCL()]
root = GetRepositoryRoot()
cmd = ['svn', 'diff']
cmd.extend([os.path.join(root, x) for x in files])
cmd.extend(args)
return RunShellWithReturnCode(cmd, print_output=True)[1]
@no_args
def CMDsettings():
"""Prints code review settings for this checkout."""
# Force load settings
GetCodeReviewSetting("UNKNOWN")
del CODEREVIEW_SETTINGS['__just_initialized']
print '\n'.join(("%s: %s" % (str(k), str(v))
for (k,v) in CODEREVIEW_SETTINGS.iteritems()))
return 0
@need_change
def CMDdescription(change_info):
"""Prints the description of the specified change to stdout."""
print change_info.description
return 0
def CMDdelete(args):
"""Deletes a changelist."""
if not len(args) == 1:
ErrorExit('You need to pass a change list name')
filepath = GetChangelistInfoFile(args[0])
if not os.path.isfile(filepath):
ErrorExit('You need to pass a valid change list name')
os.remove(filepath)
return 0
def CMDtry(args):
"""Sends the change to the tryserver to do a test run on your code.
  To send multiple changes as one patch, use a comma-separated list of
changenames. Use 'gcl help try' for more information!"""
# When the change contains no file, send the "changename" positional
# argument to trychange.py.
# When the command is 'try' and --patchset is used, the patch to try
# is on the Rietveld server.
if not args:
ErrorExit("You need to pass a change list name")
if args[0].find(',') != -1:
change_info = LoadChangelistInfoForMultiple(args[0], GetRepositoryRoot(),
True, True)
else:
change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(),
True, True)
if change_info.GetFiles():
args = args[1:]
else:
change_info = None
return TryChange(change_info, args, swallow_exception=False)
@attrs(usage='<old-name> <new-name>')
def CMDrename(args):
"""Renames an existing change."""
if len(args) != 2:
ErrorExit("Usage: gcl rename <old-name> <new-name>.")
src, dst = args
src_file = GetChangelistInfoFile(src)
if not os.path.isfile(src_file):
ErrorExit("Change '%s' does not exist." % src)
dst_file = GetChangelistInfoFile(dst)
if os.path.isfile(dst_file):
ErrorExit("Change '%s' already exists; pick a new name." % dst)
os.rename(src_file, dst_file)
print "Change '%s' renamed '%s'." % (src, dst)
return 0
def CMDpassthru(args):
"""Everything else that is passed into gcl we redirect to svn.
  It expects a changelist name as the second argument and expands it into that
  changelist's file names before handing the command to svn.
"""
if not args or len(args) < 2:
ErrorExit("You need to pass a change list name for this svn fall-through "
"command")
cl_name = args[1]
args = ["svn", args[0]]
if len(args) > 1:
root = GetRepositoryRoot()
change_info = ChangeInfo.Load(cl_name, root, True, True)
args.extend([os.path.join(root, x) for x in change_info.GetFileNames()])
return RunShellWithReturnCode(args, print_output=True)[1]
def Command(name):
return getattr(sys.modules[__name__], 'CMD' + name, None)
def GenUsage(command):
"""Modify an OptParse object with the function's documentation."""
obj = Command(command)
display = command
more = getattr(obj, 'usage', '')
if command == 'help':
display = '<command>'
need_change_val = ''
if getattr(obj, 'need_change', None):
need_change_val = ' <change_list>'
options = ' [options]'
if getattr(obj, 'no_args', None):
options = ''
res = 'Usage: gcl %s%s%s %s\n\n' % (display, need_change_val, options, more)
res += re.sub('\n ', '\n', obj.__doc__)
return res
def CMDhelp(args):
"""Prints this help or help for the given command."""
if args and 'CMD' + args[0] in dir(sys.modules[__name__]):
print GenUsage(args[0])
# These commands defer to external tools so give this info too.
if args[0] == 'try':
TryChange(None, ['--help'], swallow_exception=False)
if args[0] == 'upload':
upload.RealMain(['upload.py', '--help'])
return 0
print GenUsage('help')
print sys.modules[__name__].__doc__
print 'version ' + __version__ + '\n'
print('Commands are:\n' + '\n'.join([
' %-12s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
return 0
def main(argv):
if sys.hexversion < 0x02060000:
print >> sys.stderr, (
'\nYour python version %s is unsupported, please upgrade.\n' %
sys.version.split(' ', 1)[0])
return 2
if not argv:
argv = ['help']
command = Command(argv[0])
# Help can be run from anywhere.
if command == CMDhelp:
return command(argv[1:])
try:
GetRepositoryRoot()
except (gclient_utils.Error, subprocess2.CalledProcessError):
print >> sys.stderr, 'To use gcl, you need to be in a subversion checkout.'
return 1
  # Create the directories where we store information about changelists if they
  # don't exist.
try:
if not os.path.exists(GetInfoDir()):
os.mkdir(GetInfoDir())
if not os.path.exists(GetChangesDir()):
os.mkdir(GetChangesDir())
if not os.path.exists(GetCacheDir()):
os.mkdir(GetCacheDir())
if command:
return command(argv[1:])
# Unknown command, try to pass that to svn
return CMDpassthru(argv)
except (gclient_utils.Error, subprocess2.CalledProcessError), e:
print >> sys.stderr, 'Got an exception'
print >> sys.stderr, str(e)
return 1
except upload.ClientLoginError, e:
print >> sys.stderr, 'Got an exception logging in to Rietveld'
print >> sys.stderr, str(e)
return 1
except urllib2.HTTPError, e:
if e.code != 500:
raise
print >> sys.stderr, (
'AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
'and retry or visit go/isgaeup.\n%s') % (e.code, str(e))
return 1
if __name__ == "__main__":
fix_encoding.fix_encoding()
sys.exit(main(sys.argv[1:]))
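# A hedged usage sketch (not part of the original module): the CMD* handlers
# above map to gcl sub-commands, so a typical review cycle looks roughly like
# the following; the changelist name and flags are illustrative.
#   gcl change mychange        # create or edit the changelist
#   gcl upload mychange        # send it to Rietveld for review
#   gcl try mychange           # optionally run it on the try server
#   gcl commit mychange        # commit once the review passes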
|
bsd-3-clause
| 6,702,259,805,813,352,000
| 32.757844
| 80
| 0.648548
| false
| 3.736711
| true
| false
| false
|
supersteph/supersteph.github.io
|
word2vec_as_MF.py
|
1
|
12494
|
import matplotlib.pyplot as plt
import os
import csv
import pickle
import operator
import numpy as np
from numpy.linalg import svd, qr
from scipy.spatial.distance import cosine
from scipy.sparse.linalg import svds
class Word2vecMF(object):
def __init__(self):
"""
Main class for working with word2vec as MF.
D -- word-context co-occurrence matrix;
B -- such matrix that B_cw = k*(#c)*(#w)/|D|;
C, W -- factors of matrix D decomposition;
vocab -- vocabulary of words from data;
        inv_vocab -- inverse of the vocabulary dictionary.
"""
self.D = None
self.B = None
self.C = None
self.W = None
self.vocab = None
self.inv_vocab = None
############ Create training corpus from raw sentences ################
def create_vocabulary(self, data, r):
"""
Create a vocabulary from a list of sentences,
eliminating words which occur less than r times.
"""
prevocabulary = {}
for sentence in data:
for word in sentence:
if not prevocabulary.has_key(word):
prevocabulary[word] = 1
else:
prevocabulary[word] += 1
vocabulary = {}
idx = 0
for word in prevocabulary:
if (prevocabulary[word] >= r):
vocabulary[word] = idx
idx += 1
return vocabulary
def create_matrix_D(self, data, window_size=5):
"""
Create a co-occurrence matrix D from training corpus.
"""
dim = len(self.vocab)
D = np.zeros((dim, dim))
s = window_size/2
for sentence in data:
l = len(sentence)
for i in xrange(l):
for j in xrange(max(0,i-s), min(i+s+1,l)):
if (i != j and self.vocab.has_key(sentence[i])
and self.vocab.has_key(sentence[j])):
c = self.vocab[sentence[j]]
w = self.vocab[sentence[i]]
D[c][w] += 1
return D
def create_matrix_B(self, k):
"""
Create matrix B (defined in init).
"""
c_ = self.D.sum(axis=1)
w_ = self.D.sum(axis=0)
P = self.D.sum()
w_v, c_v = np.meshgrid(w_, c_)
B = k*(w_v*c_v)/float(P)
return B
######################### Necessary functions #########################
def sigmoid(self, X):
"""
Sigmoid function sigma(x)=1/(1+e^{-x}) of matrix X.
"""
Y = X.copy()
Y[X>20] = 1-1e-6
Y[X<-20] = 1e-6
Y[(X<20)&(X>-20)] = 1 / (1 + np.exp(-X[(X<20)&(X>-20)]))
return Y
def sigma(self, x):
"""
Sigmoid function of element x.
"""
if (x>20):
return 1-1e-6
if (x<-20):
return 1e-6
else:
return 1 / (1 + np.exp(-x))
def MF(self, C, W):
"""
Objective MF(D,C^TW) we want to minimize.
"""
X = C.T.dot(W)
MF = self.D*np.log(self.sigmoid(X)) + self.B*np.log(self.sigmoid(-X))
return -MF.mean()
def grad_MF(self, C, W):
"""
Gradient of the functional MF(D,C^TW) over C^TW.
"""
X = C.T.dot(W)
grad = self.D*self.sigmoid(-X) - self.B*self.sigmoid(X)
return grad
################# Alternating minimization algorithm ##################
def alt_min(self, eta=1e-7, d=100, MAX_ITER=1, from_iter=0, display=0,
init=(False, None, None), save=(False, None)):
"""
        Alternating minimization algorithm for word2vec matrix factorization.
"""
# Initialization
if (init[0]):
self.C = init[1]
self.W = init[2]
else:
self.C = np.random.rand(d, self.D.shape[0])
self.W = np.random.rand(d, self.D.shape[1])
if (save[0] and from_iter==0):
self.save_CW(save[1], 0)
for it in xrange(from_iter, from_iter+MAX_ITER):
if (display):
print "Iter #:", it+1
gradW = (self.C).dot(self.grad_MF(self.C, self.W))
self.W = self.W + eta*gradW
gradC = self.W.dot(self.grad_MF(self.C, self.W).T)
self.C = self.C + eta*gradC
if (save[0]):
self.save_CW(save[1], it+1)
#################### Projector splitting algorithm ####################
def projector_splitting(self, eta=5e-6, d=100,
MAX_ITER=1, from_iter=0, display=0,
init=(False, None, None), save=(False, None)):
"""
Projector splitting algorithm for word2vec matrix factorization.
"""
# Initialization
if (init[0]):
self.C = init[1]
self.W = init[2]
else:
self.C = np.random.rand(d, self.D.shape[0])
self.W = np.random.rand(d, self.D.shape[1])
if (save[0] and from_iter==0):
self.save_CW(save[1], 0)
X = (self.C).T.dot(self.W)
for it in xrange(from_iter, from_iter+MAX_ITER):
if (display):
print "Iter #:", it+1
U, S, V = svds(X, d)
S = np.diag(S)
V = V.T
self.C = U.dot(np.sqrt(S)).T
self.W = np.sqrt(S).dot(V.T)
if (save[0]):
self.save_CW(save[1], it+1)
F = self.grad_MF(self.C, self.W)
#mask = np.random.binomial(1, .5, size=F.shape)
#F = F * mask
U, _ = qr((X + eta*F).dot(V))
V, S = qr((X + eta*F).T.dot(U))
V = V.T
S = S.T
X = U.dot(S).dot(V)
def stochastic_ps(self, eta=5e-6, batch_size=100, d=100,
MAX_ITER=1, from_iter=0, display=0,
init=(False, None, None), save=(False, None)):
"""
        Stochastic version of projector splitting.
"""
# Initialization
if (init[0]):
self.C = init[1]
self.W = init[2]
else:
self.C = np.random.rand(d, self.D.shape[0])
self.W = np.random.rand(d, self.D.shape[1])
if (save[0] and from_iter==0):
self.save_CW(save[1], 0)
pw = self.D.sum(axis=0) / self.D.sum()
pc_w = self.D / self.D.sum(axis=0)
X = (self.C).T.dot(self.W)
for it in xrange(from_iter, from_iter+MAX_ITER):
if (display):
print "Iter #:", it+1
U, S, V = svds(X, d)
S = np.diag(S)
V = V.T
self.C = U.dot(np.sqrt(S)).T
self.W = np.sqrt(S).dot(V.T)
if (save[0]):
self.save_CW(save[1], it+1)
# Calculate stochastic gradient matrix
F = np.zeros_like(self.D)
words = np.random.choice(self.D.shape[1], batch_size, p=pw)
for w in words:
contexts = np.random.choice(self.D.shape[0], 4, p=pc_w[:,w])
for c in contexts:
F[c,w] += self.sigma(X[c, w])
negatives = np.random.choice(self.D.shape[0], 5, p=pw)
for c in negatives:
F[c,w] -= 0.2 * self.sigma(X[c, w])
U, _ = qr((X + eta*F).dot(V))
V, S = qr((X + eta*F).T.dot(U))
V = V.T
S = S.T
X = U.dot(S).dot(V)
#######################################################################
############################## Data flow ##############################
#######################################################################
########################## Data to Matrices ###########################
def data_to_matrices(self, sentences, r, k, to_file):
"""
Process raw sentences, create word dictionary, matrix D and matrix B
then save them to file.
"""
self.vocab = self.create_vocabulary(sentences, r)
self.D = self.create_matrix_D(sentences)
self.B = self.create_matrix_B(k)
sorted_vocab = sorted(self.vocab.items(), key=operator.itemgetter(1))
vocab_to_save = np.array([item[0] for item in sorted_vocab])
np.savez(open(to_file, 'wb'), vocab=vocab_to_save, D=self.D, B=self.B)
######################### Matrices to Factors ##########################
def load_matrices(self, from_file):
"""
Load word dictionary, matrix D and matrix B from file.
"""
matrices = np.load(open(from_file, 'rb'))
self.D = matrices['D']
self.B = matrices['B']
self.vocab = {}
for i, word in enumerate(matrices['vocab']):
self.vocab[word] = i
self.inv_vocab = {v: k for k, v in self.vocab.items()}
def save_CW(self, to_folder, iteration):
"""
Save factors C and W (from some iteration) to some folder.
"""
if not os.path.exists(to_folder):
os.makedirs(to_folder)
pref = str(iteration)
np.savez(open(to_folder+'/C'+pref+'.npz', 'wb'), C=self.C)
np.savez(open(to_folder+'/W'+pref+'.npz', 'wb'), W=self.W)
########################### Factors to MF #############################
def load_CW(self, from_folder, iteration):
"""
Load factors C and W (from some iteration) from folder.
"""
if not os.path.exists(from_folder):
raise NameError('No such directory')
pref = str(iteration)
C = np.load(open(from_folder+'/C'+pref+'.npz', 'rb'))['C']
W = np.load(open(from_folder+'/W'+pref+'.npz', 'rb'))['W']
return C, W
def factors_to_MF(self, from_folder, to_file, MAX_ITER, from_iter=0):
"""
Calculate MF for given sequence of factors C and W
and save result to some file.
"""
MFs = np.zeros(MAX_ITER)
for it in xrange(from_iter, from_iter+MAX_ITER):
C, W = self.load_CW(from_folder, it)
MFs[it-from_iter] = self.MF(C, W)
np.savez(open(to_file, 'wb'), MF=MFs)
############################ MF to Figures ############################
def load_MF(self, from_file):
"""
Load MFs from file.
"""
        MFs = np.load(open(from_file, 'rb'))['MF']
return MFs
#######################################################################
######################### Linquistic metrics ##########################
#######################################################################
def word_vector(self, word, W):
"""
Get vector representation of a word.
"""
if word in self.vocab:
vec = W[:,int(self.vocab[word])]
else:
print "No such word in vocabulary."
vec = None
return vec
def nearest_words(self, word, top=20, display=False):
"""
Find the nearest words to the word
according to the cosine similarity.
"""
W = self.W / np.linalg.norm(self.W, axis=0)
if (type(word)==str):
vec = self.word_vector(word, W)
else:
vec = word / np.linalg.norm(word)
cosines = (vec.T).dot(W)
args = np.argsort(cosines)[::-1]
nws = []
for i in xrange(1, top+1):
nws.append(self.inv_vocab[args[i]])
if (display):
print self.inv_vocab[args[i]], round(cosines[args[i]],3)
return nws
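# A hedged usage sketch (not part of the original module): a minimal end-to-end
# run of Word2vecMF on a made-up toy corpus. The corpus, the dimension d and the
# iteration count are illustrative assumptions, not values from the repository.
if __name__ == '__main__':
    toy_corpus = [['the', 'cat', 'sat'], ['the', 'dog', 'sat'], ['the', 'cat', 'ran']] * 20
    model = Word2vecMF()
    model.vocab = model.create_vocabulary(toy_corpus, r=1)
    model.inv_vocab = {v: k for k, v in model.vocab.items()}
    model.D = model.create_matrix_D(toy_corpus, window_size=5)
    model.B = model.create_matrix_B(k=5)
    # Factorize D with the projector splitting iteration defined above.
    model.projector_splitting(eta=5e-6, d=2, MAX_ITER=10)
    # Inspect the learned embeddings.
    print model.nearest_words('cat', top=2)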
|
mit
| 4,738,436,276,298,297,000
| 30.08209
| 78
| 0.421722
| false
| 3.830166
| false
| false
| false
|
logston/pester
|
models.py
|
1
|
5903
|
import re
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
# phone number regex
pnum_pattern = re.compile(r'[0-9]{10}')
def validate_pnum(pnum):
"""Raise validation error if not a 10 digit phone number"""
if not re.match(pnum_pattern, pnum):
raise ValidationError(u'%s is not a valid phone number'%pnum)
class API(models.Model):
    """Model detailing the API params"""
name = models.CharField(max_length=32, unique=True)
key = models.CharField(max_length=200)
params = models.TextField(blank=True)
def __unicode__(self):
return self.name
class Carrier(models.Model):
"""Model connecting cellular SMS providers to email addresses"""
name = models.CharField(max_length=32)
gateway = models.CharField(max_length=64)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class User(models.Model):
"""Model describing a user of the Pester app"""
first_name = models.CharField(max_length=16)
last_name = models.CharField(max_length=16)
email = models.EmailField(unique=True)
join_date = models.DateTimeField(auto_now_add=True)
phone_number = models.CharField(
validators=[validate_pnum],
unique=True,
max_length=10)
carrier = models.ForeignKey(Carrier)
def __unicode__(self):
return (self.last_name+', '+self.first_name+
' -- e['+self.email+'] p['+self.phone_number+']')
class Recipient(models.Model):
    """Model describing a potential recipient of a pester"""
first_name = models.CharField(max_length=16)
last_name = models.CharField(max_length=16)
email = models.EmailField(unique=True)
phone_number = models.CharField(
validators=[validate_pnum],
unique=True,
max_length=10)
carrier = models.ForeignKey(Carrier)
created_by = models.ForeignKey(User)
def __unicode__(self):
return (self.last_name+', '+self.first_name+
' -- e['+self.email+'] p['+self.phone_number+']')
class Pattern(models.Model):
"""Model describing a sending pattern for a Pestering"""
name = models.CharField(max_length=32)
description = models.CharField(max_length=256)
code = models.CharField(max_length=32)
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class Pestering(models.Model):
"""Model describing a pestering from User to Recipient"""
user = models.ForeignKey(User)
recipient = models.ForeignKey(Recipient)
search_term = models.CharField(max_length=64)
pattern = models.ForeignKey(Pattern)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
NEITHER = 'N'
EMAIL = 'E'
TEXT = 'T'
BOTH = 'B'
NOTIFY_METHODS = (
(NEITHER, 'None!'),
(EMAIL, 'By Email'),
(TEXT, 'By Text'),
(BOTH, 'By Text and Email'),
)
notify_user_method = models.CharField(
max_length=1,
choices=NOTIFY_METHODS,
default=EMAIL)
notify_recipient_method = models.CharField(
max_length=1,
choices=NOTIFY_METHODS,
default=TEXT)
title = models.CharField(max_length=140)
OFF, MEDIUM, HIGH = 'O', 'M', 'H'
ADULT_LEVELS = (
(OFF, 'Off'),
(MEDIUM, 'Medium'),
(HIGH, 'High'))
adult_safety_level = models.CharField(
max_length=1,
choices=ADULT_LEVELS,
default=MEDIUM)
def __unicode__(self):
return ''.join((str(self.user.first_name),
' -> ',
str(self.recipient.first_name),
' | ',
str(self.search_term),
' | ',
str(self.pattern)))
class APICall(models.Model):
"""Model to record api calls"""
api = models.ForeignKey(API, null=True)
call_time = models.DateTimeField(auto_now_add=True)
pestering = models.ForeignKey(Pestering, null=True)
def __unicode__(self):
return str(self.api) + ' | ' + str(self.pestering.search_term)
class ImageData(models.Model):
    """Model describing an image result gathered for a search term"""
search_term = models.CharField(max_length=64)
url = models.URLField(unique=True)
file_type = models.CharField(max_length=64)
width = models.PositiveSmallIntegerField()
height = models.PositiveSmallIntegerField()
OFF, MEDIUM, HIGH = 'O', 'M', 'H'
ADULT_LEVELS = (
(OFF, 'Off'),
(MEDIUM, 'Medium'),
(HIGH, 'High'))
adult_safety_level = models.CharField(
max_length=1,
choices=ADULT_LEVELS,
default=MEDIUM)
def __unicode__(self):
return self.search_term+' ('+self.url+')'
class PesteringManagerRun(models.Model):
"""Model to record cron jobs and their success"""
run_time = models.DateTimeField(auto_now_add=True)
completed = models.NullBooleanField()
def __unicode__(self):
return str(self.run_time)
class PesteringAttempt(models.Model):
"""Model to record attempted Pesterings"""
pestering = models.ForeignKey(Pestering)
pestering_manager_run = models.ForeignKey(PesteringManagerRun)
image = models.ForeignKey(ImageData, null=True, blank=True, default=None)
attempt_time = models.DateTimeField(auto_now_add=True)
success = models.NullBooleanField()
def __unicode__(self):
return str(self.pestering)+' sent at '+str(self.attempt_time)
class PesteringException(models.Model):
"""Model to record exceptions of Pesterings"""
pestering_attempt = models.ForeignKey(PesteringAttempt)
exception_traceback = models.TextField()
def __unicode__(self):
return 'Exception for Pestering Attempt '+str(self.pestering_attempt)
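# A hedged usage sketch (not part of the original module): with Django's default
# model managers, the objects above relate roughly as follows. All field values
# are illustrative, and `some_pattern`, `now` and `later` stand in for a Pattern
# row and two datetimes.
#   carrier = Carrier.objects.create(name='ExampleTel', gateway='sms.example.com')
#   user = User.objects.create(first_name='Ada', last_name='Lovelace',
#                              email='ada@example.com', phone_number='5550000001',
#                              carrier=carrier)
#   recipient = Recipient.objects.create(first_name='Bob', last_name='Byron',
#                                        email='bob@example.com',
#                                        phone_number='5550000002',
#                                        carrier=carrier, created_by=user)
#   Pestering.objects.create(user=user, recipient=recipient, search_term='cats',
#                            pattern=some_pattern, start_time=now, end_time=later,
#                            title='Daily cat picture')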
|
gpl-2.0
| -8,974,291,282,706,841,000
| 31.977654
| 77
| 0.623581
| false
| 3.738442
| false
| false
| false
|
GoogleCloudPlatform/mimus-game-simulator
|
mimus_cfg.py
|
1
|
3414
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=invalid-name
"""Config dictionary for mimus."""
import os
cfg = c = {}
# GCP Auth parameters
c['gcp'] = {}
c['gcp']['project'] = os.getenv('GCP_PROJECT', 'joeholley-mimus01')
# db api parameters (for choosing different db backends)
c['db_api'] = {}
c['db_api']['dir'] = 'db_api'
# db connection parameters
c['db_con'] = {}
c['db_con']['timeout'] = 30
# DB API Cloud Pub/Sub connection parameters
c['pubsub'] = {}
c['pubsub']['topic'] = os.getenv('DB_WORKER_TOPIC', 'queriestoprocess')
c['pubsub']['sub'] = os.getenv('DB_WORKER_SUB', 'dbworkersub')
# DB API Redis connection parameters
c['redis_con'] = {}
c['redis_con']['db'] = 0
# Docker env var format: REDIS_PORT=tcp://172.17.0.2:6379
redis_host, redis_port = os.getenv(
'REDIS_PORT', 'tcp://mimus-redis:6379').split('/')[-1].split(':')
c['redis_con']['hostname'] = redis_host
c['redis_con']['port'] = int(redis_port)
c['redis_con']['password'] = '9dc1b3ae-584c-434e-b899-da2c8ad093fb'
# Player parameters
c['player'] = {}
c['player']['initial_cards'] = {}
c['player']['initial_cards']['std'] = 5
c['player']['initial_cards']['stone'] = 1
c['player']['initial_loadout'] = {}
c['player']['initial_loadout']['stones'] = 5
c['player']['initial_loadout']['points'] = 1000
c['player']['initial_loadout']['slots'] = 50
c['player']['initial_loadout']['stamina'] = 5
# Card (unit) parameters
c['card'] = {}
c['card']['xp_limit'] = 10000 # Max XP for a unit
# Parameters for evolving ('consuming') cards
c['evolve'] = {}
c['evolve']['min_time'] = 3 # min time this action will take
c['evolve']['max_time'] = 3 # max time this action will take
c['evolve']['fail_time'] = 3 # time this action takes if it fails
c['evolve']['min_cards'] = 2 # min number of cards consumed
c['evolve']['max_cards'] = 5 # max number of cards consumed
# Parameters for leveling ('combining') cards
c['level'] = {}
c['level']['min_time'] = 3 # min time this action will take
c['level']['max_time'] = 3 # max time this action will take
c['level']['fail_time'] = 3 # time this action takes if it fails
c['level']['min_cards'] = 1 # min number of cards consumed
c['level']['max_cards'] = 5 # max number of cards consumed
# Stage parameters
c['stage'] = {}
c['stage']['min_time'] = 30 # min time this action will take
c['stage']['max_time'] = 90 # max time this action will take
c['stage']['fail_time'] = 30 # time this action takes if it fails
c['stage']['failure_chance'] = 0.90 # chance to simulate player failing stage
c['stage']['points_per_run'] = 10 # friends points earned per stage played
# Loot tables
c['loot_tables'] = {}
c['loot_tables']['std'] = {'drop_chance': 0.35, 'min': 1, 'max': 500}
c['loot_tables']['point'] = {'drop_chance': 1.00, 'min': 1, 'max': 750}
c['loot_tables']['stone'] = {'drop_chance': 1.00, 'min': 500, 'max': 1000}
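# A hedged usage sketch (not part of the original module): other modules
# presumably just import the dictionary and index into it, e.g.:
if __name__ == '__main__':
    # Show the Redis endpoint and stage timing the simulator would use.
    print 'redis at %s:%d' % (c['redis_con']['hostname'], c['redis_con']['port'])
    print 'stage takes %d-%d seconds' % (c['stage']['min_time'], c['stage']['max_time'])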
|
apache-2.0
| 6,784,159,353,470,306,000
| 36.933333
| 78
| 0.654657
| false
| 3.075676
| false
| false
| false
|
akariv/redash
|
redash/handlers/api.py
|
1
|
6007
|
from flask_restful import Api
from werkzeug.wrappers import Response
from flask import make_response
from redash.utils import json_dumps
from redash.handlers.base import org_scoped_rule
from redash.handlers.alerts import AlertResource, AlertListResource, AlertSubscriptionListResource, AlertSubscriptionResource
from redash.handlers.dashboards import DashboardListResource, RecentDashboardsResource, DashboardResource, DashboardShareResource
from redash.handlers.data_sources import DataSourceTypeListResource, DataSourceListResource, DataSourceSchemaResource, DataSourceResource, DataSourcePauseResource
from redash.handlers.events import EventResource
from redash.handlers.queries import QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource
from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource
from redash.handlers.users import UserResource, UserListResource, UserInviteResource, UserResetPasswordResource
from redash.handlers.visualizations import VisualizationListResource
from redash.handlers.visualizations import VisualizationResource
from redash.handlers.widgets import WidgetResource, WidgetListResource
from redash.handlers.groups import GroupListResource, GroupResource, GroupMemberListResource, GroupMemberResource, \
GroupDataSourceListResource, GroupDataSourceResource
from redash.handlers.destinations import DestinationTypeListResource, DestinationResource, DestinationListResource
class ApiExt(Api):
def add_org_resource(self, resource, *urls, **kwargs):
urls = [org_scoped_rule(url) for url in urls]
return self.add_resource(resource, *urls, **kwargs)
api = ApiExt()
@api.representation('application/json')
def json_representation(data, code, headers=None):
# Flask-Restful checks only for flask.Response but flask-login uses werkzeug.wrappers.Response
if isinstance(data, Response):
return data
resp = make_response(json_dumps(data), code)
resp.headers.extend(headers or {})
return resp
api.add_org_resource(AlertResource, '/api/alerts/<alert_id>', endpoint='alert')
api.add_org_resource(AlertSubscriptionListResource, '/api/alerts/<alert_id>/subscriptions', endpoint='alert_subscriptions')
api.add_org_resource(AlertSubscriptionResource, '/api/alerts/<alert_id>/subscriptions/<subscriber_id>', endpoint='alert_subscription')
api.add_org_resource(AlertListResource, '/api/alerts', endpoint='alerts')
api.add_org_resource(DashboardListResource, '/api/dashboards', endpoint='dashboards')
api.add_org_resource(RecentDashboardsResource, '/api/dashboards/recent', endpoint='recent_dashboards')
api.add_org_resource(DashboardResource, '/api/dashboards/<dashboard_slug>', endpoint='dashboard')
api.add_org_resource(DashboardShareResource, '/api/dashboards/<dashboard_id>/share', endpoint='dashboard_share')
api.add_org_resource(DataSourceTypeListResource, '/api/data_sources/types', endpoint='data_source_types')
api.add_org_resource(DataSourceListResource, '/api/data_sources', endpoint='data_sources')
api.add_org_resource(DataSourceSchemaResource, '/api/data_sources/<data_source_id>/schema')
api.add_org_resource(DataSourcePauseResource, '/api/data_sources/<data_source_id>/pause')
api.add_org_resource(DataSourceResource, '/api/data_sources/<data_source_id>', endpoint='data_source')
api.add_org_resource(GroupListResource, '/api/groups', endpoint='groups')
api.add_org_resource(GroupResource, '/api/groups/<group_id>', endpoint='group')
api.add_org_resource(GroupMemberListResource, '/api/groups/<group_id>/members', endpoint='group_members')
api.add_org_resource(GroupMemberResource, '/api/groups/<group_id>/members/<user_id>', endpoint='group_member')
api.add_org_resource(GroupDataSourceListResource, '/api/groups/<group_id>/data_sources', endpoint='group_data_sources')
api.add_org_resource(GroupDataSourceResource, '/api/groups/<group_id>/data_sources/<data_source_id>', endpoint='group_data_source')
api.add_org_resource(EventResource, '/api/events', endpoint='events')
api.add_org_resource(QuerySearchResource, '/api/queries/search', endpoint='queries_search')
api.add_org_resource(QueryRecentResource, '/api/queries/recent', endpoint='recent_queries')
api.add_org_resource(QueryListResource, '/api/queries', endpoint='queries')
api.add_org_resource(QueryRefreshResource, '/api/queries/<query_id>/refresh', endpoint='query_refresh')
api.add_org_resource(QueryResource, '/api/queries/<query_id>', endpoint='query')
api.add_org_resource(QueryResultListResource, '/api/query_results', endpoint='query_results')
api.add_org_resource(QueryResultResource,
'/api/query_results/<query_result_id>',
'/api/queries/<query_id>/results.<filetype>',
'/api/queries/<query_id>/results/<query_result_id>.<filetype>',
endpoint='query_result')
api.add_org_resource(JobResource, '/api/jobs/<job_id>', endpoint='job')
api.add_org_resource(UserListResource, '/api/users', endpoint='users')
api.add_org_resource(UserResource, '/api/users/<user_id>', endpoint='user')
api.add_org_resource(UserInviteResource, '/api/users/<user_id>/invite', endpoint='user_invite')
api.add_org_resource(UserResetPasswordResource, '/api/users/<user_id>/reset_password', endpoint='user_reset_password')
api.add_org_resource(VisualizationListResource, '/api/visualizations', endpoint='visualizations')
api.add_org_resource(VisualizationResource, '/api/visualizations/<visualization_id>', endpoint='visualization')
api.add_org_resource(WidgetListResource, '/api/widgets', endpoint='widgets')
api.add_org_resource(WidgetResource, '/api/widgets/<int:widget_id>', endpoint='widget')
api.add_org_resource(DestinationTypeListResource, '/api/destinations/types', endpoint='destination_types')
api.add_org_resource(DestinationResource, '/api/destinations/<destination_id>', endpoint='destination')
api.add_org_resource(DestinationListResource, '/api/destinations', endpoint='destinations')
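# A hedged note (not part of the original module): ApiExt subclasses
# flask_restful.Api, so the module-level `api` object built above is presumably
# attached to the Flask application elsewhere in redash via the standard
# Flask-Restful hook, roughly:
#   api.init_app(app)   # `app` being redash's Flask application object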
|
bsd-2-clause
| 3,339,268,984,470,864,400
| 64.293478
| 162
| 0.786915
| false
| 3.780365
| false
| false
| false
|
ExcaliburZero/r-dailyprogrammer-solutions
|
2015/11/23-Funny-Plant.py
|
1
|
1154
|
# Problem: 242 [Easy] Funny Plant
# https://www.reddit.com/r/dailyprogrammer/comments/3twuwf/20151123_challenge_242_easy_funny_plant/
# Author: ExcaliburZero
# License: MIT
import fileinput
def main():
# Iterate over each of the input lines
for line in fileinput.input():
# Get the input values
line_contents = line.split()
people = int(line_contents[0])
starting_fruits = int(line_contents[1])
# Create a list of the plants
plants = [0] * starting_fruits
# Create a counter for weeks
weeks = 1
# Keep simulating weeks until there is enough food
while(people > sum(plants)):
# Increment the week counter
weeks += 1
# Increase the growth amount of each of the plants
for i in range(len(plants)):
plants[i] += 1
# Record the number of seeds
seeds = sum(plants)
# Create new plants from the seeds
plants = plants + [0] * seeds
# Print out the calculated result
print(weeks)
# Run the main function
if __name__ == '__main__':
main()
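# A hedged example (not part of the original solution): tracing the loop above
# for an input line of "10 2", the plants yield 2, 6 and 16 fruits after weeks
# 2, 3 and 4, so the program prints 4, e.g.:
#   echo "10 2" | python 2015/11/23-Funny-Plant.py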
|
mit
| 2,605,174,256,789,166,000
| 30.189189
| 99
| 0.587522
| false
| 3.846667
| false
| false
| false
|
jacobwindsor/pubchem-ranker
|
manage.py
|
1
|
1837
|
import sys
from flask_script import Manager
from CompoundRanker import app
from CompoundRanker.DataManipulators.CIDGatherer import CIDGatherer
from CompoundRanker.DataManipulators.PubChemAssayCounter import PubChemAssayCounter
from CompoundRanker.DataManipulators.PubChemPathwayCounter import PubChemPathwayCounter
from CompoundRanker.DataManipulators.DataGatherer import DataGatherer
from CompoundRanker.database import init_db, query_db
manager = Manager(app)
@manager.command
def initdb():
"""Initializes the database."""
init_db()
print('Initialized the database.')
@manager.command
def fillmetabs(path, dataset):
"""
Fill the metabolites table with data.
"""
# Get the CAS with CIDs
file = open(path, 'r')
gatherer = DataGatherer(file)
data = gatherer.harvest()
# Insert
gatherer.save(data, dataset)
print("Saved")
@manager.command
def fillcids(dataset):
"""Gather the CIDs from PubChem for the metabolites and save to pubchem_compounds table"""
query = "SELECT id FROM datasets WHERE name = ?"
try:
dataset_id = str(query_db(query, [dataset])[0]['id'])
except TypeError:
raise TypeError("No dataset with name '%s'" % dataset)
gatherer = CIDGatherer()
data = gatherer.harvest(dataset_id)
gatherer.save(data)
print("Saved!")
@manager.command
def fillcounts(dataset):
"""Run the counter (ranker) for the metabolites and save to database"""
query = "SELECT id FROM datasets WHERE name = ?"
try:
dataset_id = str(query_db(query, [dataset])[0]['id'])
except TypeError:
raise TypeError("No dataset with name '%s'" % dataset)
PubChemPathwayCounter().count(dataset_id).save()
PubChemAssayCounter().count(dataset_id).save()
print("Saved!")
if __name__ == "__main__":
manager.run()
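# A hedged usage sketch (not part of the original module): Flask-Script exposes
# each @manager.command function above as a sub-command, so the intended
# sequence is roughly as follows; the path and dataset name are illustrative.
#   python manage.py initdb
#   python manage.py fillmetabs path/to/metabolites.txt mydataset
#   python manage.py fillcids mydataset
#   python manage.py fillcounts mydataset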
|
mit
| -5,863,559,762,759,437,000
| 28.15873
| 94
| 0.702232
| false
| 3.420857
| false
| false
| false
|
ooici/coi-services
|
ion/services/sa/tcaa/test/test_terrestrial_endpoint.py
|
1
|
31848
|
#!/usr/bin/env python
"""
@package ion.services.sa.tcaa.test.test_terrestrial_endpoint
@file ion/services/sa/tcaa/test/test_terrestrial_endpoint.py
@author Edward Hunter
@brief Test cases for 2CAA terrestrial endpoint.
"""
__author__ = 'Edward Hunter'
# Pyon log and config objects.
from pyon.public import log
from pyon.public import CFG
# Standard imports.
import time
import os
import signal
import time
import unittest
from datetime import datetime
import uuid
import socket
import re
import random
# 3rd party imports.
import gevent
from gevent import spawn
from gevent.event import AsyncResult
from nose.plugins.attrib import attr
from mock import patch
# Pyon unittest support.
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.core.bootstrap import get_sys_name
from pyon.public import IonObject
from pyon.event.event import EventPublisher, EventSubscriber
from pyon.util.context import LocalContextMixin
from ion.services.sa.tcaa.terrestrial_endpoint import TerrestrialEndpoint
from ion.services.sa.tcaa.terrestrial_endpoint import TerrestrialEndpointClient
from interface.services.icontainer_agent import ContainerAgentClient
from interface.objects import TelemetryStatusType
from ion.services.sa.tcaa.r3pc import R3PCServer
from ion.services.sa.tcaa.r3pc import R3PCClient
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_process_queued
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_process_online
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_remote_late
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_get_clear_queue
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_pop_pending_queue
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_repeated_clear_pop
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_get_pending
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_persistent_queue
class FakeProcess(LocalContextMixin):
"""
A fake process used because the test case is not an ion process.
"""
name = ''
id=''
process_type = ''
@attr('INT', group='sa')
@patch.dict(CFG, {'endpoint':{'receive':{'timeout': 60}}})
class TestTerrestrialEndpoint(IonIntegrationTestCase):
"""
Test cases for 2CAA terrestrial endpoint.
"""
def setUp(self):
"""
Setup fake remote components.
Start remote server.
Set internal configuration and test variables.
Start container.
Start services.
Spawn endpoint.
Create and start subscribers.
"""
# Create fake remote client and server.
# Add clean up to shut down properly.
# Start remote server on a random port.
self._remote_server = R3PCServer(self.consume_req, self.remote_server_close)
self._remote_client = R3PCClient(self.consume_ack, self.remote_client_close)
self.addCleanup(self._remote_server.stop)
self.addCleanup(self._remote_client.stop)
self._other_port = self._remote_server.start('*', 0)
log.debug('Remote server binding to *:%i', self._other_port)
# Set internal variables.
self._other_host = 'localhost'
self._xs_name = 'remote1'
self._svc_name = 'terrestrial_endpoint'
self._listen_name = self._svc_name + self._xs_name
self._platform_resource_id = 'abc123'
self._resource_id = 'fake_id'
self._rmt_svc_name = 'fake_svc'
self._no_requests = 10
self._requests_sent = {}
self._results_recv = {}
self._workers = []
self._done_evt = AsyncResult()
self._queue_mod_evts = []
self._cmd_tx_evts = []
self._telem_evts = []
self._no_telem_evts = 0
self._no_queue_mod_evts = 0
self._no_cmd_tx_evts = 0
self._done_queue_mod_evts = AsyncResult()
self._done_telem_evts = AsyncResult()
self._done_cmd_tx_evts = AsyncResult()
# Start container.
        log.debug('Starting capability container.')
self._start_container()
# Bring up services in a deploy file (no need to message)
        log.info('Starting deploy services.')
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
# Create a container client.
log.debug('Creating container client.')
self._container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
# The following spawn config creates the process with the remote
# name tagged to the service name.
"""
listen_name = terrestrial_endpointremote1
2012-10-10 11:34:46,654 DEBUG ion.services.sa.tcaa.terrestrial_endpoint recv name: NP (ion_test_8257ab,terrestrial_endpointremote1,B: terrestrial_endpointremote1)
2012-10-10 11:34:46,654 DEBUG ion.services.sa.tcaa.terrestrial_endpoint startup listener recv name: NP (ion_test_8257ab,terrestrial_endpointremote1,B: terrestrial_endpointremote1)
2012-10-10 11:34:46,654 DEBUG ion.services.sa.tcaa.terrestrial_endpoint startup listener recv name: NP (ion_test_8257ab,Edwards-MacBook-Pro_local_2624.33,B: Edwards-MacBook-Pro_local_2624.33)
"""
# Create agent config.
endpoint_config = {
'other_host' : self._other_host,
'other_port' : self._other_port,
'this_port' : 0,
'xs_name' : self._xs_name,
'platform_resource_id' : self._platform_resource_id,
'process' : {
'listen_name' : self._listen_name
}
}
# Spawn the terrestrial enpoint process.
log.debug('Spawning terrestrial endpoint process.')
self._te_pid = self._container_client.spawn_process(
name='remote_endpoint_1',
module='ion.services.sa.tcaa.terrestrial_endpoint',
cls='TerrestrialEndpoint',
config=endpoint_config)
log.debug('Endpoint pid=%s.', str(self._te_pid))
# Create an endpoint client.
# The to_name may be either the process pid or
# the listen_name, which for this remote bridge
# is svc_name + remote_name as above.
self.te_client = TerrestrialEndpointClient(
process=FakeProcess(),
to_name=self._listen_name)
log.debug('Got te client %s.', str(self.te_client))
# Remember the terrestrial port.
self._this_port = self.te_client.get_port()
# Start the event publisher.
self._event_publisher = EventPublisher()
# Start the event subscriber.
self._event_subscriber = EventSubscriber(
event_type='PlatformEvent',
callback=self.consume_event,
origin=self._xs_name)
self._event_subscriber.start()
self._event_subscriber._ready_event.wait(timeout=CFG.endpoint.receive.timeout)
self.addCleanup(self._event_subscriber.stop)
# Start the result subscriber.
self._result_subscriber = EventSubscriber(
event_type='RemoteCommandResult',
origin=self._resource_id,
callback=self.consume_event)
self._result_subscriber.start()
self._result_subscriber._ready_event.wait(timeout=CFG.endpoint.receive.timeout)
self.addCleanup(self._result_subscriber.stop)
def consume_event(self, evt, *args, **kwargs):
"""
Test callback for events.
"""
log.debug('Got event: %s, args: %s, kwargs: %s',
str(evt), str(args), str(kwargs))
if evt.type_ == 'PublicPlatformTelemetryEvent':
self._telem_evts.append(evt)
if self._no_telem_evts > 0 and self._no_telem_evts == len(self._telem_evts):
self._done_telem_evts.set()
elif evt.type_ == 'RemoteQueueModifiedEvent':
self._queue_mod_evts.append(evt)
if self._no_queue_mod_evts > 0 and self._no_queue_mod_evts == len(self._queue_mod_evts):
self._done_queue_mod_evts.set()
elif evt.type_ == 'RemoteCommandTransmittedEvent':
self._cmd_tx_evts.append(evt)
if self._no_cmd_tx_evts > 0 and self._no_cmd_tx_evts == len(self._cmd_tx_evts):
self._done_cmd_tx_evts.set()
elif evt.type_ == 'RemoteCommandResult':
cmd = evt.command
self._results_recv[cmd.command_id] = cmd
if len(self._results_recv) == self._no_requests:
self._done_evt.set()
def on_link_up(self):
"""
Called by a test to simulate turning the link on.
"""
log.debug('Remote client connecting to localhost:%i.',
self._this_port)
self._remote_client.start('localhost', self._this_port)
# Publish a link up event to be caught by the endpoint.
log.debug('Publishing telemetry event.')
self._event_publisher.publish_event(
event_type='PlatformTelemetryEvent',
origin = self._platform_resource_id,
status = TelemetryStatusType.AVAILABLE)
def on_link_down(self):
"""
Called by a test to simulate turning the link off.
"""
self._remote_client.stop()
# Publish a link down event to be caught by the endpoint.
log.debug('Publishing telemetry event.')
self._event_publisher.publish_event(
event_type='PlatformTelemetryEvent',
origin=self._platform_resource_id,
status = TelemetryStatusType.UNAVAILABLE)
def consume_req(self, request):
"""
Remote request callback.
Fire a greenlet to do some fake work before returning via
the remote client to terrestrial endpoint.
"""
# Spawn a greenlet to sleep briefly with each request and
# then respond with a result through the remote client.
log.debug('Remote endpoint got request: %s', str(request))
greenlet = gevent.spawn(self.process_remote_request, request)
self._workers.append(greenlet)
def consume_ack(self, request):
"""
Remote ack callback.
"""
log.debug('Remote endpoint got ack: %s', str(request))
def process_remote_request(self, request):
"""
Process remote request.
Do random amount of fake work and enqueue result for return to
terrestrial endpoint.
"""
worktime = random.uniform(.1,3)
gevent.sleep(worktime)
result = {
'command_id' : request.command_id,
'result' : 'fake_result'
}
log.debug('Finished processing request: %s', str(request))
self._remote_client.enqueue(result)
def remote_server_close(self):
"""
Remote server closed callback.
"""
log.debug('The remote server closed.')
def remote_client_close(self):
"""
        Remote client closed callback.
"""
log.debug('The remote client closed.')
def make_fake_command(self, no):
"""
Build a fake command for use in tests.
"""
cmdstr = 'fake_cmd_%i' % no
cmd = IonObject('RemoteCommand',
resource_id=self._resource_id,
command=cmdstr,
args=['arg1', 23],
kwargs={'kwargs1':'someval'})
return cmd
def make_fake_svc_command(self, no):
"""
Build a fake command for use in tests.
"""
cmdstr = 'fake_cmd_%i' % no
cmd = IonObject('RemoteCommand',
svc_name=self._rmt_svc_name,
command=cmdstr,
args=['arg1', 23],
kwargs={'kwargs1':'someval'})
return cmd
def test_process_queued(self):
"""
test_process_queued
Test forwarding of queued commands upon link up.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_process_online(self):
"""
test_process_online
Test forwarding commands when the link is up.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
self.on_link_up()
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
gevent.sleep(.2)
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_remote_late(self):
"""
test_remote_late
Test simulates behavior when the remote side is initially unavailable.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
self.on_link_up()
gevent.sleep(2)
self._remote_server.stop()
self._remote_client.stop()
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
gevent.sleep(3)
self._remote_client.start('localhost', self._this_port)
self._remote_server.start('*', self._other_port)
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_get_clear_queue(self):
"""
test_get_clear_queue
Test endpoint queue get and clear manipulators.
"""
# Set up for events expected.
self._no_queue_mod_evts = self._no_requests * 2
# Queue commands.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Queue commands.
for i in range(self._no_requests):
cmd = self.make_fake_svc_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests * 2)
# Confirm get queue with id.
queue = self.te_client.get_queue(resource_id=self._resource_id)
self.assertEqual(len(queue), self._no_requests)
# Confirm get queue with svc name.
queue = self.te_client.get_queue(svc_name=self._rmt_svc_name)
self.assertEqual(len(queue), self._no_requests)
# Confirm get queue with bogus id.
queue = self.te_client.get_queue(resource_id='bogus_id')
self.assertEqual(len(queue), 0)
# Confirm get queue with bogus id.
queue = self.te_client.get_queue(svc_name='bogus_svc')
self.assertEqual(len(queue), 0)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with no id.
poped = self.te_client.clear_queue()
# Confirm queue mod event and mods.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests * 2)
self.assertEqual(len(queue), 0)
# Queue new commands and confirm event.
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests * 2
self._done_queue_mod_evts = AsyncResult()
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
for i in range(self._no_requests):
cmd = self.make_fake_svc_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with id.
poped = self.te_client.clear_queue(resource_id=self._resource_id)
# Confirm mods and mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests)
self.assertEqual(len(queue), self._no_requests)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with id.
poped = self.te_client.clear_queue(svc_name=self._rmt_svc_name)
# Confirm mods and mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests)
self.assertEqual(len(queue), 0)
# Queue new commands and confirm events.
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests
self._done_queue_mod_evts = AsyncResult()
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Clear queue with bogus id.
poped = self.te_client.clear_queue(resource_id='bogus id')
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 0)
self.assertEqual(len(queue), self._no_requests)
# Clear queue with bogus svc name.
poped = self.te_client.clear_queue(svc_name='bogus id')
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 0)
self.assertEqual(len(queue), self._no_requests)
# Clear queue and confirm empty.
self.te_client.clear_queue()
queue = self.te_client.get_queue()
self.assertEqual(len(queue), 0)
# Turn on link and wait a few seconds.
# Confirm no data or tx events arrive.
self.on_link_up()
gevent.sleep(2)
self.assertEqual(len(self._cmd_tx_evts), 0)
self.assertEqual(len(self._results_recv), 0)
self._no_telem_evts = 2
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
def test_pop_pending_queue(self):
"""
test_pop_pending_queue
Test endpoint queue pop manipulators.
"""
# Set up for events expected.
self._no_queue_mod_evts = self._no_requests
# Queue commands.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Pop a few commands from the queue, confirm events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 3
self._done_queue_mod_evts = AsyncResult()
cmd_ids = self._requests_sent.keys()[:3]
poped = []
for x in cmd_ids:
poped.append(self.te_client.pop_queue(x))
self._requests_sent.pop(x)
        # Try popping with illegal args. This should have no effect.
poped.append(self.te_client.pop_queue())
poped.append(self.te_client.pop_queue('bogus id'))
poped = [x for x in poped if x != None]
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 3)
self.assertEqual(len(queue), self._no_requests - 3)
# Turn on the link and verify that only the remaining commands
# get processed.
self._no_telem_evts = 2
self._no_requests = self._no_requests - 3
self._no_cmd_tx_evts = self._no_requests
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
def test_repeated_clear_pop(self):
"""
test_repeated_clear_pop
        Test repeated clearing and popping of the endpoint command queue.
"""
# Set up for events expected.
self._no_queue_mod_evts = self._no_requests
for i in range(3):
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests
self._done_queue_mod_evts = AsyncResult()
# Queue commands.
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with no id.
poped = self.te_client.clear_queue()
# Confirm queue mod event and mods.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests)
self.assertEqual(len(queue), 0)
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests
self._done_queue_mod_evts = AsyncResult()
# Queue commands.
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Pop a few commands from the queue, confirm events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 3
self._done_queue_mod_evts = AsyncResult()
cmd_ids = self._requests_sent.keys()[:3]
poped = []
for x in cmd_ids:
poped.append(self.te_client.pop_queue(x))
self._requests_sent.pop(x)
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 3)
self.assertEqual(len(queue), self._no_requests - 3)
self._no_telem_evts = 2
self._no_requests = self._no_requests - 3
self._no_cmd_tx_evts = self._no_requests
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
def test_get_pending(self):
"""
        test_get_pending
        Test retrieval of commands pending transmission while the link is up.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self.on_link_up()
self._no_requests = 3
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending(resource_id=self._resource_id)
for x in pending:
self.assertIn(x.command_id, self._requests_sent.keys())
self._no_requests = 10
self._done_evt = AsyncResult()
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
#@unittest.skip('Wait for verification of resource registry use.')
def test_persistent_queue(self):
"""
test_persistent_queue
Test ability of endpoint to restore a persistent queue, survive
reboot, etc.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Stop and restart the endpoint process.
# Verify the queue is restored.
self._container_client.terminate_process(self._te_pid)
# Create agent config.
endpoint_config = {
'other_host' : self._other_host,
'other_port' : self._other_port,
'this_port' : 0,
'xs_name' : self._xs_name,
'platform_resource_id' : self._platform_resource_id,
'process' : {
'listen_name' : self._listen_name
}
}
        # Spawn the terrestrial endpoint process.
log.debug('Spawning terrestrial endpoint process.')
self._te_pid = self._container_client.spawn_process(
name='remote_endpoint_1',
module='ion.services.sa.tcaa.terrestrial_endpoint',
cls='TerrestrialEndpoint',
config=endpoint_config)
log.debug('Endpoint pid=%s.', str(self._te_pid))
# Create an endpoint client.
# The to_name may be either the process pid or
# the listen_name, which for this remote bridge
# is svc_name + remote_name as above.
self.te_client = TerrestrialEndpointClient(
process=FakeProcess(),
to_name=self._listen_name)
log.debug('Got te client %s.', str(self.te_client))
# Remember the terrestrial port.
self._this_port = self.te_client.get_port()
# Confirm we restored the queue with the previous commands.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
|
bsd-2-clause
| -1,828,002,971,545,074,700
| 36.735782
| 202
| 0.587101
| false
| 3.827425
| true
| false
| false
|
durandj/dockerscript
|
dockerscript/operations/run.py
|
1
|
1025
|
"""
Run operation for adding shell commands to the Dockerfile
"""
import typing
from .base_operation import Operation
# pylint: disable=too-few-public-methods
class RunOperation(Operation):
"""
An operation for running a shell command
"""
commands: typing.List[str]
def __init__(self, commands: typing.List[str]) -> None:
"""
Creates a shell command operation
"""
self.commands = commands if len(commands) == 1 else ['set -ex'] + commands
def build(self) -> str:
commands = ' \\\n && '.join(self.commands)
return f'RUN {commands}'
# pylint: enable=too-few-public-methods
def run(
image,
command: typing.Union[str, typing.List[str]]):
"""
Adds one or more shell commands to the Docker image
"""
if not command:
raise ValueError('Cannot have an empty set of commands')
operation = RunOperation(
[command] if isinstance(command, str) else command,
)
image.add_operation(operation)
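# Illustrative usage sketch (not part of the module): `image` stands in for any
# object exposing add_operation(), as assumed by run() above; its construction
# is not shown here and depends on the rest of the package.
#
#     run(image, 'pip install --no-cache-dir requests')
#     run(image, ['apt-get update', 'apt-get install -y git'])
#
# The second call creates a RunOperation whose build() renders roughly as:
#
#     RUN set -ex \
#      && apt-get update \
#      && apt-get install -y git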
|
mit
| 624,003,003,467,350,400
| 22.295455
| 82
| 0.626341
| false
| 4.051383
| false
| false
| false
|
ARudiuk/mne-python
|
mne/epochs.py
|
1
|
129314
|
# -*- coding: utf-8 -*-
"""Tools for working with epoched data"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Denis Engemann <denis.engemann@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
from copy import deepcopy
import json
import os.path as op
from distutils.version import LooseVersion
import numpy as np
import scipy
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_float_matrix, write_float,
write_id, write_string, _get_split_size)
from .io.meas_info import read_meas_info, write_meas_info, _merge_info
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.pick import (pick_types, channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_pick_aux_channels, _DATA_CH_TYPES_SPLIT)
from .io.proj import setup_proj, ProjMixin, _proj_equal
from .io.base import _BaseRaw, ToDataFrameMixin, TimeMixin
from .bem import _check_origin
from .evoked import EvokedArray
from .baseline import rescale, _log_rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import resample, detrend, FilterMixin
from .event import _read_events_fif
from .fixes import in1d, _get_args
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs)
from .utils import (check_fname, logger, verbose, _check_type_picks,
_time_mask, check_random_state, object_hash, warn,
_check_copy_dep)
from .externals.six import iteritems, string_types
from .externals.six.moves import zip
def _save_split(epochs, fname, part_idx, n_parts):
"""Split epochs"""
# insert index in filename
path, base = op.split(fname)
idx = base.find('.')
if part_idx > 0:
fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
base[idx + 1:]))
next_fname = None
if part_idx < n_parts - 1:
next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
base[idx + 1:]))
next_idx = part_idx + 1
fid = start_file(fname)
info = epochs.info
meas_id = info['meas_id']
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# Write measurement info
write_meas_info(fid, info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_MNE_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = epochs.get_data()
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
mapping_ = ';'.join([k + ':' + str(v) for k, v in
epochs.event_id.items()])
write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# First and last sample
first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe
last = first + len(epochs.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# save baseline
if epochs.baseline is not None:
bmin, bmax = epochs.baseline
bmin = epochs.times[0] if bmin is None else bmin
bmax = epochs.times[-1] if bmax is None else bmax
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(info['nchan'])
for k in range(info['nchan']):
decal[k] = 1.0 / (info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))
write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
epochs.selection)
# And now write the next file info in case epochs are split on disk
if next_fname is not None and n_parts > 1:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
class _BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
ToDataFrameMixin, TimeMixin):
"""Abstract base class for Epochs-type classes
This class provides basic functionality and should never be instantiated
directly. See Epochs below for an explanation of the parameters.
"""
def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), raw=None,
picks=None, name='Unknown', reject=None, flat=None,
decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
add_eeg_ref=True, proj=True, on_missing='error',
preload_at_end=False, selection=None, drop_log=None,
verbose=None):
self.verbose = verbose
self.name = name
if on_missing not in ['error', 'warning', 'ignore']:
raise ValueError('on_missing must be one of: error, '
'warning, ignore. Got: %s' % on_missing)
# check out event_id dict
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
elif isinstance(event_id, dict):
if not all(isinstance(v, int) for v in event_id.values()):
raise ValueError('Event IDs must be of type integer')
if not all(isinstance(k, string_types) for k in event_id):
raise ValueError('Event names must be of type str')
elif isinstance(event_id, list):
if not all(isinstance(v, int) for v in event_id):
raise ValueError('Event IDs must be of type integer')
event_id = dict(zip((str(i) for i in event_id), event_id))
elif isinstance(event_id, int):
event_id = {str(event_id): event_id}
else:
raise ValueError('event_id must be dict or int.')
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
if events.dtype.kind not in ['i', 'u']:
raise ValueError('events must be an array of type int')
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be 2D with 3 columns')
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
if on_missing == 'error':
raise ValueError(msg)
elif on_missing == 'warning':
warn(msg)
else: # on_missing == 'ignore':
pass
values = list(self.event_id.values())
selected = in1d(events[:, 2], values)
if selection is None:
self.selection = np.where(selected)[0]
else:
self.selection = selection
if drop_log is None:
self.drop_log = [list() if k in self.selection else ['IGNORED']
for k in range(len(events))]
else:
self.drop_log = drop_log
events = events[selected]
n_events = len(events)
if n_events > 1:
if np.diff(events.astype(np.int64)[:, 0]).min() <= 0:
warn('The events passed to the Epochs constructor are not '
'chronologically ordered.', RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
self.events = events
del events
else:
self.drop_log = list()
self.selection = np.array([], int)
# do not set self.events here, let subclass do it
# check reject_tmin and reject_tmax
if (reject_tmin is not None) and (reject_tmin < tmin):
raise ValueError("reject_tmin needs to be None or >= tmin")
if (reject_tmax is not None) and (reject_tmax > tmax):
raise ValueError("reject_tmax needs to be None or <= tmax")
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError('reject_tmin needs to be < reject_tmax')
if detrend not in [None, 0, 1]:
raise ValueError('detrend must be None, 0, or 1')
# check that baseline is in available data
if tmin > tmax:
raise ValueError('tmin has to be less than or equal to tmax')
_check_baseline(baseline, tmin, tmax, info['sfreq'])
_log_rescale(baseline)
self.baseline = baseline
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
self.detrend = detrend
self._raw = raw
self.info = info
del info
if picks is None:
picks = list(range(len(self.info['ch_names'])))
else:
self.info = pick_info(self.info, picks)
self.picks = _check_type_picks(picks)
if len(picks) == 0:
raise ValueError("Picks cannot be empty.")
if data is None:
self.preload = False
self._data = None
else:
assert decim == 1
if data.ndim != 3 or data.shape[2] != \
round((tmax - tmin) * self.info['sfreq']) + 1:
raise RuntimeError('bad data shape')
self.preload = True
self._data = data
self._offset = None
# Handle times
sfreq = float(self.info['sfreq'])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx,
int(round(tmax * sfreq)) + 1) / sfreq
self.times = self._raw_times.copy()
self._decim = 1
self.decimate(decim)
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, 'delayed', False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s'
% (valid_proj, proj))
if proj == 'delayed':
self._do_delayed_proj = True
logger.info('Entering delayed SSP mode.')
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, add_eeg_ref,
activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
def load_data(self):
"""Load the data if not already preloaded
Returns
-------
epochs : instance of Epochs
The epochs object.
Notes
-----
This function operates in-place.
.. versionadded:: 0.10.0
"""
if self.preload:
return
self._data = self._get_data()
self.preload = True
self._decim_slice = slice(None, None, None)
self._decim = 1
self._raw_times = self.times
assert self._data.shape[-1] == len(self.times)
return self
def decimate(self, decim, offset=0):
"""Decimate the epochs
Parameters
----------
decim : int
The amount to decimate data.
offset : int
Apply an offset to where the decimation starts relative to the
sample corresponding to t=0. The offset is in samples at the
current sampling rate.
.. versionadded:: 0.12
Returns
-------
epochs : instance of Epochs
The decimated Epochs object.
Notes
-----
Decimation can be done multiple times. For example,
``epochs.decimate(2).decimate(2)`` will be the same as
``epochs.decimate(4)``.
.. versionadded:: 0.10.0
"""
if decim < 1 or decim != int(decim):
raise ValueError('decim must be an integer > 0')
decim = int(decim)
new_sfreq = self.info['sfreq'] / float(decim)
lowpass = self.info['lowpass']
if decim > 1 and lowpass is None:
warn('The measurement information indicates data is not low-pass '
'filtered. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (decim, new_sfreq))
elif decim > 1 and new_sfreq < 2.5 * lowpass:
warn('The measurement information indicates a low-pass frequency '
'of %g Hz. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (lowpass, decim, new_sfreq)) # > 50% nyquist lim
offset = int(offset)
if not 0 <= offset < decim:
            raise ValueError('offset must be at least 0 and less than decim '
                             '(%s), got %s' % (decim, offset))
self._decim *= decim
start_idx = int(round(self._raw_times[0] * (self.info['sfreq'] *
self._decim)))
i_start = start_idx % self._decim
decim_slice = slice(i_start + offset, len(self._raw_times),
self._decim)
self.info['sfreq'] = new_sfreq
if self.preload:
self._data = self._data[:, :, decim_slice].copy()
self._raw_times = self._raw_times[decim_slice].copy()
self._decim_slice = slice(None, None, None)
self._decim = 1
self.times = self._raw_times
else:
self._decim_slice = decim_slice
self.times = self._raw_times[self._decim_slice]
return self
@verbose
def apply_baseline(self, baseline, verbose=None):
"""Baseline correct epochs
Parameters
----------
baseline : tuple of length 2
            The time interval to apply baseline correction. (a, b) is the
            interval between "a (s)" and "b (s)". If a is None the beginning
            of the data is used and if b is None then b is set to the end of
            the interval. If baseline is (None, None), the entire time
            interval is used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
epochs : instance of Epochs
The baseline-corrected Epochs object.
Notes
-----
Baseline correction can be done multiple times.
.. versionadded:: 0.10.0
"""
if not self.preload:
# Eventually we can relax this restriction, but it will require
# more careful checking of baseline (e.g., refactor with the
# _BaseEpochs.__init__ checks)
raise RuntimeError('Data must be loaded to apply a new baseline')
_check_baseline(baseline, self.tmin, self.tmax, self.info['sfreq'])
picks = _pick_data_channels(self.info, exclude=[], with_ref_meg=True)
picks_aux = _pick_aux_channels(self.info, exclude=[])
picks = np.sort(np.concatenate((picks, picks_aux)))
data = self._data
data[:, picks, :] = rescale(data[:, picks, :], self.times, baseline,
copy=False)
self.baseline = baseline
return self
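    # Minimal usage sketch (illustrative only; `epochs` is assumed to be a
    # preloaded Epochs instance, as required by the check above):
    #
    #     epochs.load_data()
    #     epochs.apply_baseline((None, 0))   # baseline from start of epoch to t=0
    #     epochs.apply_baseline((-0.2, 0))   # re-baseline on a narrower window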
def _reject_setup(self, reject, flat):
"""Sets self._reject_time and self._channel_type_idx"""
idx = channel_indices_by_type(self.info)
reject = deepcopy(reject) if reject is not None else dict()
flat = deepcopy(flat) if flat is not None else dict()
for rej, kind in zip((reject, flat), ('reject', 'flat')):
if not isinstance(rej, dict):
raise TypeError('reject and flat must be dict or None, not %s'
% type(rej))
bads = set(rej.keys()) - set(idx.keys())
if len(bads) > 0:
raise KeyError('Unknown channel types found in %s: %s'
% (kind, bads))
for key in idx.keys():
# don't throw an error if rejection/flat would do nothing
if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
flat.get(key, -1) >= 0):
# This is where we could eventually add e.g.
# self.allow_missing_reject_keys check to allow users to
# provide keys that don't exist in data
raise ValueError("No %s channel found. Cannot reject based on "
"%s." % (key.upper(), key.upper()))
# check for invalid values
for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
for key, val in rej.items():
if val is None or val < 0:
raise ValueError('%s value must be a number >= 0, not "%s"'
% (kind, val))
# now check to see if our rejection and flat are getting more
# restrictive
old_reject = self.reject if self.reject is not None else dict()
old_flat = self.flat if self.flat is not None else dict()
bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
'{kind} values must be at least as stringent as '
'previous ones')
for key in set(reject.keys()).union(old_reject.keys()):
old = old_reject.get(key, np.inf)
new = reject.get(key, np.inf)
if new > old:
raise ValueError(bad_msg.format(kind='reject', key=key,
new=new, old=old, op='>'))
for key in set(flat.keys()).union(old_flat.keys()):
old = old_flat.get(key, -np.inf)
new = flat.get(key, -np.inf)
if new < old:
raise ValueError(bad_msg.format(kind='flat', key=key,
new=new, old=old, op='<'))
# after validation, set parameters
self._bad_dropped = False
self._channel_type_idx = idx
self.reject = reject if len(reject) > 0 else None
self.flat = flat if len(flat) > 0 else None
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
@verbose
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good"""
if isinstance(data, string_types):
return False, [data]
if data is None:
return False, ['NO_DATA']
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short ie at the end of the data
return False, ['TOO_SHORT']
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
@verbose
def _detrend_offset_decim(self, epoch, verbose=None):
"""Aux Function: detrend, baseline correct, offset, decim
Note: operates inplace
"""
if (epoch is None) or isinstance(epoch, string_types):
return epoch
# Detrend
if self.detrend is not None:
picks = _pick_data_channels(self.info, exclude=[])
epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
# Baseline correct
picks = pick_types(self.info, meg=True, eeg=True, stim=False,
ref_meg=True, eog=True, ecg=True, seeg=True,
emg=True, bio=True, ecog=True, exclude=[])
epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
copy=False, verbose=False)
# handle offset
if self._offset is not None:
epoch += self._offset
# Decimate if necessary (i.e., epoch not preloaded)
epoch = epoch[:, self._decim_slice]
return epoch
def iter_evoked(self):
"""Iterate over epochs as a sequence of Evoked objects
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
"""
self._current = 0
while True:
out = self.next(True)
if out is None:
return # properly signal the end of iteration
data, event_id = out
tmin = self.times[0]
info = deepcopy(self.info)
yield EvokedArray(data, info, tmin, comment=str(event_id))
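    # Minimal usage sketch (illustrative only; `epochs` is an Epochs instance):
    #
    #     for evoked in epochs.iter_evoked():
    #         # one single-trial Evoked per epoch; comment holds the event id
    #         print(evoked.comment, evoked.data.shape)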
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch
Can be used to exclude the evoked response when analyzing induced
activity, see e.g. [1].
References
----------
[1] David et al. "Mechanisms of evoked and induced responses in
MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
Parameters
----------
evoked : instance of Evoked | None
The evoked response to subtract. If None, the evoked response
is computed from Epochs itself.
Returns
-------
self : instance of Epochs
The modified instance (instance is also modified inplace).
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = _pick_data_channels(self.info, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
_DATA_CH_TYPES_SPLIT]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
def __next__(self, *args, **kwargs):
"""Wrapper for Py3k"""
return self.next(*args, **kwargs)
def __hash__(self):
if not self.preload:
raise RuntimeError('Cannot hash epochs unless preloaded')
return object_hash(dict(info=self.info, data=self._data))
def average(self, picks=None):
"""Compute average of epochs
Parameters
----------
picks : array-like of int | None
If None only MEG, EEG, SEEG, and ECoG channels are kept
otherwise the channels indices in picks are kept.
Returns
-------
evoked : instance of Evoked
The averaged epochs.
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
"""
return self._compute_mean_or_stderr(picks, 'ave')
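    # Minimal usage sketch (illustrative only), following the note above about
    # averaging by condition; the condition names are taken from epochs.event_id:
    #
    #     evoked_all = epochs.average()
    #     evoked_by_cond = {cond: epochs[cond].average()
    #                       for cond in epochs.event_id}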
def standard_error(self, picks=None):
"""Compute standard error over epochs
Parameters
----------
picks : array-like of int | None
If None only MEG, EEG, SEEG, and ECoG channels are kept
otherwise the channels indices in picks are kept.
Returns
-------
evoked : instance of Evoked
The standard error over epochs.
"""
return self._compute_mean_or_stderr(picks, 'stderr')
def _compute_mean_or_stderr(self, picks, mode='ave'):
"""Compute the mean or std over epochs and return Evoked"""
        _do_std = (mode == 'stderr')
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = np.std if _do_std else np.mean
data = fun(self._data, axis=0)
assert len(self.events) == len(self._data)
else:
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if _do_std:
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if not _do_std:
kind = 'average'
else:
kind = 'standard_error'
data /= np.sqrt(n_events)
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind)
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind):
"""Helper to create an evoked object from epoch data"""
info = deepcopy(info)
evoked = EvokedArray(data, info, tmin=self.times[0],
comment=self.name, nave=n_events, kind=kind,
verbose=self.verbose)
# XXX: above constructor doesn't recreate the times object precisely
evoked.times = self.times.copy()
# pick channels
if picks is None:
picks = _pick_data_channels(evoked.info, exclude=[])
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
def plot(self, picks=None, scalings=None, show=True,
block=False, n_epochs=20,
n_channels=20, title=None):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side
of the main axes. Calling this function drops all the selected bad
epochs as well as bad epochs marked beforehand with rejection
parameters.
Parameters
----------
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | None
Scaling factors for the traces. If any fields in scalings are
'auto', the scaling factor is set to match the 99.5th percentile of
a subset of the corresponding data. If scalings == 'auto', all
scalings fields are set to 'auto'. If any fields are 'auto' and
data is not preloaded, a subset of epochs up to 100mb will be
loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1,
chpi=1e-4)
show : bool
Whether to show the figure or not.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on a
sub plot.
n_epochs : int
The number of epochs per view.
n_channels : int
            The number of channels per view on mne_browse_epochs. Defaults
            to 20.
title : str | None
            The title of the window. If None, the epochs name will be
            displayed. Defaults to None.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can
be used to navigate between channels and epochs and the scaling can be
adjusted with - and + (or =) keys, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use(``TkAgg``) should work).
Full screen mode can be toggled with f11 key. The amount of epochs
and channels per view can be adjusted with home/end and
page down/page up keys. Butterfly plot can be toggled with ``b`` key.
Right mouse click adds a vertical line to the plot.
.. versionadded:: 0.10.0
"""
return plot_epochs(self, picks=picks, scalings=scalings,
n_epochs=n_epochs, n_channels=n_channels,
title=title, show=show, block=block)
def plot_psd(self, fmin=0, fmax=np.inf, proj=False, bandwidth=None,
adaptive=False, low_bias=True, normalization='length',
picks=None, ax=None, color='black', area_mode='std',
area_alpha=0.33, dB=True, n_jobs=1, verbose=None, show=True):
"""Plot the power spectral density across epochs
Parameters
----------
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across
channels) will be plotted. If 'range', the min and max (across
channels) will be plotted. Bad channels will be excluded from
these calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_epochs_psd(self, fmin=fmin, fmax=fmax, proj=proj,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
picks=picks, ax=ax, color=color,
area_mode=area_mode, area_alpha=area_alpha,
dB=dB, n_jobs=n_jobs, verbose=None, show=show)
def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, proj=False,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', ch_type=None,
layout=None, cmap='RdBu_r', agg_fun=None, dB=True,
n_jobs=1, normalize=False, cbar_fmt='%0.3f',
outlines='head', show=True, verbose=None):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the
output equals vmax(data). Defaults to None.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
The channel type to plot. For 'grad', the gradiometers are
collected in
pairs and the RMS for each pair is plotted. If None, defaults to
'mag' if MEG data are present and to 'eeg' if only EEG data are
present.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize
is False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_epochs_psd_topomap(
self, bands=bands, vmin=vmin, vmax=vmax, proj=proj,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
ch_type=ch_type, layout=layout, cmap=cmap,
agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, show=show, verbose=None)
def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', font_color='w',
show=True):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
        layout : instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along the
epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
        layout_scale : float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_topo_image_epochs(
self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
colorbar=colorbar, order=order, cmap=cmap,
layout_scale=layout_scale, title=title, scalings=scalings,
border=border, fig_facecolor=fig_facecolor, font_color=font_color,
show=show)
@verbose
def drop_bad(self, reject='existing', flat='existing', verbose=None):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. warning:: This operation is slow since all epochs have to be read
from disk. To avoid reading epochs from disk multiple
times, use :func:`mne.Epochs.load_data()`.
Parameters
----------
reject : dict | str | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. If 'existing',
then the rejection parameters set at instantiation are used.
flat : dict | str | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done. If 'existing',
then the flat parameters set at instantiation are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The epochs with bad epochs dropped. Operates in-place.
Notes
-----
Dropping bad epochs can be done multiple times with different
``reject`` and ``flat`` parameters. However, once an epoch is
        dropped, it is dropped forever, so if more lenient thresholds might
        be applied later, `epochs.copy` should be used.
"""
if reject == 'existing':
if flat == 'existing' and self._bad_dropped:
return
reject = self.reject
if flat == 'existing':
flat = self.flat
if any(isinstance(rej, string_types) and rej != 'existing' for
rej in (reject, flat)):
raise ValueError('reject and flat, if strings, must be "existing"')
self._reject_setup(reject, flat)
self._get_data(out=False)
return self
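    # Minimal usage sketch (illustrative thresholds only, not recommendations):
    #
    #     epochs.drop_bad(reject=dict(eeg=100e-6, eog=150e-6),
    #                     flat=dict(eeg=1e-6))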
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
perc : float
Total percentage of epochs dropped.
fig : Instance of matplotlib.figure.Figure
The figure.
"""
if not self._bad_dropped:
raise ValueError("You cannot use plot_drop_log since bad "
"epochs have not yet been dropped. "
"Use epochs.drop_bad().")
from .viz import plot_drop_log
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore,
show=show)
def plot_image(self, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r',
fig=None, overlay_times=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is
applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
            (data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two
axes for drawing the single trials and evoked responses. If
None a new figure is created. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in
seconds and is added to the image. It is typically useful to
display reaction times. Note that it is defined with respect
to the order of epochs such that overlay_times[0] corresponds
to epochs[0].
Returns
-------
figs : list of matplotlib figures
One figure per channel displayed.
"""
return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
vmax=vmax, colorbar=colorbar, order=order,
show=show, units=units, scalings=scalings,
cmap=cmap, fig=fig,
overlay_times=overlay_times)
@verbose
def drop(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask
.. note:: The indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any
external indices (e.g., behavioral logs). To drop epochs
based on external criteria, do not use the ``preload=True``
flag when constructing an Epochs object, and call this
method before calling the :func:`mne.Epochs.drop_bad` or
:func:`mne.Epochs.load_data` methods.
Parameters
----------
indices : array of ints or bools
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The epochs with indices dropped. Operates in-place.
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
out_of_bounds = (indices < 0) | (indices >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
for ii in indices:
self.drop_log[self.selection[ii]].append(reason)
self.selection = np.delete(self.selection, indices)
self.events = np.delete(self.events, indices, axis=0)
if self.preload:
self._data = np.delete(self._data, indices, axis=0)
count = len(indices)
logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
return self
def _get_epoch_from_raw(self, idx, verbose=None):
"""Method to get a given epoch from disk"""
raise NotImplementedError
def _project_epoch(self, epoch):
"""Helper to process a raw epoch based on the delayed param"""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, string_types):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
@verbose
def _get_data(self, out=True, verbose=None):
"""Load all data, dropping bad epochs along the way
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
n_events = len(self.events)
# in case there are no good events
if self.preload:
# we will store our result in our existing array
data = self._data
else:
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info['ch_names']), len(self.times)))
logger.info('Loading data for %s events and %s original time '
'points ...' % (n_events, len(self._raw_times)))
if self._bad_dropped:
if not out:
return
if self.preload:
return data
# we need to load from disk, drop, and return data
for idx in range(n_events):
# faster to pre-allocate memory here
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
if self._do_delayed_proj:
epoch_out = epoch_noproj
else:
epoch_out = self._project_epoch(epoch_noproj)
if idx == 0:
data = np.empty((n_events, len(self.ch_names),
len(self.times)), dtype=epoch_out.dtype)
data[idx] = epoch_out
else:
# bads need to be dropped, this might occur after a preload
# e.g., when calling drop_bad w/new params
good_idx = []
n_out = 0
assert n_events == len(self.selection)
for idx, sel in enumerate(self.selection):
if self.preload: # from memory
if self._do_delayed_proj:
epoch_noproj = self._data[idx]
epoch = self._project_epoch(epoch_noproj)
else:
epoch_noproj = None
epoch = self._data[idx]
else: # from disk
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
epoch_out = epoch_noproj if self._do_delayed_proj else epoch
is_good, offending_reason = self._is_good_epoch(epoch)
if not is_good:
self.drop_log[sel] += offending_reason
continue
good_idx.append(idx)
# store the epoch if there is a reason to (output or update)
if out or self.preload:
# faster to pre-allocate, then trim as necessary
if n_out == 0 and not self.preload:
data = np.empty((n_events, epoch_out.shape[0],
epoch_out.shape[1]),
dtype=epoch_out.dtype, order='C')
data[n_out] = epoch_out
n_out += 1
self._bad_dropped = True
logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
# Now update our properties
if len(good_idx) == 0: # silly fix for old numpy index error
self.selection = np.array([], int)
self.events = np.empty((0, 3))
else:
self.selection = self.selection[good_idx]
self.events = np.atleast_2d(self.events[good_idx])
# adjust the data size if there is a reason to (output or update)
if out or self.preload:
data.resize((n_out,) + data.shape[1:], refcheck=False)
return data if out else None
def get_data(self):
"""Get all epochs as a 3D array
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A copy of the epochs data.
"""
return self._get_data()
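    # Illustrative usage sketch (not part of the original module): ``epochs``
    # is assumed to exist; the returned copy has shape
    # (n_epochs, n_channels, n_times).
    #
    # >>> data = epochs.get_data()
    # >>> data.shape == (len(epochs), len(epochs.ch_names), len(epochs.times))
    # True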
def __len__(self):
"""The number of epochs
Returns
-------
n_epochs : int
The number of remaining epochs.
Notes
-----
This function only works if bad epochs have been dropped.
Examples
--------
This can be used as::
>>> epochs.drop_bad() # doctest: +SKIP
>>> len(epochs) # doctest: +SKIP
43
>>> len(epochs.events) # doctest: +SKIP
43
"""
if not self._bad_dropped:
raise RuntimeError('Since bad epochs have not been dropped, the '
'length of the Epochs is not known. Load the '
'Epochs with preload=True, or call '
'Epochs.drop_bad(). To find the number '
'of events in the Epochs, use '
'len(Epochs.events).')
return len(self.events)
def __iter__(self):
"""Function to make iteration over epochs easy
Notes
-----
This enables the use of this Python pattern::
>>> for epoch in epochs: # doctest: +SKIP
>>> print(epoch) # doctest: +SKIP
Where ``epoch`` is given by successive outputs of
:func:`mne.Epochs.next`.
"""
self._current = 0
while True:
x = self.next()
if x is None:
return
yield x
def next(self, return_event_id=False):
"""Iterate over epoch data.
Parameters
----------
return_event_id : bool
If True, return both the epoch data and an event_id.
Returns
-------
epoch : array of shape (n_channels, n_times)
The epoch data.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
"""
if self.preload:
if self._current >= len(self._data):
return # signal the end
epoch = self._data[self._current]
self._current += 1
else:
is_good = False
while not is_good:
if self._current >= len(self.events):
return # signal the end properly
epoch_noproj = self._get_epoch_from_raw(self._current)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._do_delayed_proj:
epoch = epoch_noproj
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
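    # Illustrative usage sketch (not part of the original module): iterating
    # over an assumed ``epochs`` instance with a for-loop, which relies on
    # ``__iter__``/``next``; pass return_event_id=True to ``next`` to also get
    # the event code of each epoch.
    #
    # >>> for epoch in epochs:
    # ...     print(epoch.shape)          # (n_channels, n_times)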
@property
def tmin(self):
return self.times[0]
@property
def tmax(self):
return self.times[-1]
def __repr__(self):
""" Build string representation"""
s = 'n_events : %s ' % len(self.events)
s += '(all good)' if self._bad_dropped else '(good & bad)'
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
if len(self.event_id) > 1:
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in sorted(self.event_id.items())]
s += ',\n %s' % ', '.join(counts)
class_name = self.__class__.__name__
if class_name == '_BaseEpochs':
class_name = 'Epochs'
return '<%s | %s>' % (class_name, s)
def _key_match(self, key):
"""Helper function for event dict use"""
if key not in self.event_id:
raise KeyError('Event "%s" is not in Epochs.' % key)
return self.events[:, 2] == self.event_id[key]
def __getitem__(self, item):
"""Return an Epochs object with a copied subset of epochs
Parameters
----------
item : slice, array-like, str, or list
See below for use cases.
Returns
-------
epochs : instance of Epochs
See below for use cases.
Notes
-----
Epochs can be accessed as ``epochs[...]`` in several ways:
1. ``epochs[idx]``: Return ``Epochs`` object with a subset of
epochs (supports single index and python-style slicing).
2. ``epochs['name']``: Return ``Epochs`` object with a copy of the
subset of epochs corresponding to an experimental condition as
specified by 'name'.
If conditions are tagged by names separated by '/' (e.g.
'audio/left', 'audio/right'), and 'name' is not in itself an
event key, this selects every event whose condition contains
the 'name' tag (e.g., 'left' matches 'audio/left' and
'visual/left'; but not 'audio_left'). Note that tags like
'auditory/left' and 'left/auditory' will be treated the
same way when accessed using tags.
3. ``epochs[['name_1', 'name_2', ... ]]``: Return ``Epochs`` object
with a copy of the subset of epochs corresponding to multiple
experimental conditions as specified by
``'name_1', 'name_2', ...`` .
If conditions are separated by '/', selects every item containing
every list tag (e.g. ['audio', 'left'] selects 'audio/left' and
'audio/center/left', but not 'audio/right').
"""
data = self._data
del self._data
epochs = self.copy()
self._data, epochs._data = data, data
del self
key = item
del item
if isinstance(key, string_types):
key = [key]
if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
if any('/' in k_i for k_i in epochs.event_id.keys()):
if any(k_e not in epochs.event_id for k_e in key):
# Select a given key if the requested set of
# '/'-separated types are a subset of the types in that key
key = [k for k in epochs.event_id.keys()
if all(set(k_i.split('/')).issubset(k.split('/'))
for k_i in key)]
if len(key) == 0:
raise KeyError('Attempting selection of events via '
'multiple/partial matching, but no '
'event matches all criteria.')
select = np.any(np.atleast_2d([epochs._key_match(k)
for k in key]), axis=0)
epochs.name = '+'.join(key)
else:
select = key if isinstance(key, slice) else np.atleast_1d(key)
key_selection = epochs.selection[select]
for k in np.setdiff1d(epochs.selection, key_selection):
epochs.drop_log[k] = ['IGNORED']
epochs.selection = key_selection
epochs.events = np.atleast_2d(epochs.events[select])
if epochs.preload:
# ensure that each Epochs instance owns its own data so we can
# resize later if necessary
epochs._data = np.require(epochs._data[select], requirements=['O'])
# update event id to reflect new content of epochs
epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
if v in epochs.events[:, 2])
return epochs
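    # Illustrative usage sketch (not part of the original module): the
    # condition names below are made-up; they must exist in ``epochs.event_id``
    # (or be '/'-separated tags thereof).
    #
    # >>> first_ten = epochs[:10]                       # by index/slice
    # >>> aud_left = epochs['auditory/left']            # by condition name
    # >>> all_left = epochs['left']                     # by tag
    # >>> aud = epochs[['auditory/left', 'auditory/right']]  # by list of names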
def crop(self, tmin=None, tmax=None):
"""Crops a time interval from epochs object.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
Returns
-------
epochs : instance of Epochs
The cropped epochs.
Notes
-----
Unlike Python slices, MNE time intervals include both their end points;
crop(tmin, tmax) returns the interval tmin <= t <= tmax.
"""
# XXX this could be made to work on non-preloaded data...
if not self.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in epochs time interval. tmin is set to '
'epochs.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in epochs time interval. tmax is set to '
'epochs.tmax')
tmax = self.tmax
tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'])
self.times = self.times[tmask]
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
return self
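    # Illustrative usage sketch (not part of the original module): cropping a
    # preloaded ``epochs`` instance in-place; the limits are arbitrary example
    # values and both endpoints are kept.
    #
    # >>> epochs.crop(tmin=0., tmax=0.3)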
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample preloaded data
Parameters
----------
sfreq : float
New sample rate to use
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The resampled epochs object.
See Also
--------
mne.Epochs.savgol_filter
mne.io.Raw.resample
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
"""
# XXX this could operate on non-preloaded data, too
if not self.preload:
raise RuntimeError('Can only resample preloaded data')
o_sfreq = self.info['sfreq']
self._data = resample(self._data, sfreq, o_sfreq, npad, window=window,
n_jobs=n_jobs)
# adjust indirectly affected variables
self.info['sfreq'] = float(sfreq)
self.times = (np.arange(self._data.shape[2], dtype=np.float) /
sfreq + self.times[0])
return self
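    # Illustrative usage sketch (not part of the original module): resampling a
    # preloaded ``epochs`` instance; the target rate is an arbitrary example.
    #
    # >>> epochs.resample(100., npad='auto')
    # >>> epochs.info['sfreq']
    # 100.0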
def copy(self):
"""Return copy of Epochs instance"""
raw = self._raw
del self._raw
new = deepcopy(self)
self._raw = raw
new._raw = raw
return new
def save(self, fname, split_size='2GB'):
"""Save epochs in a fif file
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or
-epo.fif.gz.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
.. versionadded:: 0.10.0
Notes
-----
Bad epochs will be dropped before saving the epochs to disk.
"""
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
split_size = _get_split_size(split_size)
# to know the length accurately. The get_data() call would drop
# bad epochs anyway
self.drop_bad()
total_size = self[0].get_data().nbytes * len(self)
n_parts = int(np.ceil(total_size / float(split_size)))
epoch_idxs = np.array_split(np.arange(len(self)), n_parts)
for part_idx, epoch_idx in enumerate(epoch_idxs):
this_epochs = self[epoch_idx] if n_parts > 1 else self
# avoid missing event_ids in splits
this_epochs.event_id = self.event_id
_save_split(this_epochs, fname, part_idx, n_parts)
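    # Illustrative usage sketch (not part of the original module): the file
    # name is a made-up example and must end in -epo.fif or -epo.fif.gz;
    # ``read_epochs`` is defined later in this module.
    #
    # >>> epochs.save('sample-epo.fif')
    # >>> epochs2 = read_epochs('sample-epo.fif', preload=False)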
def equalize_event_counts(self, event_ids, method='mintime', copy=None):
"""Equalize the number of trials in each condition
        It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
in the two sets of epochs. For example, if one had event times
[1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
it would remove events at times [1, 2] in the first epochs and not
        [120, 121].
Parameters
----------
event_ids : list
The event types to equalize. Each entry in the list can either be
a str (single event) or a list of str. In the case where one of
the entries is a list of str, event_ids in that list will be
grouped together before equalizing trial counts across conditions.
In the case where partial matching is used (using '/' in
`event_ids`), `event_ids` will be matched according to the
provided tags, that is, processing works as if the event_ids
matched by the provided tags had been supplied instead.
The event_ids must identify nonoverlapping subsets of the epochs.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list
will be minimized.
copy : bool
This parameter has been deprecated and will be removed in 0.14.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
        For example (if epochs.event_id was {'Left': 1, 'Right': 2,
        'Nonspatial': 3}):
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the 'Nonspatial' condition with
the total number of trials in the 'Left' and 'Right' conditions.
If multiple indices are provided (e.g. 'Left' and 'Right' in the
example above), it is not guaranteed that after equalization, the
conditions will contribute evenly. E.g., it is possible to end up
with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'.
"""
epochs = _check_copy_dep(self, copy)
if len(event_ids) == 0:
raise ValueError('event_ids must have at least one element')
if not epochs._bad_dropped:
epochs.drop_bad()
# figure out how to equalize
eq_inds = list()
# deal with hierarchical tags
ids = epochs.event_id
orig_ids = list(event_ids)
tagging = False
if "/" in "".join(ids):
# make string inputs a list of length 1
event_ids = [[x] if isinstance(x, string_types) else x
for x in event_ids]
for ids_ in event_ids: # check if tagging is attempted
if any([id_ not in ids for id_ in ids_]):
tagging = True
# 1. treat everything that's not in event_id as a tag
# 2a. for tags, find all the event_ids matched by the tags
# 2b. for non-tag ids, just pass them directly
# 3. do this for every input
event_ids = [[k for k in ids if all((tag in k.split("/")
for tag in id_))] # find ids matching all tags
if all(id__ not in ids for id__ in id_)
else id_ # straight pass for non-tag inputs
for id_ in event_ids]
for ii, id_ in enumerate(event_ids):
if len(id_) == 0:
                    raise KeyError(orig_ids[ii] + " not found in the "
"epoch object's event_id.")
elif len(set([sub_id in ids for sub_id in id_])) != 1:
err = ("Don't mix hierarchical and regular event_ids"
" like in \'%s\'." % ", ".join(id_))
raise ValueError(err)
# raise for non-orthogonal tags
if tagging is True:
events_ = [set(epochs[x].events[:, 0]) for x in event_ids]
doubles = events_[0].intersection(events_[1])
if len(doubles):
raise ValueError("The two sets of epochs are "
"overlapping. Provide an "
"orthogonal selection.")
for eq in event_ids:
eq = np.atleast_1d(eq)
# eq is now a list of types
key_match = np.zeros(epochs.events.shape[0])
for key in eq:
key_match = np.logical_or(key_match, epochs._key_match(key))
eq_inds.append(np.where(key_match)[0])
event_times = [epochs.events[e, 0] for e in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
epochs.drop(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return epochs, indices
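    # Illustrative usage sketch (not part of the original module): the
    # condition names are made-up; grouped names are pooled before equalizing.
    #
    # >>> epochs, dropped = epochs.equalize_event_counts(
    # ...     [['auditory/left', 'auditory/right'], 'visual'])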
def _check_baseline(baseline, tmin, tmax, sfreq):
"""Helper to check for a valid baseline"""
if baseline is not None:
if not isinstance(baseline, tuple) or len(baseline) != 2:
raise ValueError('`baseline=%s` is an invalid argument.'
% str(baseline))
baseline_tmin, baseline_tmax = baseline
tstep = 1. / float(sfreq)
if baseline_tmin is None:
baseline_tmin = tmin
baseline_tmin = float(baseline_tmin)
if baseline_tmax is None:
baseline_tmax = tmax
baseline_tmax = float(baseline_tmax)
if baseline_tmin < tmin - tstep:
raise ValueError(
"Baseline interval (tmin = %s) is outside of epoch "
"data (tmin = %s)" % (baseline_tmin, tmin))
if baseline_tmax > tmax + tstep:
raise ValueError(
"Baseline interval (tmax = %s) is outside of epoch "
"data (tmax = %s)" % (baseline_tmax, tmax))
if baseline_tmin > baseline_tmax:
raise ValueError(
"Baseline min (%s) must be less than baseline max (%s)"
% (baseline_tmin, baseline_tmax))
del baseline_tmin, baseline_tmax
def _drop_log_stats(drop_log, ignore=('IGNORED',)):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
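# Illustrative usage sketch (not part of the original module): computing the
# percentage of dropped epochs from an existing drop log while ignoring epochs
# that were merely outside the requested event_id selection.
#
# >>> perc = _drop_log_stats(epochs.drop_log, ignore=('IGNORED',))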
class Epochs(_BaseEpochs):
"""Epochs extracted from a Raw instance
Parameters
----------
raw : Raw object
An instance of Raw.
events : array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used, and a
        dict is created with string integer names corresponding
to the event id integers.
tmin : float
Start time before event. If nothing is provided, defaults to -0.2
tmax : float
End time after event. If nothing is provided, defaults to 0.5
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels are used).
name : string
Comment that describes the Epochs data created.
preload : boolean
Load all epochs from disk when creating the object
or wait before accessing each epoch (more memory
efficient but can be slower).
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
decim : int
Factor by which to downsample the data from the raw file upon import.
Warning: This simply selects every nth sample, data is not filtered
here. If data is not properly filtered, aliasing artifacts may occur.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
on_missing : str
What to do if one or several event ids are not found in the recording.
Valid keys are 'error' | 'warning' | 'ignore'
Default is 'error'. If on_missing is 'warning' it will proceed but
        warn, if 'ignore' it will proceed silently. Note that if none of
        the event ids are found in the data, an error will be automatically
        generated irrespective of this parameter.
reject_by_annotation : bool
Whether to reject based on annotations. If True (default), epochs
overlapping with segments whose description begins with ``'bad'`` are
rejected. If False, no rejection based on annotations is performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Attributes
----------
    info : dict
Measurement info.
event_id : dict
Names of conditions corresponding to event_ids.
ch_names : list of string
List of channel names.
selection : array
List of indices of selected events (not dropped or ignored etc.). For
example, if the original event array had 4 events and the second event
has been dropped, this attribute would be np.array([0, 2, 3]).
preload : bool
Indicates whether epochs are in memory.
drop_log : list of lists
A list of the same length as the event array used to initialize the
Epochs object. If the i-th original event is still part of the
selection, drop_log[i] will be an empty list; otherwise it will be
        a list of the reasons the event is no longer in the selection, e.g.:
'IGNORED' if it isn't part of the current subset defined by the user;
'NO_DATA' or 'TOO_SHORT' if epoch didn't contain enough data;
names of channels that exceeded the amplitude threshold;
        'EQUALIZED_COUNT' (see equalize_event_counts);
or 'USER' for user-defined reasons (see drop method).
verbose : bool, str, int, or None
See above.
See Also
--------
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
Notes
-----
When accessing data, Epochs are detrended, baseline-corrected, and
decimated, then projectors are (optionally) applied.
For indexing and slicing using ``epochs[...]``, see
:func:`mne.Epochs.__getitem__`.
"""
@verbose
def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), picks=None, name='Unknown', preload=False,
reject=None, flat=None, proj=True, decim=1, reject_tmin=None,
reject_tmax=None, detrend=None, add_eeg_ref=True,
on_missing='error', reject_by_annotation=True, verbose=None):
if not isinstance(raw, _BaseRaw):
raise ValueError('The first argument to `Epochs` must be an '
'instance of `mne.io.Raw`')
info = deepcopy(raw.info)
# proj is on when applied in Raw
proj = proj or raw.proj
self.reject_by_annotation = reject_by_annotation
# call _BaseEpochs constructor
super(Epochs, self).__init__(
info, None, events, event_id, tmin, tmax, baseline=baseline,
raw=raw, picks=picks, name=name, reject=reject, flat=flat,
decim=decim, reject_tmin=reject_tmin, reject_tmax=reject_tmax,
detrend=detrend, add_eeg_ref=add_eeg_ref, proj=proj,
on_missing=on_missing, preload_at_end=preload, verbose=verbose)
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk
Returns
-------
data : array | str | None
If string it's details on rejection reason.
If None it means no data.
"""
if self._raw is None:
# This should never happen, as raw=None only if preload=True
raise ValueError('An error has occurred, no valid raw file found.'
' Please report this to the mne-python '
'developers.')
sfreq = self._raw.info['sfreq']
event_samp = self.events[idx, 0]
# Read a data segment
first_samp = self._raw.first_samp
start = int(round(event_samp + self.tmin * sfreq)) - first_samp
stop = start + len(self._raw_times)
data = self._raw._check_bad_segment(start, stop, self.picks,
self.reject_by_annotation)
return data
class EpochsArray(_BaseEpochs):
"""Epochs object from numpy array
Parameters
----------
data : array, shape (n_epochs, n_channels, n_times)
The channels' time series for each epoch.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
events : None | array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
If None (default), all event values are set to 1 and event time-samples
are set to range(n_epochs).
tmin : float
Start time before event. If nothing provided, defaults to -0.2.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used, and a
        dict is created with string integer names corresponding
to the event id integers.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
baseline : None or tuple of length 2 (default: None)
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
proj : bool | 'delayed'
Apply SSP projection vectors. See :class:`mne.Epochs` for details.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
io.RawArray, EvokedArray, create_info
"""
@verbose
def __init__(self, data, info, events=None, tmin=0, event_id=None,
reject=None, flat=None, reject_tmin=None,
reject_tmax=None, baseline=None, proj=True, verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 3:
raise ValueError('Data must be a 3D array of shape (n_epochs, '
'n_channels, n_samples)')
if len(info['ch_names']) != data.shape[1]:
raise ValueError('Info and data must have same number of '
'channels.')
if events is None:
n_epochs = len(data)
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
if data.shape[0] != len(events):
            raise ValueError('The number of epochs and the number of events '
                             'must match')
info = deepcopy(info) # do not modify original info
tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
super(EpochsArray, self).__init__(info, data, events, event_id, tmin,
tmax, baseline, reject=reject,
flat=flat, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, decim=1,
add_eeg_ref=False, proj=proj)
if len(events) != in1d(self.events[:, 2],
list(self.event_id.values())).sum():
raise ValueError('The events must only contain event numbers from '
'event_id')
for ii, e in enumerate(self._data):
# This is safe without assignment b/c there is no decim
self._detrend_offset_decim(e)
self.drop_bad()
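    # Illustrative usage sketch (not part of the original module), assuming
    # ``import numpy as np`` and ``import mne``; channel names, sampling rate
    # and shapes below are made-up example values.
    #
    # >>> info = mne.create_info(['EEG1', 'EEG2'], sfreq=100., ch_types='eeg')
    # >>> data = np.random.randn(5, 2, 50)   # (n_epochs, n_channels, n_times)
    # >>> epochs = EpochsArray(data, info, tmin=-0.1)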
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
"""Collapse event_ids from an epochs instance into a new event_id
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
old_event_ids : str, or list
Conditions to collapse together.
new_event_id : dict, or int
A one-element dict (or a single integer) for the new
condition. Note that for safety, this cannot be any
existing id (in epochs.event_id.values()).
copy : bool
Whether to return a new instance or modify in place.
Notes
-----
    For example (if epochs.event_id was {'Left': 1, 'Right': 2}):
combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
would create a 'Directional' entry in epochs.event_id replacing
'Left' and 'Right' (combining their trials).
"""
epochs = epochs.copy() if copy else epochs
old_event_ids = np.asanyarray(old_event_ids)
if isinstance(new_event_id, int):
new_event_id = {str(new_event_id): new_event_id}
else:
if not isinstance(new_event_id, dict):
raise ValueError('new_event_id must be a dict or int')
if not len(list(new_event_id.keys())) == 1:
raise ValueError('new_event_id dict must have one entry')
new_event_num = list(new_event_id.values())[0]
if not isinstance(new_event_num, int):
raise ValueError('new_event_id value must be an integer')
if new_event_num in epochs.event_id.values():
raise ValueError('new_event_id value must not already exist')
# could use .pop() here, but if a latter one doesn't exist, we're
# in trouble, so run them all here and pop() later
old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
# find the ones to replace
inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
old_event_nums[np.newaxis, :], axis=1)
# replace the event numbers in the events list
epochs.events[inds, 2] = new_event_num
# delete old entries
for key in old_event_ids:
epochs.event_id.pop(key)
# add the new entry
epochs.event_id.update(new_event_id)
return epochs
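# Illustrative usage sketch (not part of the original module): the condition
# names and the new event id below are made-up; the new id must not already be
# used in ``epochs.event_id``.
#
# >>> epochs = combine_event_ids(epochs, ['auditory/left', 'auditory/right'],
# ...                            {'auditory': 12})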
def equalize_epoch_counts(epochs_list, method='mintime'):
"""Equalize the number of trials in multiple Epoch instances
    It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be some
time-varying (like on the scale of minutes) noise characteristics during
a recording, they could be compensated for (to some extent) in the
equalization process. This method thus seeks to reduce any of those effects
by minimizing the differences in the times of the events in the two sets of
epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
[1, 2] in the first epochs and not [120, 121].
Note that this operates on the Epochs instances in-place.
Example:
        equalize_epoch_counts([epochs1, epochs2])
Parameters
----------
epochs_list : list of Epochs instances
The Epochs instances to equalize trial counts for.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will be
minimized.
"""
if not all(isinstance(e, _BaseEpochs) for e in epochs_list):
raise ValueError('All inputs must be Epochs instances')
# make sure bad epochs are dropped
for e in epochs_list:
if not e._bad_dropped:
e.drop_bad()
event_times = [e.events[:, 0] for e in epochs_list]
indices = _get_drop_indices(event_times, method)
for e, inds in zip(epochs_list, indices):
e.drop(inds, reason='EQUALIZED_COUNT')
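# Illustrative usage sketch (not part of the original module): matching trial
# counts across two independently constructed Epochs instances (in place).
#
# >>> equalize_epoch_counts([epochs_a, epochs_b], method='mintime')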
def _get_drop_indices(event_times, method):
"""Helper to get indices to drop from multiple event timing lists"""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
if method not in ['mintime', 'truncate']:
raise ValueError('method must be either mintime or truncate, not '
'%s' % method)
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _fix_fill(fill):
"""Helper to fix bug on old scipy"""
if LooseVersion(scipy.__version__) < LooseVersion('0.12'):
fill = fill[:, np.newaxis]
return fill
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences"""
from scipy.interpolate import interp1d
keep = np.ones((len(t_longer)), dtype=bool)
if len(t_shorter) == 0:
keep.fill(False)
return keep
scores = np.ones((len(t_longer)))
x1 = np.arange(len(t_shorter))
# The first set of keep masks to test
kwargs = dict(copy=False, bounds_error=False)
# this is a speed tweak, only exists for certain versions of scipy
if 'assume_sorted' in _get_args(interp1d.__init__):
kwargs['assume_sorted'] = True
shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
**kwargs)
for ii in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# set up the keep masks to test, eliminating any rows that are already
# gone
keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
keep_mask[:, ~keep] = False
# Check every possible removal to see if it minimizes
x2 = np.arange(len(t_longer) - ii - 1)
t_keeps = np.array([t_longer[km] for km in keep_mask])
longer_interp = interp1d(x2, t_keeps, axis=1,
fill_value=_fix_fill(t_keeps[:, -1]),
**kwargs)
d1 = longer_interp(x1) - t_shorter
d2 = shorter_interp(x2) - t_keeps
scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
keep[np.argmin(scores)] = False
return keep
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
ignore_chs=[], verbose=None):
"""Test if data segment e is good according to the criteria
defined in reject and flat. If full_report=True, it will give
True/False as well as a list of all offending channels.
"""
bad_list = list()
has_printed = False
checkable = np.ones(len(ch_names), dtype=bool)
checkable[np.array([c in ignore_chs
for c in ch_names], dtype=bool)] = False
for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
if refl is not None:
for key, thresh in iteritems(refl):
idx = channel_type_idx[key]
name = key.upper()
if len(idx) > 0:
e_idx = e[idx]
deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
checkable_idx = checkable[idx]
idx_deltas = np.where(np.logical_and(f(deltas, thresh),
checkable_idx))[0]
if len(idx_deltas) > 0:
ch_name = [ch_names[idx[i]] for i in idx_deltas]
if (not has_printed):
logger.info(' Rejecting %s epoch based on %s : '
'%s' % (t, name, ch_name))
has_printed = True
if not full_report:
return False
else:
bad_list.extend(ch_name)
if not full_report:
return True
else:
if bad_list == []:
return True, None
else:
return False, bad_list
def _read_one_epoch_file(f, tree, fname, preload):
"""Helper to read a single FIF file"""
with f as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
info['filename'] = fname
events, mappings = _read_events_fif(fid, tree)
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
raise ValueError('Could not find processed data')
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
# before version 0.11 we errantly saved with this tag instead of
# an MNE tag
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11
if len(epochs_node) == 0:
raise ValueError('Could not find epochs data')
my_epochs = epochs_node[0]
# Now find the data in the block
name = None
data = None
data_tag = None
bmin, bmax = None, None
baseline = None
selection = None
drop_log = None
for k in range(my_epochs['nent']):
kind = my_epochs['directory'][k].kind
pos = my_epochs['directory'][k].pos
if kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
name = tag.data
elif kind == FIFF.FIFF_EPOCH:
# delay reading until later
fid.seek(pos, 0)
data_tag = read_tag_info(fid)
data_tag.pos = pos
elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]:
# Constant 304 was used before v0.11
tag = read_tag(fid, pos)
bmin = float(tag.data)
elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]:
# Constant 305 was used before v0.11
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = json.loads(tag.data)
if bmin is not None or bmax is not None:
baseline = (bmin, bmax)
n_samp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms (%s)'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq'], name))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Inspect the data
if data_tag is None:
raise ValueError('Epochs data not found')
epoch_shape = (len(info['ch_names']), n_samp)
expected = len(events) * np.prod(epoch_shape)
if data_tag.size // 4 - 4 != expected: # 32-bit floats stored
raise ValueError('Incorrect number of samples (%d instead of %d)'
% (data_tag.size // 4, expected))
# Calibration factors
cals = np.array([[info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)]
for k in range(info['nchan'])], np.float64)
# Read the data
if preload:
data = read_tag(fid, data_tag.pos).data.astype(np.float64)
data *= cals[np.newaxis, :, :]
# Put it all together
tmin = first / info['sfreq']
tmax = last / info['sfreq']
event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
if mappings is None else mappings)
# In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = np.arange(len(events))
if drop_log is None:
            drop_log = [[] for _ in range(len(events))]
return (info, data, data_tag, events, event_id, tmin, tmax, baseline, name,
selection, drop_log, epoch_shape, cals)
@verbose
def read_epochs(fname, proj=True, add_eeg_ref=False, preload=True,
verbose=None):
"""Read epochs from a fif file
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or -epo.fif.gz.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Returns
-------
epochs : instance of Epochs
The epochs
"""
return EpochsFIF(fname, proj, add_eeg_ref, preload, verbose)
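# Illustrative usage sketch (not part of the original module): the file name is
# a made-up example; with preload=False the data are read lazily.
#
# >>> epochs = read_epochs('sample-epo.fif', preload=False)
# >>> data = epochs.get_data()        # epochs are loaded from disk here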
class _RawContainer(object):
def __init__(self, fid, data_tag, event_samps, epoch_shape, cals):
self.fid = fid
self.data_tag = data_tag
self.event_samps = event_samps
self.epoch_shape = epoch_shape
self.cals = cals
self.proj = False
def __del__(self):
self.fid.close()
class EpochsFIF(_BaseEpochs):
"""Epochs read from disk
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or -epo.fif.gz.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
mne.Epochs
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
"""
@verbose
def __init__(self, fname, proj=True, add_eeg_ref=True, preload=True,
verbose=None):
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
fnames = [fname]
ep_list = list()
raw = list()
for fname in fnames:
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
next_fname = _get_next_fname(fid, fname, tree)
(info, data, data_tag, events, event_id, tmin, tmax, baseline,
name, selection, drop_log, epoch_shape, cals) = \
_read_one_epoch_file(fid, tree, fname, preload)
# here we ignore missing events, since users should already be
# aware of missing events if they have saved data that way
epoch = _BaseEpochs(
info, data, events, event_id, tmin, tmax, baseline,
on_missing='ignore', selection=selection, drop_log=drop_log,
add_eeg_ref=False, proj=False, verbose=False)
ep_list.append(epoch)
if not preload:
# store everything we need to index back to the original data
raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
events[:, 0].copy(), epoch_shape,
cals))
if next_fname is not None:
fnames.append(next_fname)
(info, data, events, event_id, tmin, tmax, baseline, selection,
drop_log, _) = _concatenate_epochs(ep_list, with_data=preload)
# we need this uniqueness for non-preloaded data to work properly
if len(np.unique(events[:, 0])) != len(events):
raise RuntimeError('Event time samples were not unique')
# correct the drop log
assert len(drop_log) % len(fnames) == 0
step = len(drop_log) // len(fnames)
offsets = np.arange(step, len(drop_log) + 1, step)
for i1, i2 in zip(offsets[:-1], offsets[1:]):
other_log = drop_log[i1:i2]
for k, (a, b) in enumerate(zip(drop_log, other_log)):
if a == ['IGNORED'] and b != ['IGNORED']:
drop_log[k] = b
drop_log = drop_log[:step]
# call _BaseEpochs constructor
super(EpochsFIF, self).__init__(
info, data, events, event_id, tmin, tmax, baseline, raw=raw,
name=name, proj=proj, add_eeg_ref=add_eeg_ref,
preload_at_end=False, on_missing='ignore', selection=selection,
drop_log=drop_log, verbose=verbose)
# use the private property instead of drop_bad so that epochs
# are not all read from disk for preload=False
self._bad_dropped = True
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk"""
# Find the right file and offset to use
event_samp = self.events[idx, 0]
for raw in self._raw:
idx = np.where(raw.event_samps == event_samp)[0]
if len(idx) == 1:
idx = idx[0]
size = np.prod(raw.epoch_shape) * 4
offset = idx * size
break
else:
            # no raw file contained this event sample; this should not happen
raise RuntimeError('Correct epoch could not be found, please '
'contact mne-python developers')
# the following is equivalent to this, but faster:
#
# >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
# >>> data *= raw.cals[np.newaxis, :, :]
# >>> data = data[idx]
#
# Eventually this could be refactored in io/tag.py if other functions
# could make use of it
raw.fid.seek(raw.data_tag.pos + offset + 16, 0) # 16 = Tag header
data = np.fromstring(raw.fid.read(size), '>f4').astype(np.float64)
data.shape = raw.epoch_shape
data *= raw.cals
return data
def bootstrap(epochs, random_state=None):
"""Compute epochs selected by bootstrapping
Parameters
----------
epochs : Epochs instance
epochs data to be bootstrapped
random_state : None | int | np.random.RandomState
To specify the random generator state
Returns
-------
epochs : Epochs instance
The bootstrap samples
"""
if not epochs.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
rng = check_random_state(random_state)
epochs_bootstrap = epochs.copy()
n_events = len(epochs_bootstrap.events)
idx = rng.randint(0, n_events, n_events)
epochs_bootstrap = epochs_bootstrap[idx]
return epochs_bootstrap
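# Illustrative usage sketch (not part of the original module): drawing a
# bootstrap resample of preloaded epochs with a fixed seed.
#
# >>> epochs_bs = bootstrap(epochs, random_state=0)
# >>> len(epochs_bs) == len(epochs)
# True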
def _check_merge_epochs(epochs_list):
"""Aux function"""
if len(set(tuple(epochs.event_id.items()) for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for event_id")
if len(set(epochs.tmin for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for tmin")
if len(set(epochs.tmax for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for tmax")
if len(set(epochs.baseline for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for baseline")
@verbose
def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
verbose=None):
"""Concatenate channels, info and data from two Epochs objects
Parameters
----------
epochs_list : list of Epochs
Epochs object to concatenate.
name : str
Comment that describes the Epochs data created.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless there is no
EEG in the data).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to True if any of the input epochs have verbose=True.
Returns
-------
epochs : instance of Epochs
Concatenated epochs.
"""
if not all(e.preload for e in epochs_list):
raise ValueError('All epochs must be preloaded.')
info = _merge_info([epochs.info for epochs in epochs_list])
data = [epochs.get_data() for epochs in epochs_list]
_check_merge_epochs(epochs_list)
for d in data:
if len(d) != len(data[0]):
raise ValueError('all epochs must be of the same length')
data = np.concatenate(data, axis=1)
if len(info['chs']) != data.shape[1]:
err = "Data shape does not match channel number in measurement info"
raise RuntimeError(err)
events = epochs_list[0].events.copy()
all_same = all(np.array_equal(events, epochs.events)
for epochs in epochs_list[1:])
if not all_same:
raise ValueError('Events must be the same.')
proj = any(e.proj for e in epochs_list) or add_eeg_ref
if verbose is None:
verbose = any(e.verbose for e in epochs_list)
epochs = epochs_list[0].copy()
epochs.info = info
epochs.picks = None
epochs.name = name
epochs.verbose = verbose
epochs.events = events
epochs.preload = True
epochs._bad_dropped = True
epochs._data = data
epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
activate=proj)
return epochs
def _compare_epochs_infos(info1, info2, ind):
"""Compare infos"""
info1._check_consistency()
info2._check_consistency()
if info1['nchan'] != info2['nchan']:
raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind)
if info1['bads'] != info2['bads']:
raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind)
if info1['sfreq'] != info2['sfreq']:
raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind)
if set(info1['ch_names']) != set(info2['ch_names']):
raise ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind)
if len(info2['projs']) != len(info1['projs']):
raise ValueError('SSP projectors in epochs files must be the same')
if any(not _proj_equal(p1, p2) for p1, p2 in
zip(info2['projs'], info1['projs'])):
raise ValueError('SSP projectors in epochs files must be the same')
def _concatenate_epochs(epochs_list, with_data=True):
"""Auxiliary function for concatenating epochs."""
out = epochs_list[0]
data = [out.get_data()] if with_data else None
events = [out.events]
baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
info = deepcopy(out.info)
verbose = out.verbose
drop_log = deepcopy(out.drop_log)
event_id = deepcopy(out.event_id)
selection = out.selection
for ii, epochs in enumerate(epochs_list[1:]):
_compare_epochs_infos(epochs.info, info, ii)
if not np.array_equal(epochs.times, epochs_list[0].times):
raise ValueError('Epochs must have same times')
if epochs.baseline != baseline:
raise ValueError('Baseline must be same for all epochs')
if with_data:
data.append(epochs.get_data())
events.append(epochs.events)
selection = np.concatenate((selection, epochs.selection))
drop_log.extend(epochs.drop_log)
event_id.update(epochs.event_id)
events = np.concatenate(events, axis=0)
if with_data:
data = np.concatenate(data, axis=0)
return (info, data, events, event_id, tmin, tmax, baseline, selection,
drop_log, verbose)
def _finish_concat(info, data, events, event_id, tmin, tmax, baseline,
selection, drop_log, verbose):
"""Helper to finish concatenation for epochs not read from disk"""
events[:, 0] = np.arange(len(events)) # arbitrary after concat
selection = np.where([len(d) == 0 for d in drop_log])[0]
out = _BaseEpochs(info, data, events, event_id, tmin, tmax,
baseline=baseline, add_eeg_ref=False,
selection=selection, drop_log=drop_log,
proj=False, on_missing='ignore', verbose=verbose)
out.drop_bad()
return out
def concatenate_epochs(epochs_list):
"""Concatenate a list of epochs into one epochs object
Parameters
----------
epochs_list : list
list of Epochs instances to concatenate (in order).
Returns
-------
epochs : instance of Epochs
The result of the concatenation (first Epochs instance passed in).
Notes
-----
.. versionadded:: 0.9.0
"""
return _finish_concat(*_concatenate_epochs(epochs_list))
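# Illustrative usage sketch (not part of the original module): concatenating
# epochs from two runs that share channels, times and baseline.
#
# >>> all_epochs = concatenate_epochs([epochs_run1, epochs_run2])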
@verbose
def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None,
origin='auto', weight_all=True, int_order=8, ext_order=3,
destination=None, ignore_ref=False, return_mapping=False,
verbose=None):
"""Average data using Maxwell filtering, transforming using head positions
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
head_pos : array | tuple | None
The array should be of shape ``(N, 10)``, holding the position
parameters as returned by e.g. `read_head_pos`. For backward
compatibility, this can also be a tuple of ``(trans, rot t)``
as returned by `head_pos_to_trans_rot_t`.
orig_sfreq : float | None
The original sample frequency of the data (that matches the
event sample numbers in ``epochs.events``). Can be ``None``
if data have not been decimated or resampled.
picks : array-like of int | None
If None only MEG, EEG, SEEG, and ECoG channels are kept
otherwise the channels indices in picks are kept.
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in head
coords and in meters. The default is ``'auto'``, which means
a head-digitization-based origin fit.
weight_all : bool
If True, all channels are weighted by the SSS basis weights.
If False, only MEG channels are weighted, other channels
receive uniform weight per epoch.
int_order : int
Order of internal component of spherical expansion.
ext_order : int
Order of external component of spherical expansion.
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
.. versionadded:: 0.12
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
return_mapping : bool
If True, return the mapping matrix.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : instance of Evoked
The averaged epochs.
See Also
--------
mne.preprocessing.maxwell_filter
mne.chpi.read_head_pos
Notes
-----
The Maxwell filtering version of this algorithm is described in [1]_,
in section V.B "Virtual signals and movement correction", equations
40-44. For additional validation, see [2]_.
Regularization has not been added because in testing it appears to
decrease dipole localization accuracy relative to using all components.
Fine calibration and cross-talk cancellation, however, could be added
to this algorithm based on user demand.
.. versionadded:: 0.11
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
.. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements
of children in MEG: Quantification, effects on source
           estimation, and compensation." NeuroImage 40:541–550, 2008.
"""
from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads,
_check_usable, _col_norm_pinv,
_get_n_moments, _get_mf_picks,
_prep_mf_coils, _check_destination,
_remove_meg_projs)
if head_pos is None:
raise TypeError('head_pos must be provided and cannot be None')
from .chpi import head_pos_to_trans_rot_t
if not isinstance(epochs, _BaseEpochs):
raise TypeError('epochs must be an instance of Epochs, not %s'
% (type(epochs),))
orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq
orig_sfreq = float(orig_sfreq)
if isinstance(head_pos, np.ndarray):
head_pos = head_pos_to_trans_rot_t(head_pos)
trn, rot, t = head_pos
del head_pos
_check_usable(epochs)
origin = _check_origin(origin, epochs.info, 'head')
recon_trans = _check_destination(destination, epochs.info, True)
logger.info('Aligning and averaging up to %s epochs'
% (len(epochs.events)))
if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])):
raise RuntimeError('Epochs must have monotonically increasing events')
meg_picks, _, _, good_picks, coil_scale, _ = \
_get_mf_picks(epochs.info, int_order, ext_order, ignore_ref)
n_channels, n_times = len(epochs.ch_names), len(epochs.times)
other_picks = np.setdiff1d(np.arange(n_channels), meg_picks)
data = np.zeros((n_channels, n_times))
count = 0
# keep only MEG w/bad channels marked in "info_from"
info_from = pick_info(epochs.info, good_picks, copy=True)
all_coils_recon = _prep_mf_coils(epochs.info, ignore_ref=ignore_ref)
all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref)
# remove MEG bads in "to" info
info_to = deepcopy(epochs.info)
_reset_meg_bads(info_to)
# set up variables
w_sum = 0.
n_in, n_out = _get_n_moments([int_order, ext_order])
S_decomp = 0. # this will end up being a weighted average
last_trans = None
decomp_coil_scale = coil_scale[good_picks]
exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
origin=origin)
for ei, epoch in enumerate(epochs):
event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq
use_idx = np.where(t <= event_time)[0]
if len(use_idx) == 0:
trans = epochs.info['dev_head_t']['trans']
else:
use_idx = use_idx[-1]
trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]),
[[0., 0., 0., 1.]]])
loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000))
if last_trans is None or not np.allclose(last_trans, trans):
logger.info(' Processing epoch %s (device location: %s mm)'
% (ei + 1, loc_str))
reuse = False
last_trans = trans
else:
logger.info(' Processing epoch %s (device location: same)'
% (ei + 1,))
reuse = True
epoch = epoch.copy() # because we operate inplace
if not reuse:
S = _trans_sss_basis(exp, all_coils, trans,
coil_scale=decomp_coil_scale)
# Get the weight from the un-regularized version
weight = np.sqrt(np.sum(S * S)) # frobenius norm (eq. 44)
# XXX Eventually we could do cross-talk and fine-cal here
S *= weight
S_decomp += S # eq. 41
epoch[slice(None) if weight_all else meg_picks] *= weight
data += epoch # eq. 42
w_sum += weight
count += 1
del info_from
mapping = None
if count == 0:
data.fill(np.nan)
else:
data[meg_picks] /= w_sum
data[other_picks] /= w_sum if weight_all else count
# Finalize weighted average decomp matrix
S_decomp /= w_sum
# Get recon matrix
# (We would need to include external here for regularization to work)
exp['ext_order'] = 0
S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans)
exp['ext_order'] = ext_order
# We could determine regularization on basis of destination basis
# matrix, restricted to good channels, as regularizing individual
# matrices within the loop above does not seem to work. But in
# testing this seemed to decrease localization quality in most cases,
# so we do not provide the option here.
S_recon /= coil_scale
# Invert
pS_ave = _col_norm_pinv(S_decomp)[0][:n_in]
pS_ave *= decomp_coil_scale.T
# Get mapping matrix
mapping = np.dot(S_recon, pS_ave)
# Apply mapping
data[meg_picks] = np.dot(mapping, data[good_picks])
info_to['dev_head_t'] = recon_trans # set the reconstruction transform
evoked = epochs._evoked_from_epoch_data(data, info_to, picks,
n_events=count, kind='average')
_remove_meg_projs(evoked) # remove MEG projectors, they won't apply now
logger.info('Created Evoked dataset from %s epochs' % (count,))
return (evoked, mapping) if return_mapping else evoked
|
bsd-3-clause
| -8,414,976,939,089,250,000
| 40.604569
| 80
| 0.568995
| false
| 4.072918
| false
| false
| false
|
Ximpia/ximpia
|
ximpia/xpsite/constants.py
|
1
|
3312
|
# coding: utf-8
class Services:
USERS = 'Users'
class Slugs:
LOGIN = 'login'
LOGOUT = 'logout'
REMINDER_NEW_PASSWORD = 'reminder-new-password'
CHANGE_PASSWORD = 'change-password'
SIGNUP = 'signup'
ACTIVATION_USER = 'activation-user'
HOME_LOGIN = 'in'
SITE = 'site'
REQUEST_REMINDER = 'request-reminder'
FINALIZE_REMINDER = 'finalize-reminder'
ACTIVATE_USER = 'activate-user'
class Views:
LOGIN = 'login'
LOGOUT = 'logout'
REMINDER_NEW_PASSWORD = 'reminderNewPassword'
CHANGE_PASSWORD = 'changePassword'
SIGNUP = 'signup'
ACTIVATION_USER = 'activationUser'
HOME_LOGIN = 'homeLogin'
class Actions:
LOGIN = 'login'
REQUEST_REMINDER = 'requestReminder'
FINALIZE_REMINDER = 'finalizeReminder'
LOGOUT = 'logout'
SIGNUP = 'signup'
CHANGE_PASSWORD = 'changePassword'
ACTIVATE_USER = 'activateUser'
class Menus:
SYS = 'sys'
SIGN_OUT = 'signOut'
CHANGE_PASSWORD = 'changePassword'
HOME_LOGIN = 'homeLogin'
HOME = 'home'
LOGIN = 'login'
SIGNUP = 'signup'
class Tmpls:
LOGIN = 'login'
PASSWORD_REMINDER = 'password_reminder'
LOGOUT = 'logout'
CHANGE_PASSWORD = 'change_password'
SIGNUP = 'signup'
REMINDER_NEW_PASSWORD = 'reminder_new_password'
ACTIVATION_USER = 'activation_user'
HOME_LOGIN = 'home_login'
class Flows:
pass
#APP = 'site'
XIMPIA = 'ximpia'
TWITTER = 'twitter'
FACEBOOK = 'facebook'
XING = 'xing'
LINKEDIN = 'linkedin'
GOOGLE = 'google'
EMAIL = 'email'
PASSWORD = 'password'
SMS = 'sms'
FILE_QUOTA_DEFAULT = 2000
FILE_QUOTA_ORG = 5000
MSG_MODE_REC = 'received'
MSG_MODE_SENT = 'sent'
USER_SETTINGS = 'user_settings'
SETTINGS_ALLOW_PRIVATE_GRP_SUBS = 'ALLOW_PRIVATE_GRP_SUBS'
NUMBER_MATCHES = 10
OK = 'ok'
BLOCKED = 'blocked'
UNBLOCKED = 'unblocked'
ERROR = 'error'
ARCHIVE = 'archive'
UNARCHIVE = 'unarchive'
PROFESSIONAL = 'professional'
USER = 'user'
ORGANIZATION = 'organization'
SETTINGS_DEFAULT = ''
IMPORT = 'import'
GMAIL = 'gmail'
YAHOO = 'yahoo'
MSN = 'msn'
HOME = 'home'
WORK = 'work'
MOBILE = 'mobile'
WORK_MOBILE = 'work_mobile'
FAX = 'fax'
NETWORK = 'network'
SITE = 'site'
BLOG = 'blog'
FACEBOOK_PAGE = 'facebook_page'
IM = 'im'
RESERVED_GROUP_NAME_LIST = ['ximpia']
PENDING = 'pending'
USED = 'used'
NUMBER_INVITATIONS_USER = 15
NUMBER_INVITATIONS_STAFF = 500
SOCIAL_NETWORK = 'social_network'
# Signup constants
SIGNUP_USER_GROUP_ID = '1'
# Parameters
PARAM_LOGIN = 'LOGIN'
PARAM_REMINDER_DAYS = 'REMINDER_DAYS'
PARAM_USER_STATUS = 'USER_STATUS'
PARAM_USER_STATUS_PENDING = 'PENDING'
PARAM_USER_STATUS_ACTIVE = 'ACTIVE'
PARAM_ADDRESS_TYPE = 'ADDRESS_TYPE'
PARAM_ADDRESS_TYPE_PERSONAL = 'PERSONAL'
PARAM_CATEGORY_TYPE = 'CATEGORY_TYPE'
KEY_HAS_VALIDATED_EMAIL = 'HAS_VALIDATED_EMAIL'
# Cookies
COOKIE_LOGIN_REDIRECT = 'XP_LR'
# Meta Keys
META_REMINDER_ID = 'REMINDER_ID'
META_RESET_PASSWORD_DATE = 'RESET_PASSWORD_DATE'
# Settings
SET_SITE_SIGNUP_INVITATION = 'SITE_SIGNUP_INVITATION'
SET_SIGNUP_SOCIAL_NETWORK = 'SIGNUP_SOCIAL_NETWORK'
SET_SIGNUP_USER_PASSWORD = 'SIGNUP_USER_PASSWORD'
SET_REMINDER_DAYS = 'REMINDER_DAYS'
SET_NUMBER_RESULTS_LIST = 'NUMBER_RESULTS_LIST'
|
apache-2.0
| 3,870,277,391,610,701,300
| 22.533333
| 58
| 0.658514
| false
| 2.723684
| false
| false
| false
|
profxj/desispec
|
py/desispec/io/frame.py
|
1
|
3003
|
"""
desispec.io.frame
=================
IO routines for frame.
"""
import os.path
import numpy as np
import scipy,scipy.sparse
from astropy.io import fits
from desispec.frame import Frame
from desispec.io import findfile
from desispec.io.util import fitsheader, native_endian, makepath
from desispec.log import get_logger
log = get_logger()
def write_frame(outfile, frame, header=None):
"""Write a frame fits file and returns path to file written.
Args:
outfile: full path to output file, or tuple (night, expid, channel)
frame: desispec.frame.Frame object with wave, flux, ivar...
header: optional astropy.io.fits.Header or dict to override frame.header
Returns:
full filepath of output file that was written
Note:
to create a Frame object to pass into write_frame,
frame = Frame(wave, flux, ivar, resolution_data)
"""
outfile = makepath(outfile, 'frame')
if header is not None:
hdr = fitsheader(header)
else:
hdr = fitsheader(frame.header)
if 'SPECMIN' not in hdr:
hdr['SPECMIN'] = 0
if 'SPECMAX' not in hdr:
hdr['SPECMAX'] = hdr['SPECMIN'] + frame.nspec
hdus = fits.HDUList()
x = fits.PrimaryHDU(frame.flux, header=hdr)
x.header['EXTNAME'] = 'FLUX'
hdus.append(x)
hdus.append( fits.ImageHDU(frame.ivar, name='IVAR') )
hdus.append( fits.ImageHDU(frame.mask, name='MASK') )
hdus.append( fits.ImageHDU(frame.wave, name='WAVELENGTH') )
hdus.append( fits.ImageHDU(frame.resolution_data, name='RESOLUTION' ) )
hdus.writeto(outfile, clobber=True)
return outfile
def read_frame(filename, nspec=None):
"""Reads a frame fits file and returns its data.
Args:
filename: path to a file, or (night, expid, camera) tuple where
night = string YEARMMDD
expid = integer exposure ID
camera = b0, r1, .. z9
Returns:
desispec.Frame object with attributes wave, flux, ivar, etc.
"""
#- check if filename is (night, expid, camera) tuple instead
if not isinstance(filename, (str, unicode)):
night, expid, camera = filename
filename = findfile('frame', night, expid, camera)
if not os.path.isfile(filename) :
        raise IOError("cannot open " + filename)
fx = fits.open(filename, uint=True)
hdr = fx[0].header
flux = native_endian(fx['FLUX'].data)
ivar = native_endian(fx['IVAR'].data)
wave = native_endian(fx['WAVELENGTH'].data)
if 'MASK' in fx:
mask = native_endian(fx['MASK'].data)
else:
mask = None #- let the Frame object create the default mask
resolution_data = native_endian(fx['RESOLUTION'].data)
fx.close()
if nspec is not None:
flux = flux[0:nspec]
ivar = ivar[0:nspec]
resolution_data = resolution_data[0:nspec]
# return flux,ivar,wave,resolution_data, hdr
return Frame(wave, flux, ivar, mask, resolution_data, hdr)
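
# Example usage (a minimal sketch, not part of the original module): write a
# small Frame to disk and read it back. The array shapes and header values are
# illustrative assumptions, and the Frame() call mirrors the positional pattern
# used by read_frame above (wave, flux, ivar, mask, resolution_data).
if __name__ == '__main__':
    nspec, nwave, ndiag = 2, 20, 3
    wave = np.arange(nwave, dtype=float)
    flux = np.ones((nspec, nwave))
    ivar = np.ones((nspec, nwave))
    resolution_data = np.zeros((nspec, ndiag, nwave))
    frame = Frame(wave, flux, ivar, None, resolution_data)
    outfile = write_frame('frame-example.fits', frame, header={'FLAVOR': 'none'})
    roundtrip = read_frame(outfile)
    print(roundtrip.flux.shape)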
|
bsd-3-clause
| 1,386,145,176,748,292,600
| 29.03
| 80
| 0.638362
| false
| 3.504084
| false
| false
| false
|
wpoa/wiki-imports
|
lib/python2.7/site-packages/pywikibot-2.0b1-py2.7.egg/pywikibot/userinterfaces/terminal_interface_win32.py
|
1
|
2982
|
# -*- coding: utf-8 -*-
#
# (C) Pywikipedia bot team, 2003-2012
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: d6e1d28165e1e0b54b746762992665e7d30cab04 $'
import re
from . import terminal_interface_base
try:
import ctypes
ctypes_found = True
except ImportError:
ctypes_found = False
windowsColors = {
'default': 7,
'black': 0,
'blue': 1,
'green': 2,
'aqua': 3,
'red': 4,
'purple': 5,
'yellow': 6,
'lightgray': 7,
'gray': 8,
'lightblue': 9,
'lightgreen': 10,
'lightaqua': 11,
'lightred': 12,
'lightpurple': 13,
'lightyellow': 14,
'white': 15,
}
colorTagR = re.compile('\03{(?P<name>%s)}' % '|'.join(list(windowsColors.keys())))
# Compat for python <= 2.5
class Win32BaseUI(terminal_interface_base.UI):
def __init__(self):
terminal_interface_base.UI.__init__(self)
self.encoding = 'ascii'
class Win32CtypesUI(Win32BaseUI):
def __init__(self):
Win32BaseUI.__init__(self)
from .win32_unicode import stdin, stdout, stderr, argv
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.argv = argv
self.encoding = 'utf-8'
def printColorized(self, text, targetStream):
std_out_handle = ctypes.windll.kernel32.GetStdHandle(-11)
# Color tags might be cascaded, e.g. because of transliteration.
# Therefore we need this stack.
colorStack = []
tagM = True
while tagM:
tagM = colorTagR.search(text)
if tagM:
# print the text up to the tag.
targetStream.write(text[:tagM.start()].encode(self.encoding, 'replace'))
newColor = tagM.group('name')
if newColor == 'default':
if len(colorStack) > 0:
colorStack.pop()
if len(colorStack) > 0:
lastColor = colorStack[-1]
else:
lastColor = 'default'
ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, windowsColors[lastColor])
else:
colorStack.append(newColor)
# set the new color
ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, windowsColors[newColor])
text = text[tagM.end():]
# print the rest of the text
targetStream.write(text.encode(self.encoding, 'replace'))
# just to be sure, reset the color
ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, windowsColors['default'])
def _raw_input(self):
data = self.stdin.readline()
if '\x1a' in data:
raise EOFError()
return data.strip()
if ctypes_found:
Win32UI = Win32CtypesUI
else:
Win32UI = Win32BaseUI
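
# Example (a sketch, not part of the original module): text handed to
# Win32UI().printColorized(text, stream) uses pywikibot's inline colour tags
# matched by colorTagR above, e.g. u'\03{lightred}error\03{default} done';
# each tag name is mapped to a console attribute from windowsColors.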
|
gpl-3.0
| -2,789,536,079,870,463,000
| 29.742268
| 112
| 0.550302
| false
| 3.690594
| false
| false
| false
|
ZeikJT/DisgaeaHacking
|
unpacker.py
|
1
|
4726
|
#!/usr/bin/python
import os,struct,sys
''' Version 0.1.0
ARC, DAT and MPP unpacker. '''
class FileBundle:
def __init__(self):
self.files = []
def addFiles(self):
raise NotImplementedError()
def extractFiles(self, outputFolder):
if not os.path.exists(outputFolder):
os.mkdir(outputFolder)
class FileBundleWithSizes(FileBundle):
def addFile(self, fileName, fileStart, fileSize):
self.files.append({'fileName': fileName, 'fileStart': fileStart, 'fileSize': fileSize})
def extractFiles(self, outputFolder, inputFile):
super().extractFiles(outputFolder)
for fileData in self.files:
inputFile.seek(fileData['fileStart'])
outputFile = open(os.path.join(outputFolder, fileData['fileName']), 'wb')
outputFile.write(inputFile.read(fileData['fileSize']))
outputFile.close()
class FileBundleWithOffsets(FileBundle):
def addFile(self, fileName, fileStart):
self.files.append({'fileName': fileName, 'fileStart': fileStart})
def extractFiles(self, outputFolder, inputFile):
super().extractFiles(outputFolder)
fileSize = os.fstat(inputFile.fileno()).st_size
for i in range(0, len(self.files)):
fileData = self.files[i]
fileEnd = fileSize if (i == len(self.files) - 1) else self.files[i + 1]['fileStart']
inputFile.seek(fileData['fileStart'])
outputFile = open(os.path.join(outputFolder, fileData['fileName']), 'wb')
outputFile.write(inputFile.read(fileEnd - fileData['fileStart']))
outputFile.close()
def unpackMPP(filePath):
mpp = open(filePath, 'rb')
u1,isNew,unknown1,unknown2,fileSize,dataOffset = struct.unpack('<HHHHLL', mpp.read(16))
if fileSize != os.fstat(mpp.fileno()).st_size or dataOffset <= 0xF:
print('Invalid header', filePath)
else:
fileBundle = FileBundleWithOffsets()
fileBundle.addFile('0', dataOffset)
i = 1
while mpp.tell() < dataOffset:
fileOffset, = struct.unpack('<L', mpp.read(4))
if fileOffset == 0:
break
fileBundle.addFile(str(i), fileOffset)
i += 1
fileBundle.extractFiles(filePath + ' Files', mpp)
mpp.close()
def unpackPSPFS_V1(file, filePath):
fileCount,unknown1 = struct.unpack('<LL', file.read(8))
if fileCount == 0:
        print('Invalid fileCount {}:'.format(fileCount), filePath)
else:
fileBundle = FileBundleWithSizes()
for i in range(0, fileCount):
name = file.read(44).split(b'\x00')[0].decode()
size,offset = struct.unpack('<LL', file.read(8))
fileBundle.addFile(name, offset, size)
fileBundle.extractFiles(filePath + ' Files', file)
def unpack0x00020000(file, filePath):
fileCount,unknown1 = struct.unpack('<LL', file.read(8))
if fileCount == 0:
        print('Invalid file count {}:'.format(fileCount), filePath)
elif unknown1 != 0x00020000:
print('Invalid header:', filePath)
else:
fileBundle = FileBundleWithOffsets()
for i in range(0, fileCount):
fileBundle.addFile(str(i), struct.unpack('<L', file.read(4))[0])
fileBundle.extractFiles(filePath + ' Files', file)
def unpackDAT(filePath):
dat = open(filePath, 'rb')
if dat.read(8).decode() == 'PSPFS_V1':
unpackPSPFS_V1(dat, filePath)
else:
dat.seek(0)
unpack0x00020000(dat, filePath)
dat.close()
def unpackARC(filePath):
arc = open(filePath, 'rb')
dsarcidx,fileCount,unknown1 = struct.unpack('<8sLL', arc.read(16))
if dsarcidx.decode() != 'DSARCIDX' or unknown1 != 0:
print('Invalid header:', filePath)
elif fileCount == 0:
        print('Invalid file count {}:'.format(fileCount), filePath)
else:
arc.seek(int((0x1F + (fileCount * 2)) / 0x10) * 0x10)
fileBundle = FileBundleWithSizes()
for i in range(0, fileCount):
name = arc.read(40).split(b'\x00')[0].decode()
size,offset = struct.unpack('<LL', arc.read(8))
fileBundle.addFile(name, offset, size)
fileBundle.extractFiles(filePath + ' Files', arc)
arc.close()
for arg in sys.argv[1:]:
if os.path.isfile(arg):
if arg.endswith('.ARC'):
unpackARC(arg)
elif arg.endswith('.DAT'):
unpackDAT(arg)
elif arg.endswith('.MPP'):
unpackMPP(arg)
else:
print('Unknown file extension:', arg)
else:
print('File not accessible:', arg)
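
# Example invocation (a sketch; the archive names below are placeholders):
#   python unpacker.py GAME.ARC SCRIPT.DAT MOVIE.MPP
# Each recognised archive is extracted into a sibling '<name> Files' directory,
# as done by the extractFiles() calls above.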
|
mit
| 4,212,476,860,702,476,000
| 37.383333
| 96
| 0.596064
| false
| 3.649421
| false
| false
| false
|
tanghaibao/goatools
|
goatools/semantic.py
|
1
|
9628
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Compute semantic similarities between GO terms. Borrowed from a book chapter by
Alex Warwick Vesztrocy and Christophe Dessimoz (thanks). For details, please
check out:
notebooks/semantic_similarity.ipynb
"""
from __future__ import print_function
import sys
from collections import Counter
from collections import defaultdict
from goatools.godag.consts import NAMESPACE2GO
from goatools.godag.consts import NAMESPACE2NS
from goatools.godag.go_tasks import get_go2ancestors
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.godag.relationship_combos import RelationshipCombos
from goatools.anno.update_association import clean_anno
from goatools.utils import get_b2aset
class TermCounts:
'''
    TermCounts counts the term counts for each GO term in a set of annotations
'''
# pylint: disable=too-many-instance-attributes
def __init__(self, go2obj, annots, relationships=None, **kws):
'''
        Initialise the counts and the genes annotated to each GO term
'''
_prt = kws.get('prt')
# Backup
self.go2obj = go2obj # Full GODag
self.annots, go_alts = clean_anno(annots, go2obj, _prt)[:2]
# Genes annotated to all associated GO, including inherited up ancestors'
_relationship_set = RelationshipCombos(go2obj).get_set(relationships)
self.go2genes = self._init_go2genes(_relationship_set, go2obj)
self.gene2gos = get_b2aset(self.go2genes)
# Annotation main GO IDs (prefer main id to alt_id)
self.goids = set(self.go2genes.keys())
self.gocnts = Counter({go:len(geneset) for go, geneset in self.go2genes.items()})
# Get total count for each branch: BP MF CC
self.aspect_counts = {
'biological_process': self.gocnts.get(NAMESPACE2GO['biological_process'], 0),
'molecular_function': self.gocnts.get(NAMESPACE2GO['molecular_function'], 0),
'cellular_component': self.gocnts.get(NAMESPACE2GO['cellular_component'], 0)}
self._init_add_goid_alt(go_alts)
self.gosubdag = GoSubDag(
set(self.gocnts.keys()),
go2obj,
tcntobj=self,
relationships=_relationship_set,
prt=None)
if _prt:
self.prt_objdesc(_prt)
def get_annotations_reversed(self):
"""Return go2geneset for all GO IDs explicitly annotated to a gene"""
return set.union(*get_b2aset(self.annots))
def _init_go2genes(self, relationship_set, godag):
'''
Fills in the genes annotated to each GO, including ancestors
Due to the ontology structure, gene products annotated to
        a GO Term are also annotated to all ancestors.
'''
go2geneset = defaultdict(set)
go2up = get_go2ancestors(set(godag.values()), relationship_set)
# Fill go-geneset dict with GO IDs in annotations and their corresponding counts
for geneid, goids_anno in self.annots.items():
            # Make a union of all the terms for a gene; if term parents are
            # propagated, they won't get double-counted for the gene
allterms = set()
for goid_main in goids_anno:
allterms.add(goid_main)
if goid_main in go2up:
allterms.update(go2up[goid_main])
# Add 1 for each GO annotated to this gene product
for ancestor in allterms:
go2geneset[ancestor].add(geneid)
return dict(go2geneset)
def _init_add_goid_alt(self, not_main):
'''
Add alternate GO IDs to term counts. Report GO IDs not found in GO DAG.
'''
if not not_main:
return
for go_id in not_main:
if go_id in self.go2obj:
goid_main = self.go2obj[go_id].item_id
self.gocnts[go_id] = self.gocnts[goid_main]
self.go2genes[go_id] = self.go2genes[goid_main]
def get_count(self, go_id):
'''
Returns the count of that GO term observed in the annotations.
'''
return self.gocnts[go_id]
def get_total_count(self, aspect):
'''
Gets the total count that's been precomputed.
'''
return self.aspect_counts[aspect]
def get_term_freq(self, go_id):
'''
Returns the frequency at which a particular GO term has
been observed in the annotations.
'''
num_ns = float(self.get_total_count(self.go2obj[go_id].namespace))
return float(self.get_count(go_id))/num_ns if num_ns != 0 else 0
def get_gosubdag_all(self, prt=sys.stdout):
'''
Get GO DAG subset include descendants which are not included in the annotations
'''
goids = set()
for gos in self.gosubdag.rcntobj.go2descendants.values():
goids.update(gos)
return GoSubDag(goids, self.go2obj, self.gosubdag.relationships, tcntobj=self, prt=prt)
def prt_objdesc(self, prt=sys.stdout):
"""Print TermCount object description"""
ns_tot = sorted(self.aspect_counts.items())
cnts = ['{NS}({N:,})'.format(NS=NAMESPACE2NS[ns], N=n) for ns, n in ns_tot if n != 0]
go_msg = "TermCounts {CNT}".format(CNT=' '.join(cnts))
prt.write('{GO_MSG} {N:,} genes\n'.format(GO_MSG=go_msg, N=len(self.gene2gos)))
self.gosubdag.prt_objdesc(prt, go_msg)
def get_info_content(go_id, termcounts):
'''
Retrieve the information content of a GO term.
'''
ntd = termcounts.gosubdag.go2nt.get(go_id)
return ntd.tinfo if ntd else 0.0
def resnik_sim(go_id1, go_id2, godag, termcounts):
'''
Computes Resnik's similarity measure.
'''
goterm1 = godag[go_id1]
goterm2 = godag[go_id2]
if goterm1.namespace == goterm2.namespace:
msca_goid = deepest_common_ancestor([go_id1, go_id2], godag)
return get_info_content(msca_goid, termcounts)
return None
def lin_sim(goid1, goid2, godag, termcnts, dfltval=None):
'''
Computes Lin's similarity measure.
'''
sim_r = resnik_sim(goid1, goid2, godag, termcnts)
return lin_sim_calc(goid1, goid2, sim_r, termcnts, dfltval)
def lin_sim_calc(goid1, goid2, sim_r, termcnts, dfltval=None):
'''
Computes Lin's similarity measure using pre-calculated Resnik's similarities.
'''
# If goid1 and goid2 are in the same namespace
if sim_r is not None:
tinfo1 = get_info_content(goid1, termcnts)
tinfo2 = get_info_content(goid2, termcnts)
info = tinfo1 + tinfo2
# Both GO IDs must be annotated
if tinfo1 != 0.0 and tinfo2 != 0.0 and info != 0:
return (2*sim_r)/info
if termcnts.go2obj[goid1].item_id == termcnts.go2obj[goid2].item_id:
return 1.0
# The GOs are separated by the root term, so are not similar
if sim_r == 0.0:
return 0.0
return dfltval
def common_parent_go_ids(goids, godag):
'''
This function finds the common ancestors in the GO
tree of the list of goids in the input.
'''
# Find main GO ID candidates from first main or alt GO ID
rec = godag[goids[0]]
candidates = rec.get_all_parents()
candidates.update({rec.item_id})
# Find intersection with second to nth GO ID
for goid in goids[1:]:
rec = godag[goid]
parents = rec.get_all_parents()
parents.update({rec.item_id})
# Find the intersection with the candidates, and update.
candidates.intersection_update(parents)
return candidates
def deepest_common_ancestor(goterms, godag):
'''
This function gets the nearest common ancestor
using the above function.
Only returns single most specific - assumes unique exists.
'''
# Take the element at maximum depth.
return max(common_parent_go_ids(goterms, godag), key=lambda t: godag[t].depth)
def min_branch_length(go_id1, go_id2, godag, branch_dist):
'''
Finds the minimum branch length between two terms in the GO DAG.
'''
# First get the deepest common ancestor
goterm1 = godag[go_id1]
goterm2 = godag[go_id2]
if goterm1.namespace == goterm2.namespace:
dca = deepest_common_ancestor([go_id1, go_id2], godag)
# Then get the distance from the DCA to each term
dca_depth = godag[dca].depth
depth1 = goterm1.depth - dca_depth
depth2 = goterm2.depth - dca_depth
# Return the total distance - i.e., to the deepest common ancestor and back.
return depth1 + depth2
if branch_dist is not None:
return goterm1.depth + goterm2.depth + branch_dist
return None
def semantic_distance(go_id1, go_id2, godag, branch_dist=None):
'''
Finds the semantic distance (minimum number of connecting branches)
between two GO terms.
'''
return min_branch_length(go_id1, go_id2, godag, branch_dist)
def semantic_similarity(go_id1, go_id2, godag, branch_dist=None):
'''
Finds the semantic similarity (inverse of the semantic distance)
between two GO terms.
'''
dist = semantic_distance(go_id1, go_id2, godag, branch_dist)
if dist is not None:
return 1.0 / float(dist) if dist != 0 else 1.0
return None
# 1. Schlicker, Andreas et al.
# "A new measure for functional similarity of gene products based on Gene Ontology"
# BMC Bioinformatics (2006)
#
# 2. Yang, Haixuan et al.
# Improving GO semantic similarity measures by exploring the ontology
# beneath the terms and modelling uncertainty
# Bioinformatics (2012)
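
# Example usage (a minimal sketch, not part of the original module). It assumes
# a GO DAG loaded with goatools' obo_parser and a plain {gene: set(GO IDs)}
# annotation dict; 'go-basic.obo' and the gene/GO IDs below are placeholders.
if __name__ == '__main__':
    from goatools.obo_parser import GODag
    godag = GODag('go-basic.obo')
    annots = {'geneA': {'GO:0008150'}, 'geneB': {'GO:0008150'}}
    tcnts = TermCounts(godag, annots)
    print(semantic_similarity('GO:0008150', 'GO:0008150', godag))
    print(resnik_sim('GO:0008150', 'GO:0008150', godag, tcnts))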
|
bsd-2-clause
| -4,900,397,585,464,170,000
| 35.608365
| 95
| 0.634919
| false
| 3.317712
| false
| false
| false
|
gajim/gajim
|
test/gtk/htmltextview.py
|
1
|
5593
|
from unittest.mock import MagicMock
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gajim.common import app
from gajim.common import configpaths
configpaths.init()
from gajim import gui
gui.init('gtk')
from gajim.common.helpers import AdditionalDataDict
from gajim.conversation_textview import ConversationTextview
from gajim.gui_interface import Interface
app.settings = MagicMock()
app.plugin_manager = MagicMock()
app.logger = MagicMock()
app.cert_store = MagicMock()
app.storage = MagicMock()
app.interface = Interface()
XHTML = [
'''
<div>
<span style="color: red; text-decoration:underline">Hello</span>
<br/>\n
<img src="http://images.slashdot.org/topics/topicsoftware.gif"/>
<br/>\n
<span style="font-size: 500%; font-family: serif">World</span>\n
</div>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p xmlns='http://www.w3.org/1999/xhtml'>Look here
<a href='http://google.com/'>Google</a>
</p>
<br/>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p style='font-size:large'>
<span style='font-style: italic'>O
<span style='font-size:larger'>M</span>G
</span>, I'm <span style='color:green'>green</span> with
<span style='font-weight: bold'>envy</span>!
</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p>
As Emerson said in his essay
<span style='font-style: italic; background-color:cyan'>
Self-Reliance</span>:
</p>
<p style='margin-left: 5px; margin-right: 2%'>
"A foolish consistency is the hobgoblin of little minds."
</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p style='text-align:center'>
Hey, are you licensed to <a href='http://www.jabber.org/'>Jabber</a>?
</p>
<p style='text-align:right'>
<img src='http://www.xmpp.org/images/psa-license.jpg'
alt='A License to Jabber' width='50%' height='50%'/>
</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<ul style='background-color:rgb(120,140,100)'>
<li> One </li>
<li> Two </li>
<li> Three </li>
</ul>
<hr />
<pre style="background-color:rgb(120,120,120)">def fac(n):
def faciter(n,acc):
if n==0: return acc
return faciter(n-1, acc*n)
if n<0: raise ValueError('Must be non-negative')
return faciter(n,1)</pre>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<ol style='background-color:rgb(120,140,100)'>
<li> One </li>
<li>
Two is nested:
<ul style='background-color:rgb(200,200,100)'>
<li> One </li>
<li style='font-size:50%'> Two </li>
<li style='font-size:200%'> Three </li>
<li style='font-size:9999pt'> Four </li>
</ul>
</li>
<li> Three </li>
</ol>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p>
<strong>
<a href='xmpp:example@example.org'>xmpp link</a>
</strong>:
</p>
<div xmlns='http://www.w3.org/1999/xhtml'>
<cite style='margin: 7px;' title='xmpp:examples@example.org'>
<p>
<strong>examples@example.org wrote:</strong>
</p>
<p>this cite - bla bla bla, smile- :-) …</p>
</cite>
<div>
<p>some text</p>
</div>
</div>
<p/>
<p>#232/1</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<img src='data:image/png;base64,R0lGODdhMAAwAPAAAAAAAP///ywAAAAAMAAw\
AAAC8IyPqcvt3wCcDkiLc7C0qwyGHhSWpjQu5yqmCYsapyuvUUlvONmOZtfzgFz\
ByTB10QgxOR0TqBQejhRNzOfkVJ+5YiUqrXF5Y5lKh/DeuNcP5yLWGsEbtLiOSp\
a/TPg7JpJHxyendzWTBfX0cxOnKPjgBzi4diinWGdkF8kjdfnycQZXZeYGejmJl\
ZeGl9i2icVqaNVailT6F5iJ90m6mvuTS4OK05M0vDk0Q4XUtwvKOzrcd3iq9uis\
F81M1OIcR7lEewwcLp7tuNNkM3uNna3F2JQFo97Vriy/Xl4/f1cf5VWzXyym7PH\
hhx4dbgYKAAA7' alt='Larry'/>
</body>
''',
]
class TextviewWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Textview Test")
self.set_default_size(600, 600)
self._textview = ConversationTextview(None)
scrolled = Gtk.ScrolledWindow()
scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled.add(self._textview.tv)
self.add(scrolled)
self.show()
self._print_xhtml()
def _print_xhtml(self):
for xhtml in XHTML:
additional_data = AdditionalDataDict()
additional_data.set_value('gajim', 'xhtml', xhtml)
self._textview.print_real_text(None, additional_data=additional_data)
self._textview.print_real_text('\n')
win = TextviewWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
|
gpl-3.0
| 3,587,926,064,607,005,000
| 25.126168
| 81
| 0.528349
| false
| 3.094079
| false
| false
| false
|
AlexisTM/flyingros
|
flyingros_nav/nodes/control_thread.py
|
1
|
6591
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
control_thread.py
This script sends positions to control the UAV in X, Y, Z
ILPS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ILPS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ILPS. If not, see <http://www.gnu.org/licenses/>.
Software created by Alexis Paques and Nabil Nehri for the UCL
in a Drone-Based Additive Manufacturing of Architectural Structures
project financed by the MIT Seed Fund
Originally inspired by Vladimir Ermakov's work (c) 2015 under GNU GPLv3
Copyright (c) Alexis Paques 2016
Copyright (c) Nabil Nehri 2016
"""
from __future__ import division
import rospy
import mavros
import time
import tf
import numpy as np
from threading import Thread
from mavros.utils import *
from geometry_msgs.msg import PoseStamped, Point, Pose
from sensor_msgs.msg import Imu, Range
from mavros_msgs.srv import SetMode, CommandBool
from mavros_msgs.msg import State, PositionTarget
from math import pi
# Returns a radian from a degree
def deg2radf(a):
return float(a)*pi/180
# Returns a degree from a radian
def rad2degf(a):
return float(a)*180/pi
class _GetchUnix:
"""Fetch and character using the termios module."""
def __init__(self):
import tty, sys
from select import select
def __call__(self):
import sys, tty, termios
from select import select
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
[i, o, e] = select([sys.stdin.fileno()], [], [], 1)
if i:
ch = sys.stdin.read(1)
else:
ch = None
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
getch = _GetchUnix()
def State_Callback(data):
global state
state = data
def Pose_Callback(data):
global pose
pose = data
def sendSetpoint():
# Input data
global setpoint, yawSetPoint, run, position_control
# Output data
local_setpoint_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=0)
rate = rospy.Rate(5)
while run:
q = tf.transformations.quaternion_from_euler(0, 0, deg2radf(yawSetPoint), axes="sxyz")
msg = PoseStamped()
msg.header.stamp = rospy.Time.now()
msg.pose.position.x = float(setpoint.x)
msg.pose.position.y = float(setpoint.y)
msg.pose.position.z = float(setpoint.z)
msg.pose.orientation.x = q[0]
msg.pose.orientation.y = q[1]
msg.pose.orientation.z = q[2]
msg.pose.orientation.w = q[3]
local_setpoint_pub.publish(msg)
rate.sleep()
def InterfaceKeyboard():
# Input data
global pose
# Output data
global setpoint, yawSetPoint, run, position_control
# Publishers
global arming_client, set_mode_client, lasers_yaw
what = getch()
if what == "t":
setpoint.x = setpoint.x - 0.1
if what == "g":
setpoint.x = setpoint.x + 0.1
if what == "f":
setpoint.y = setpoint.y - 0.1
if what == "h":
setpoint.y = setpoint.y + 0.1
if what == "i":
setpoint.z = setpoint.z + 0.1
if what == "k":
setpoint.z = setpoint.z - 0.1
if what == "b":
yawSetPoint = yawSetPoint + 45
if what == "n":
yawSetPoint = yawSetPoint - 45
if what == "c":
setpoint.x = pose.pose.position.x
setpoint.y = pose.pose.position.y
setpoint.z = pose.pose.position.z
if what == "q":
arming_client(False)
if what == "a":
arming_client(True)
if what == "e":
set_mode_client(custom_mode = "OFFBOARD")
if what == "m":
run = False
time.sleep(1)
exit()
Q = (
pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(Q)
rospy.loginfo("Position x: %s y: %s z: %s", pose.pose.position.x, pose.pose.position.y, pose.pose.position.z)
rospy.loginfo("Setpoint is now x:%s, y:%s, z:%s", setpoint.x, setpoint.y, setpoint.z)
rospy.loginfo("IMU :")
rospy.loginfo("roll : %s", rad2degf(euler[0]))
rospy.loginfo("pitch : %s", rad2degf(euler[1]))
rospy.loginfo("yaw : %s and from lasers %s", rad2degf(euler[2]), rad2degf(lasers_yaw))
rospy.loginfo("wanted yaw : %s", yawSetPoint)
def init():
# Input data
# Output data
global state, setpoint, yawSetPoint, \
run, laserposition, pose, lasers_raw, position_control
# Publishers
global local_pos_pub, arming_client, set_mode_client, lasers_yaw
# Objects
lasers_yaw = 0
# Global variable initialisation
pose = PoseStamped()
laserposition = PoseStamped()
setpoint = Point()
setpoint.x = 1
setpoint.y = 1
setpoint.z = 1
# When true, setpoints are positions
# When false, setpoints is a velocity
position_control = True
yawSetPoint = 0
run = True
state = State()
# Node initiation
rospy.init_node('control_position_setpoint_py')
time.sleep(1)
# Publishers, subscribers and services
pose_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, Pose_Callback)
state_sub = rospy.Subscriber('/mavros/state', State, State_Callback)
rospy.wait_for_service('mavros/set_mode')
set_mode_client = rospy.ServiceProxy('mavros/set_mode', SetMode)
rospy.wait_for_service('mavros/cmd/arming')
arming_client = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
# Thread to send setpoints
tSetPoints = Thread(target=sendSetpoint).start()
while not rospy.is_shutdown():
InterfaceKeyboard()
if __name__ == '__main__':
rospy.loginfo("We are ready")
try:
init()
except rospy.ROSInterruptException:
rospy.loginfo("init failed")
pass
|
gpl-3.0
| 7,663,449,591,026,991,000
| 28.799065
| 113
| 0.62206
| false
| 3.38347
| false
| false
| false
|
robbje/eis
|
src/eqc/eqc.py
|
1
|
3399
|
#!/usr/bin/env python2
from parser import Node
from copy import deepcopy
import numpy as np
from eqclib import getClassDefinition, resetClassDefinition
class CircuitTree(Node):
def __init__(
self,
params=[],
eqc=lambda w,
p: 0,
name="",
pNames="",
jac=[],
constraints=[]):
"""Constructor for a CircuitTree node
params: a list of numerical values
eqc: frequency dependent equivalent circuit function
name: name of the element
pNames: names of the elements' parameters
jac: elements jacobian vector of parameters
"""
Node.__init__(self)
self.p = params
self.eqc = eqc
self.name = name
self.pNames = pNames
self.jac = jac
self.constraints = constraints
def collapseCircuit(self):
"""Builds the function describing the frequency dependence of circuit
Returns the root node of the parser tree, with all equivalent
circuit functions generated.
"""
if self.value.type == 'SYMBOL':
cdef = getClassDefinition(self.value.value)
new = CircuitTree(**cdef)
elif self.value.type == 'PARALLEL':
new = self.left.collapseCircuit()
new |= self.right.collapseCircuit()
elif self.value.type == 'SERIES':
new = self.left.collapseCircuit()
new += self.right.collapseCircuit()
else:
raise ValueError('BUG: Unknown type in parse tree')
return None
self.eqc = deepcopy(new.eqc)
self.p = deepcopy(new.p)
self.name = deepcopy(new.name)
self.pNames = deepcopy(new.pNames)
self.jac = deepcopy(new.jac)
self.constraints = deepcopy(new.constraints)
return self
def getCircuit(self):
resetClassDefinition()
self.collapseCircuit()
return self.eqc, self.jac
def getParameterSet(self):
np = len(self.pNames)
return [[self.pNames[i], self.constraints[i]] for i in xrange(np)]
def __add__(self, other):
pu = len(self.p)
self.p = np.append(self.p, other.p)
self.pNames += other.pNames
self.constraints += other.constraints
f = self.eqc
self.name = "(%s+%s)" % (self.name, other.name)
self.eqc = lambda w, p: f(w, p) + other.eqc(w, p[pu:])
self.jac += [lambda w, p: j(w, p[pu:]) for j in other.jac]
return self
def __or__(self, other):
pu = len(self.p)
self.p = np.append(self.p, other.p)
self.pNames += other.pNames
self.constraints += other.constraints
f = self.eqc
self.name = "(%s|%s)" % (self.name, other.name)
self.eqc = lambda w, p: \
1.0 / (1.0 / f(w, p) + 1.0 / other.eqc(w, p[pu:]))
for i, jac in enumerate(self.jac):
self.jac[i] = lambda w, p: np.power(other.eqc(
w, p[pu:]), 2.0) * jac(w, p) / np.power(other.eqc(w, p[pu:]) + f(w, p), 2.0)
for jac in other.jac:
self.jac.append(lambda w, p: np.power(f(w, p), 2.0) *
jac(w, p[pu:]) /
np.power(other.eqc(w, p[pu:]) +
f(w, p), 2.0))
return self
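
# Example usage (a sketch, not part of the original module): a CircuitTree is
# normally produced by the companion parser; once built,
#   eqc, jac = tree.getCircuit()
#   z = eqc(2 * np.pi * 1e3, tree.p)   # complex impedance at 1 kHz
# where eqc(w, p) and every entry of jac follow the (frequency, parameters)
# signature used throughout the class above.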
|
mit
| -6,485,698,705,719,781,000
| 32.653465
| 92
| 0.531921
| false
| 3.759956
| false
| false
| false
|
zbqf109/goodo
|
openerp/addons/account/__init__.py
|
1
|
1247
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
import wizard
import report
from openerp import SUPERUSER_ID
def _auto_install_l10n(cr, registry):
    #check the country of the main company (only) and possibly load localization modules needed for that country
country_code = registry['res.users'].browse(cr, SUPERUSER_ID, SUPERUSER_ID, {}).company_id.country_id.code
if country_code:
#auto install localization module(s) if available
module_list = []
if country_code in ['BJ', 'BF', 'CM', 'CF', 'KM', 'CG', 'CI', 'GA', 'GN', 'GW', 'GQ', 'ML', 'NE', 'CD', 'SN', 'TD', 'TG']:
#countries using OHADA Chart of Accounts
module_list.append('l10n_syscohada')
else:
module_list.append('l10n_' + country_code.lower())
if country_code == 'US':
module_list.append('account_plaid')
if country_code in ['US', 'AU', 'NZ']:
module_list.append('account_yodlee')
module_ids = registry['ir.module.module'].search(cr, SUPERUSER_ID, [('name', 'in', module_list), ('state', '=', 'uninstalled')])
registry['ir.module.module'].button_install(cr, SUPERUSER_ID, module_ids, {})
|
gpl-3.0
| 3,208,437,055,561,920,000
| 43.535714
| 136
| 0.61668
| false
| 3.48324
| false
| false
| false
|
StephDC/MiniBioKit
|
bioChemTool/wigUtil.py
|
1
|
4604
|
from . import commonUtil
class ucscFile():
'''Universal file structure for UCSC Genome Sequence files including wig and bedgraph'''
def __init__(self,name,description='',visibility='hide',color='0,0,0',priority='100',additionConf='',browserConf=None):
self.config = commonUtil.equalDict()
self.config['type'] = 'unknown'
self.config['name'] = name
self.config['description'] = description
self.config['visibility'] = visibility
self.config['color'] = color
self.config['priority'] = priority
self.addn = additionConf
if browserConf is None:
self.brow = commonUtil.equalDict()
else:
self.brow = browserConf
self.data = []
def __str__(self):
result = str(self.brow) if self.brow else ''
result += '\ntrack '
result += str(self.config)
if self.addn.strip():
result += ' '+self.addn.strip()
result += '\n'
for item in self.data:
result += str(item)
return result
def addItem(self,item):
self.data.append(item)
def remItem(self,item):
self.data.remove(item)
def writeFile(self,fName):
stdout = open(fName,'w')
stdout.write(str(self))
stdout.close()
class wigFile(ucscFile):
'''A write-only wig file creator'''
def __init__(self,name,description='',visibility='hide',color='255,255,255',priority='100',additionConf='',browserConf=''):
self.config = commonUtil.equalDict()
self.config['type'] = 'wiggle_0'
self.config['name'] = name
self.config['description'] = description
self.config['visibility'] = visibility
self.config['color'] = color
self.config['priority'] = priority
self.addn = additionConf
self.brow = browserConf
self.data = []
class bedFile(ucscFile):
'''UCSC BED File'''
pass
class wigItem():
'''Items that could be joined into a wig file
Has two types:
variableStep - varStep = True (default)
fixedStep - varStep = False
Need to specify chromosome when initializing.'''
def __init__(self,chromosome,span,varStep=True,start=None):
self.chr = chromosome
self.type = varStep
self.start = start
if not varStep and not start:
raise SyntaxError('fixedStep requires start position.')
self.span = span
self.data = []
def __str__(self):
if self.type:
result = 'variableStep '
else:
result = 'fixedStep '
result += 'chrom='+self.chr
if self.type:
if self.span:
result += ' span='+str(self.span)
result += '\n'
else:
result += ' start='+str(self.start)
result += ' step='+str(self.span)
for item in self.data:
result += str(item)+'\n'
return result
def __getitem__(self,key):
return self.data[key]
def __setitem__(self,key,item):
self.data[key] = item
def __iter__(self):
return self.data.__iter__()
def append(self,item):
self.data.append(item)
add = append
def pop(self):
return self.data.pop()
def bedParse(line):
if not bool(line.strip()) or line.strip()[0] == '#':
return None
result = []
typeList = [str,int,int,str,float]
tmp = line.strip().split()
for item in range(5):
result.append(typeList[item](tmp[item]))
return result
def readBED(fName):
'''Read the BED file that was created before.
First attempt to ever read a file.
Alert: Modifying the file in a thread safe way is not supported.'''
from . import dsvUtil
result = bedFile(fName,browserConf = '')
stdin = open(fName,'r')
confLine = ''
while confLine is not None:
confLine = stdin.readline().strip()
if len(confLine) > 5 and confLine[:5] == 'track':
for item in commonUtil.splitQuote(confLine,' '):
if item.strip():
try:
result.config[item[:item.index('=')]] = item[item.index('=')+1:]
except ValueError:
print('Unquoted parameter: '+item)
confLine = None
elif len(confLine) > 7 and confLine[:7] == 'browser':
result.brow += confLine+'\n'
## Configuration stored.
fileContent = dsvUtil.iterParse_iterator(stdin,['chrom','start','end','value'],bedParse)
for item in fileContent:
result.data.append(item)
return result
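
# Example usage (a minimal sketch, not part of the original module). Items
# appended to a wigItem are rendered with str(), so plain 'position value'
# strings are used; 'chr1', the coordinates and 'example.wig' are placeholders.
# Run with 'python -m bioChemTool.wigUtil' so the relative import resolves.
if __name__ == '__main__':
    track = wigFile('demo', description='example track', visibility='full')
    block = wigItem('chr1', span=25)  # variableStep block with span=25
    block.append('100 1.5')
    block.append('200 0.5')
    track.addItem(block)
    track.writeFile('example.wig')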
|
gpl-3.0
| 7,058,044,371,761,962,000
| 33.878788
| 127
| 0.570808
| false
| 3.938409
| true
| false
| false
|
faisalp4p/slang-python
|
step6/AST.py
|
1
|
14814
|
from abc import ABCMeta, abstractmethod
from Lexer import RELATIONAL_OPERATOR
class OPERATOR:
PLUS = "+"
MINUS = "-"
DIV = "/"
MUL = "*"
class TYPE_INFO:
TYPE_ILLEGAL = -1
TYPE_NUMERIC = 0
TYPE_BOOL = 1
TYPE_STRING = 2
class SYMBOL_INFO:
def __init__(self, symbol_name=None, type=None, val=None):
self.symbol_name = symbol_name
self.type = type
self.val = val
class Exp:
__metaclass__ = ABCMeta
@abstractmethod
def Evaluate(self): pass
@abstractmethod
def TypeCheck(self): pass
@abstractmethod
def GetType(self): pass
class BooleanConstant(Exp):
def __init__(self, pvalue):
self.info = SYMBOL_INFO(symbol_name=None,
val=pvalue,
type=TYPE_INFO.TYPE_BOOL)
def Evaluate(self, run_cntxt):
return self.info
def TypeCheck(self, run_cntxt):
return self.info.type
def GetType(self):
return self.info.type
class NumericConstant(Exp):
def __init__(self, value):
self.info = SYMBOL_INFO(symbol_name=None,
val=value,
type=TYPE_INFO.TYPE_NUMERIC)
def Evaluate(self, run_cntxt):
return self.info
def TypeCheck(self, run_cntxt):
return self.info.type
def GetType(self):
return self.info.type
def __str__(self):
return u'NumericConstant(%d)' % self.info.val
class StringLiteral(Exp):
def __init__(self, pvalue):
self.info = SYMBOL_INFO(symbol_name=None,
val=pvalue,
type=TYPE_INFO.TYPE_STRING)
def Evaluate(self, run_cntxt):
return self.info
def TypeCheck(self, run_cntxt):
return self.info.type
def GetType(self):
return self.info.type
class Variable(Exp):
def __init__(self, info=None, com_cntxt=None, name=None, _val=None):
if info:
self.var_name = info.symbol_name
return
if type(_val) in [int, long]:
t = TYPE_INFO.TYPE_NUMERIC
elif type(_val) == bool:
t = TYPE_INFO.TYPE_BOOL
elif type(_val) == str:
t = TYPE_INFO.TYPE_STRING
else:
raise Exception("Fuck")
s = SYMBOL_INFO(symbol_name=name,
type=t,
val=_val)
com_cntxt.add(s)
self.var_name = name
def GetName(self):
return self.var_name
def Evaluate(self, run_cntxt):
if not run_cntxt.TABLE:
return None
else:
return run_cntxt.get(self.var_name)
def TypeCheck(self, com_cntxt):
if not com_cntxt.TABLE:
return TYPE_INFO.TYPE_ILLEGAL
else:
a = com_cntxt.get(self.var_name)
if a:
self._type = a.type
return a.type
return TYPE_INFO.TYPE_ILLEGAL
def GetType(self):
return self._type
class BinaryPlus(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if (eval_left.type == TYPE_INFO.TYPE_STRING and eval_right.type == TYPE_INFO.TYPE_STRING) or (eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC):
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val + eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left != TYPE_INFO.TYPE_BOOL:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class BinaryMinus(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val - eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class Mul(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val * eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class Div(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val / eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class UnaryPlus(Exp):
def __init__(self, ex1):
self.ex1 = ex1
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
if eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class UnaryMinus(Exp):
def __init__(self, ex1):
self.ex1 = ex1
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=-eval_left.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
if eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class RelationExp(Exp):
def __init__(self, op, ex1, ex2):
self.m_op = op
self._ex1 = ex1
self._ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self._ex1.Evaluate(run_cntxt)
eval_right = self._ex2.Evaluate(run_cntxt)
retval = SYMBOL_INFO()
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == RELATIONAL_OPERATOR.TOK_EQ:
retval.val = eval_left.val == eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_NEQ:
retval.val = eval_left.val != eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_GT:
retval.val = eval_left.val > eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_GTE:
retval.val = eval_left.val >= eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_LT:
retval.val = eval_left.val < eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_LTE:
retval.val = eval_left.val <= eval_right.val
return retval
elif eval_left.type == TYPE_INFO.TYPE_STRING and eval_right.type == TYPE_INFO.TYPE_STRING:
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == RELATIONAL_OPERATOR.TOK_EQ:
retval.val = eval_left.val == eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_NEQ:
retval.val = eval_left.val != eval_right.val
else:
retval = False
return retval
elif eval_left.type == TYPE_INFO.TYPE_BOOL and eval_right.type == TYPE_INFO.TYPE_BOOL:
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == RELATIONAL_OPERATOR.TOK_EQ:
retval.val = eval_left.val == eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_NEQ:
retval.val = eval_left.val != eval_right.val
else:
retval = False
return retval
return None
def TypeCheck(self, com_cntxt):
eval_left = self._ex1.TypeCheck(com_cntxt)
eval_right = self._ex2.TypeCheck(com_cntxt)
if eval_right != eval_left:
raise Exception("Wrong Type in Expression")
if eval_left == TYPE_INFO.TYPE_STRING and not (self.m_op == RELATIONAL_OPERATOR.TOK_EQ or self.m_op == RELATIONAL_OPERATOR.TOK_NEQ):
raise Exception("Only == and != supported for string type")
if eval_left == TYPE_INFO.TYPE_BOOL and not (self.m_op == RELATIONAL_OPERATOR.TOK_EQ or self.m_op == RELATIONAL_OPERATOR.TOK_NEQ):
raise Exception("Only == and != supported for boolean type")
self._optype = eval_left
self._type = TYPE_INFO.TYPE_BOOL
return self._type
def GetType(self):
return self._type
class LogicalExp(Exp):
def __init__(self, op, ex1, ex2):
self.m_op = op
self._ex1 = ex1
self._ex2 = ex2
def TypeCheck(self, com_cntxt):
eval_left = self._ex1.TypeCheck(com_cntxt)
eval_right = self._ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_BOOL:
            self._type = TYPE_INFO.TYPE_BOOL
return self._type
else:
raise "Wrong Type in Expression"
def Evaluate(self, run_cntxt):
eval_left = self._ex1.Evaluate(run_cntxt)
eval_right = self._ex2.Evaluate(run_cntxt)
        if eval_left.type == TYPE_INFO.TYPE_BOOL and eval_right.type == TYPE_INFO.TYPE_BOOL:
retval = SYMBOL_INFO()
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == TOKEN.TOK_AND:
retval.val = eval_left.val and eval_right.val
elif self.m_op == TOKEN.TOK_OR:
retval.val = eval_left.val or eval_right.val
else:
return None
return retval
return None
def GetType(self):
return self._type
class LogicalNot(Exp):
def __init__(self, ex):
self._ex = ex
def Evaluate(self, run_cntxt):
eval_left = self._ex.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_BOOL:
retval = SYMBOL_INFO(type=TYPE_INFO.TYPE_BOOL, symbol_name="", val=not eval_left.val)
return retval
else:
return None
def TypeCheck(self, com_cntxt):
eval_left = self._ex.TypeCheck(com_cntxt)
if eval_left == TYPE_INFO.TYPE_BOOL:
self._type = TYPE_INFO.TYPE_BOOL
return self._type
else:
raise Exception("Wrong Type in Expression")
def GetType(self):
return self._type
if __name__ == "__main__":
# Abstract Syntax Tree(AST) for 5*10
    exp1 = Mul(NumericConstant(5), NumericConstant(10))
    print (exp1.Evaluate(None).val)
    # AST for - (10 + (30 + 50))
    exp2 = UnaryMinus(
        BinaryPlus(NumericConstant(10),
                   BinaryPlus(NumericConstant(30),
                              NumericConstant(50))))
    print (exp2.Evaluate(None).val)
    # AST for (400 + exp2)
    exp3 = BinaryPlus(NumericConstant(400), exp2)
    print (exp3.Evaluate(None).val)
|
mit
| 8,989,551,126,098,030,000
| 27.765049
| 192
| 0.524706
| false
| 3.815091
| false
| false
| false
|
percyfal/ratatosk
|
ratatosk/lib/tools/fastqc.py
|
1
|
2849
|
# Copyright (c) 2013 Per Unneberg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Provide wrappers for `fastqc <http://www.bioinformatics.babraham.ac.uk/projects/fastqc/>`_
Classes
-------
"""
import os
import luigi
import ratatosk.lib.files.input
from ratatosk.job import JobTask
from ratatosk.jobrunner import DefaultShellJobRunner
from ratatosk.log import get_logger
import ratatosk.shell as shell
logger = get_logger()
class InputBamFile(ratatosk.lib.files.input.InputBamFile):
pass
class InputFastqFile(ratatosk.lib.files.input.InputFastqFile):
pass
# This was a nightmare to get right. Temporary output is a directory,
# so would need custom _fix_paths for cases like this
class FastQCJobRunner(DefaultShellJobRunner):
"""This job runner must take into account that there is no default
output file but rather an output directory"""
def _make_arglist(self, job):
arglist = [job.exe()]
if job.opts():
arglist += job.opts()
(tmp_files, job_args) = DefaultShellJobRunner._fix_paths(job)
(tmpdir, outdir) = tmp_files[0]
arglist += ['-o', tmpdir.path]
arglist += [job_args[0]]
return (arglist, tmp_files)
def run_job(self, job):
(arglist, tmp_files) = self._make_arglist(job)
(tmpdir, outdir) = tmp_files[0]
os.makedirs(os.path.join(os.curdir, tmpdir.path))
# Need to send output to temporary *directory*, not file
cmd = ' '.join(arglist)
logger.info("Job runner '{0}'; running command '{1}'".format(self.__class__, cmd))
(stdout, stderr, returncode) = shell.exec_cmd(cmd, shell=True)
if returncode == 0:
logger.info("Shell job completed")
for a, b in tmp_files:
logger.info("renaming {0} to {1}".format(a.path, b.path))
a.move(os.path.join(os.curdir, b.path))
else:
raise Exception("Job '{}' failed: \n{}".format(cmd.replace("= ", "="), " ".join([stderr])))
class FastQC(JobTask):
executable = luigi.Parameter(default="fastqc")
parent_task = luigi.Parameter(default = "ratatosk.lib.tools.fastqc.InputFastqFile")
suffix = luigi.Parameter(default="_fastqc")
def job_runner(self):
return FastQCJobRunner()
def args(self):
return [self.input()[0], self.output()]
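
# Example (a sketch, not part of the original module): FastQC is a ratatosk
# JobTask, so it is normally scheduled through luigi rather than called
# directly; parent_task resolves the input FASTQ (InputFastqFile by default)
# and FastQCJobRunner then shells out to the fastqc executable with
# '-o <temporary output directory> <input fastq>' as built in _make_arglist().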
|
apache-2.0
| -1,005,853,423,500,576,800
| 35.063291
| 103
| 0.665146
| false
| 3.574655
| false
| false
| false
|
prlosana/BuzzBoards
|
fortitoServer_broadcast.py
|
1
|
14915
|
import time
import pygame
import buzzbox
import tricolourleds8
import buttons8
import wear_multiplexer
import wear_sensor_heat
import wear_sensor_light
import wear_sensor_motion
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor, task
i2cBus = 1 #This depends on the model of the Raspberry Pi
box = None
tricolor = None
buttons = None
multiplexer = None
temperatureSensor = None
lightSensor = None
motionSensor = None
connected_multiplexer = False
connected_sensor_light = False
connected_sensor_temp = False
connected_sensor_motion = False
current_channel = 0
#Creates box instance
try:
box = buzzbox.BuzzBox(i2cBus)
box.clean()
except Exception as e:
print ("ERROR: BUZZBOX Unexpected error:", e) #sys.exc_info()[0]
#exitFlag = 1
#Creates tricolour leds instance
try:
# Colour code: 0=yellow, 1=green, 2=red, 3=off
tricolor = tricolourleds8.TricolourLeds8(i2cBus)
tricolor.clean()
except Exception as e:
print ("ERROR: TRICOLOUR LEDS Unexpected error:", e)
#exitFlag = 1
#Creates buttons instance
try:
buttons = buttons8.Buttons8(i2cBus)
buttons.clean()
except Exception as e:
print ("ERROR: BUTTONS Unexpected error:", e)
#exitFlag = 1
#Creates multiplexer instance
try:
multiplexer = wear_multiplexer.WearMultiplexer(i2cBus)
connected_multiplexer = True
except Exception as e:
connected_multiplexer = False
print ("ERROR: Multiplexer Unexpected error:", e)
#exitFlag = 1
class Fortito(Protocol):
def __init__(self, factory):
self.factory = factory
#Starts service for buttons
self.buttons_checker = task.LoopingCall(self.get_BUTTONS_STATUS)
self.buttons_checker.start(1, True)
#Starts service for sensors
self.sensors_checker = task.LoopingCall(self.sensorDiscoveryService)
self.sensors_checker.start(1, True)
def connectionMade(self):
self.factory.clients.append(self)
print ("Client connected.")#, self.factory.clients)
def connectionLost(self, reason):
self.factory.clients.remove(self)
def dataReceived(self, data):
print ("Data received: ", data)
self.get_BUZZBOX_STATUS(data)
def handle_MESSAGE(self, message):
for client in self.factory.clients:
client.transport.write(message)
def get_BUTTONS_STATUS(self):
global buttons
#print ("get_BUTTONS_STATUS running......")
result = buttons.readValue()
if result <> "":
print (str(result), " pressed..............")
self.handle_MESSAGE(str(result) + "\n")
result = ""
def get_BUZZBOX_STATUS(self, data):
global i2cBus
global box
global tricolor
global buttons
global multiplexer
global temperatureSensor
global lightSensor
global motionSensor
global connected_multiplexer
global connected_sensor_light
global connected_sensor_temp
global connected_sensor_motion
global current_channel
#Evaluates data
data = data.upper()
msg = "OK"
subdata_pos = data.find("%")
subdata = data[0:subdata_pos]
subvalue = 0
#print "character % found at ", subdata_pos, " command ", subdata
subdata1_pos = data.find("&")
subdata1 = data[0:subdata1_pos]
subvalue1 = 0
if data == "HELLO\n":
msg = "Greetings!"
print ("Greetings!")
elif data == "PLAY1\n":
pygame.mixer.init()
pygame.mixer.music.load("/home/pi/BuzzBoards/music1.mp3")
pygame.mixer.music.play()
elif data == "PLAY2\n":
pygame.mixer.init()
pygame.mixer.music.load("/home/pi/BuzzBoards/music2.mp3")
pygame.mixer.music.play()
elif data == "STOP\n":
pygame.mixer.stop()
pygame.mixer.music.stop()
pygame.mixer.quit()
elif data == "LIGHT1_ON\n":
print ("Lighting set 1 ON")
box.setLighting1 (True, 0, False)
elif data == "LIGHT1_BLINK\n":
print ("Lighting set 1 BLINK")
box.setLighting1 (True, 0, True)
elif subdata == "LIGHT1_DIM": #format for dimmable values LIGHT1_DIM%5
try:
subvalue = float (data[subdata_pos+1:])
except ValueError:
msg = "ERROR: INVALID DIM VALUE"
subvalue = 0
#print "subvalue=", subvalue
if subvalue > 100 or subvalue < 0:
msg = "ERROR: VALUE OUT OF RANGE"
print ("Lighting set 1 DIMMABLE", msg)
else:
dim = float(subvalue / 100)
print ("Lighting set 1 DIMMABLE ", subvalue , " % - ", dim)
box.setLighting1 (True, dim, False)
elif data == "GET_LIGHT1\n":
msg = box.getLighting1()
print ("Lighting set 1 - Get status ",msg)
elif data == "LIGHT2_ON\n":
print ("Lighting set 2 ON")
box.setLighting2 (True, 0, False)
elif data == "LIGHT2_BLINK\n":
print ("Lighting set 2 BLINK")
box.setLighting2 (True, 0, True)
elif subdata == "LIGHT2_DIM": #format for dimmable values LIGHT1_DIM%5
try:
subvalue = float (data[subdata_pos+1:])
except ValueError:
msg = "ERROR: INVALID DIM VALUE"
subvalue = 0
#print "subvalue=", subvalue
if subvalue > 100 or subvalue < 0:
msg = "ERROR: VALUE OUT OF RANGE"
print ("Lighting set 2 DIMMABLE", msg)
else:
dim = float(subvalue / 100)
print ("Lighting set 2 DIMMABLE ", subvalue , " % - ", dim)
box.setLighting2 (True, dim, False)
elif data == "GET_LIGHT2\n":
msg = box.getLighting2()
print ("Lighting set 2 - Get status ",msg)
elif data == "FAN_ON\n":
print ("Fan ON")
box.setFan (True)
elif data == "HEATER_ON\n":
print ("Heater ON")
box.setHeater (True)
elif data == "LIGHT1_OFF\n":
print ("Lighting set 1 OFF")
box.setLighting1 (False, 0, False)
elif data == "LIGHT2_OFF\n":
print ("Lighting set 2 OFF")
box.setLighting2 (False, 0, False)
elif data == "FAN_OFF\n":
print ("Fan OFF")
box.setFan (False)
elif data == "HEATER_OFF\n":
print ("Heater OFF")
box.setHeater (False)
elif data == "GET_FAN\n":
msg = box.getFan()
print ("Fan - Get status ",msg)
elif data == "GET_HEATER\n":
msg = box.getHeater()
print ("Heater - Get status ",msg)
elif data == "PRESS_BTN1\n":
msg = buttons.readValueVirtualBtn("BTN1")
print ("Virtual BTN1 - Get status ", msg)
elif data == "PRESS_BTN2\n":
msg = buttons.readValueVirtualBtn("BTN2")
print ("Virtual BTN2 - Get status ", msg)
elif data == "PRESS_BTN3\n":
msg = buttons.readValueVirtualBtn("BTN3")
print ("Virtual BTN3 - Get status ", msg)
elif data == "PRESS_BTN4\n":
msg = buttons.readValueVirtualBtn("BTN4")
print ("Virtual BTN4 - Get status ", msg)
elif data == "PRESS_BTN5\n":
msg = buttons.readValueVirtualBtn("BTN5")
print ("Virtual BTN5 - Get status ", msg)
elif data == "PRESS_BTN6\n":
msg = buttons.readValueVirtualBtn("BTN6")
print ("Virtual BTN6 - Get status ", msg)
elif data == "PRESS_BTN7\n":
msg = buttons.readValueVirtualBtn("BTN7")
print ("Virtual BTN7 - Get status ", msg)
elif data == "PRESS_BTN8\n":
msg = buttons.readValueVirtualBtn("BTN8")
print ("Virtual BTN8 - Get status ", msg)
elif data == "GET_LED1\n":
msg = tricolor.getLed1()
print ("Led 1 - Get status ",msg)
elif data == "GET_LED2\n":
msg = tricolor.getLed2()
print ("Led 2 - Get status ",msg)
elif data == "GET_LED3\n":
msg = tricolor.getLed3()
print ("Led 3 - Get status ",msg)
elif data == "GET_LED4\n":
msg = tricolor.getLed4()
print ("Led 4 - Get status ",msg)
elif data == "GET_LED5\n":
msg = tricolor.getLed5()
print ("Led 5 - Get status ",msg)
elif data == "GET_LED6\n":
msg = tricolor.getLed6()
print ("Led 6 - Get status ",msg)
elif data == "GET_LED7\n":
msg = tricolor.getLed7()
print ("Led 7 - Get status ",msg)
elif data == "GET_LED8\n":
msg = tricolor.getLed8()
print ("Led 8 - Get status ",msg)
elif data == "LED1_R\n":
print ("Led 1 RED")
tricolor.turnOnLed (1,2)
elif data == "LED1_G\n":
print ("Led 1 GREEN")
tricolor.turnOnLed (1,1)
elif data == "LED1_Y\n":
print ("Led 1 YELLOW")
tricolor.turnOnLed (1,0)
elif data == "LED1_OFF\n":
print ("Led 1 OFF")
tricolor.turnOnLed (1,3)
elif data == "LED2_R\n":
print ("Led 2 RED")
tricolor.turnOnLed (2,2)
elif data == "LED2_G\n":
print ("Led 2 GREEN")
tricolor.turnOnLed (2,1)
elif data == "LED2_Y\n":
print ("Led 2 YELLOW")
tricolor.turnOnLed (2,0)
elif data == "LED2_OFF\n":
print ("Led 2 OFF")
tricolor.turnOnLed (2,3)
elif data == "LED3_R\n":
print ("Led 3 RED")
tricolor.turnOnLed (3,2)
elif data == "LED3_G\n":
print ("Led 3 GREEN")
tricolor.turnOnLed (3,1)
elif data == "LED3_Y\n":
print ("Led 3 YELLOW")
tricolor.turnOnLed (3,0)
elif data == "LED3_OFF\n":
print ("Led 3 OFF")
tricolor.turnOnLed (3,3)
elif data == "LED4_R\n":
print ("Led 4 RED")
tricolor.turnOnLed (4,2)
elif data == "LED4_G\n":
print ("Led 4 GREEN")
tricolor.turnOnLed (4,1)
elif data == "LED4_Y\n":
print ("Led 4 YELLOW")
tricolor.turnOnLed (4,0)
elif data == "LED4_OFF\n":
print ("Led 4 OFF")
tricolor.turnOnLed (4,3)
elif data == "LED5_R\n":
print ("Led 5 RED")
tricolor.turnOnLed (5,2)
elif data == "LED5_G\n":
print ("Led 5 GREEN")
tricolor.turnOnLed (5,1)
elif data == "LED5_Y\n":
print ("Led 5 YELLOW")
tricolor.turnOnLed (5,0)
elif data == "LED5_OFF\n":
print ("Led 5 OFF")
tricolor.turnOnLed (5,3)
elif data == "LED6_R\n":
print ("Led 6 RED")
tricolor.turnOnLed (6,2)
elif data == "LED6_G\n":
print ("Led 6 GREEN")
tricolor.turnOnLed (6,1)
elif data == "LED6_Y\n":
print ("Led 6 YELLOW")
tricolor.turnOnLed (6,0)
elif data == "LED6_OFF\n":
print ("Led 6 OFF")
tricolor.turnOnLed (6,3)
elif data == "LED7_R\n":
print ("Led 7 RED")
tricolor.turnOnLed (7,2)
elif data == "LED7_G\n":
print ("Led 7 GREEN")
tricolor.turnOnLed (7,1)
elif data == "LED7_Y\n":
print ("Led 7 YELLOW")
tricolor.turnOnLed (7,0)
elif data == "LED7_OFF\n":
print ("Led 7 OFF")
tricolor.turnOnLed (7,3)
elif data == "LED8_R\n":
print ("Led 8 RED")
tricolor.turnOnLed (8,2)
elif data == "LED8_G\n":
print ("Led 8 GREEN")
tricolor.turnOnLed (8,1)
elif data == "LED8_Y\n":
print ("Led 8 YELLOW")
tricolor.turnOnLed (8,0)
elif data == "LED8_OFF\n":
print ("Led 8 OFF")
tricolor.turnOnLed (8,3)
elif data == "GET_CHANNEL\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
print "MULTIPLEXER - Current channel selected ", msg
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
elif data == "GET_TEMPERATURE\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
#print "MULTIPLEXER - Current channel selected ", msg
try:
temperatureSensor = wear_sensor_heat.WearSensorHeat(i2cBus)
read_val = temperatureSensor.setPrecision(4)
msg = str(temperatureSensor.getTemperature())
print "HEAT SENSOR - Temperature ", msg, " C"
except Exception as e:
msg = "ERROR: HEAT SENSOR BOARD NOT CONNECTED"
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
elif data == "GET_LUX\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
#print "MULTIPLEXER - Current channel selected ", msg
try:
lightSensor = wear_sensor_light.WearSensorLight(i2cBus)
msg = str(lightSensor.getLux())
print "LIGHT SENSOR - Light ", msg, " Lux"
except Exception as e:
msg = "ERROR: LIGHT SENSOR BOARD NOT CONNECTED"
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
elif data == "GET_MOTION\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
#print "MULTIPLEXER - Current channel selected ", msg
try:
motionSensor = wear_sensor_motion.WearSensorMotion(i2cBus)
x = motionSensor.getXAxis()
y = motionSensor.getYAxis()
z = motionSensor.getZAxis()
msg = str(x) + "X&" + str(y) + "Y&" + str(z) + "Z"
print "MOTION SENSOR - values ", msg
except Exception as e:
msg = "ERROR: MOTION SENSOR BOARD NOT CONNECTED"
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
else:
msg = "ERROR: WRONG CODE"
print ("Result: ", msg + "\n")
self.handle_MESSAGE(msg + "\n")
def sensorDiscoveryService(self):
global i2cBus
global multiplexer
global temperatureSensor
global lightSensor
global motionSensor
global connected_multiplexer
global connected_sensor_light
global connected_sensor_temp
global connected_sensor_motion
global current_channel
#print ("sensorDiscoveryService running......")
if (connected_sensor_temp or connected_sensor_light or connected_sensor_motion):
pass
else:
print ("sensorDiscoveryService running......")
for channel in range(1,17):
try:
result = multiplexer.setChannel(channel)
print ("MULTIPLEXER - Enabling channel ",channel," in the board... ", result)
current_channel = channel
if (not connected_sensor_temp):
try:
#Start temperature sensor
temperatureSensor = wear_sensor_heat.WearSensorHeat(i2cBus)
#Set precision
decimals = 4
result = temperatureSensor.setPrecision(decimals)
connected_sensor_temp = True
except Exception as e:
#print "ERROR: HEAT SENSOR - ", e
connected_sensor_temp = False
if (not connected_sensor_light):
try:
#Start light sensor
lightSensor = wear_sensor_light.WearSensorLight(i2cBus)
connected_sensor_light = True
except Exception as e:
#print "ERROR: LIGHT SENSOR - ", e
connected_sensor_light = False
if (not connected_sensor_motion):
try:
#Start motion sensor
motionSensor = wear_sensor_motion.WearSensorMotion(i2cBus)
connected_sensor_motion = True
except Exception as e:
#print "ERROR: MOTION SENSOR - ", e
connected_sensor_motion = False
if (connected_sensor_temp or connected_sensor_light or connected_sensor_motion):
break
except Exception as e:
pass
#Start reading sensors
if (connected_sensor_temp):
try:
result = temperatureSensor.getTemperature()
#print ("HEAT SENSOR - Temperature ", result, " C")
except Exception as e:
#print ("ERROR: HEAT SENSOR - ", e)
connected_sensor_temp = False
if (connected_sensor_light):
try:
result = lightSensor.getLux()
#print ("LIGHT SENSOR - Lux ", result)
except Exception as e:
#print ("ERROR: LIGHT SENSOR - ", e)
connected_sensor_light = False
if (connected_sensor_motion):
try:
x = motionSensor.getXAxis()
y = motionSensor.getYAxis()
z = motionSensor.getZAxis()
#print ("MOTION SENSOR - X=", x, ", Y=", y, ", Z=", z)
except Exception as e:
#print ("ERROR: MOTION SENSOR - ", e)
connected_sensor_motion = False
class FortitoFactory(Factory):
def __init__(self):
self.clients = []
def buildProtocol(self, addr):
return Fortito(self)
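# --- Illustrative client sketch (not part of the original script) -------------
# Commands handled by get_BUZZBOX_STATUS above are newline-terminated strings
# (e.g. "GET_TEMPERATURE\n", "LED3_G\n" or "LIGHT1_DIM%50\n") sent over TCP port
# 50000; the server broadcasts the reply back, also newline-terminated. The host
# address below is an assumption - point it at wherever this server runs.
def example_send_command(command="GET_TEMPERATURE\n", host="127.0.0.1", port=50000):
    import socket
    sock = socket.create_connection((host, port))
    try:
        sock.sendall(command.encode("ascii"))  # send one command
        return sock.recv(1024).decode("ascii").strip()  # read the broadcast reply
    finally:
        sock.close()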
reactor.listenTCP(50000, FortitoFactory())
print ("Fortito server started.")
reactor.run()
|
agpl-3.0
| 8,068,275,674,875,720,000
| 28.132813
| 85
| 0.648676
| false
| 2.812559
| false
| false
| false
|
cmpe-295/project-backend
|
safe_ride/ride/migrations/0001_initial.py
|
1
|
1399
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_client_activation_link_offset'),
]
operations = [
migrations.CreateModel(
name='Ride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
('pickup_latitude', models.FloatField()),
('pickup_longitude', models.FloatField()),
('drop_latitude', models.FloatField()),
('drop_longitude', models.FloatField()),
('request_received_at', models.DateTimeField(null=True, blank=True)),
('request_processed_at', models.DateTimeField(null=True, blank=True)),
('initial_eta', models.FloatField()),
('pickup_at', models.DateTimeField(null=True, blank=True)),
('drop_at', models.DateTimeField(null=True, blank=True)),
('client', models.ForeignKey(related_name='rides', to='core.Client')),
('serviced_by', models.ForeignKey(related_name='rides', to='core.Driver')),
],
),
]
|
mit
| 6,725,087,668,841,999,000
| 41.393939
| 114
| 0.565404
| false
| 4.427215
| false
| false
| false
|
dbdd4us/compose
|
compose/config/types.py
|
1
|
6996
|
"""
Types for objects parsed from the configuration.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from collections import namedtuple
import six
from compose.config.config import V1
from compose.config.errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
from compose.utils import splitdrive
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
# TODO: drop service_names arg when v1 is removed
@classmethod
def parse(cls, volume_from_config, service_names, version):
func = cls.parse_v1 if version == V1 else cls.parse_v2
return func(service_names, volume_from_config)
@classmethod
def parse_v1(cls, service_names, volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 2:
raise ConfigurationError(
"volume_from {} has incorrect format, should be "
"service[:mode]".format(volume_from_config))
if len(parts) == 1:
source = parts[0]
mode = 'rw'
else:
source, mode = parts
type = 'service' if source in service_names else 'container'
return cls(source, mode, type)
@classmethod
def parse_v2(cls, service_names, volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
"volume_from {} has incorrect format, should be one of "
"'<service name>[:<mode>]' or "
"'container:<container name>[:<mode>]'".format(volume_from_config))
if len(parts) == 1:
source = parts[0]
return cls(source, 'rw', 'service')
if len(parts) == 2:
if parts[0] == 'container':
type, source = parts
return cls(source, 'rw', type)
source, mode = parts
return cls(source, mode, 'service')
if len(parts) == 3:
type, source, mode = parts
if type not in ('service', 'container'):
raise ConfigurationError(
"Unknown volumes_from type '{}' in '{}'".format(
type,
volume_from_config))
return cls(source, mode, type)
def repr(self):
return '{v.type}:{v.source}:{v.mode}'.format(v=self)
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigurationError(
"Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
def serialize_restart_spec(restart_spec):
parts = [restart_spec['Name']]
if restart_spec['MaximumRetryCount']:
parts.append(six.text_type(restart_spec['MaximumRetryCount']))
return ':'.join(parts)
def parse_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, dict):
return dict(extra_hosts_config)
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
# TODO: validate string contains ':' ?
host, ip = extra_hosts_line.split(':', 1)
extra_hosts_dict[host.strip()] = ip.strip()
return extra_hosts_dict
def normalize_path_for_engine(path):
"""Windows paths, c:\my\path\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
drive, tail = splitdrive(path)
if drive:
path = '/' + drive.lower().rstrip(':') + tail
return path.replace('\\', '/')
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
@classmethod
def _parse_unix(cls, volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
"Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = 'rw'
if len(parts) == 3:
mode = parts[2]
return cls(external, internal, mode)
@classmethod
def _parse_win32(cls, volume_config):
# relative paths in windows expand to include the drive, eg C:\
# so we join the first 2 parts back together to count as one
mode = 'rw'
def separate_next_section(volume_config):
drive, tail = splitdrive(volume_config)
parts = tail.split(':', 1)
if drive:
parts[0] = drive + parts[0]
return parts
parts = separate_next_section(volume_config)
if len(parts) == 1:
internal = normalize_path_for_engine(os.path.normpath(parts[0]))
external = None
else:
external = parts[0]
parts = separate_next_section(parts[1])
external = normalize_path_for_engine(os.path.normpath(external))
internal = normalize_path_for_engine(os.path.normpath(parts[0]))
if len(parts) > 1:
if ':' in parts[1]:
raise ConfigurationError(
"Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config
)
mode = parts[1]
return cls(external, internal, mode)
@classmethod
def parse(cls, volume_config):
"""Parse a volume_config path and split it into external:internal[:mode]
parts to be returned as a valid VolumeSpec.
"""
if IS_WINDOWS_PLATFORM:
return cls._parse_win32(volume_config)
else:
return cls._parse_unix(volume_config)
def repr(self):
external = self.external + ':' if self.external else ''
return '{ext}{v.internal}:{v.mode}'.format(ext=external, v=self)
@property
def is_named_volume(self):
return self.external and not self.external.startswith(('.', '/', '~'))
class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
@classmethod
def parse(cls, link_spec):
target, _, alias = link_spec.partition(':')
if not alias:
alias = target
return cls(target, alias)
def repr(self):
if self.target == self.alias:
return self.target
return '{s.target}:{s.alias}'.format(s=self)
@property
def merge_field(self):
return self.alias
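# --- Illustrative examples (not part of the original module) ------------------
# A quick sketch of how the parsers above behave on the formats described in
# their docstrings (unix-style parsing assumed, i.e. IS_WINDOWS_PLATFORM False).
def _example_volume_and_link_parsing():
    spec = VolumeSpec.parse('/host/data:/container/data:ro')
    assert (spec.external, spec.internal, spec.mode) == ('/host/data', '/container/data', 'ro')
    assert VolumeSpec.parse('mydata:/var/lib/data').is_named_volume  # named volume, default 'rw'
    link = ServiceLink.parse('db:database')
    assert (link.target, link.alias) == ('db', 'database')
    assert ServiceLink.parse('redis').alias == 'redis'  # alias defaults to the target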
|
apache-2.0
| 5,409,932,919,929,041,000
| 30.656109
| 83
| 0.575472
| false
| 4.13231
| true
| false
| false
|
shinose/qplaybox
|
packages/addons/driver/hdhomerun/source/resources/actions.py
|
1
|
1477
|
################################################################################
# This file is part of OpenELEC - http://www.openelec.tv
# Copyright (C) 2009-2014 Stephan Raue (stephan@openelec.tv)
#
# OpenELEC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OpenELEC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenELEC. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import os
import sys
import xbmc
import xbmcaddon
__settings__ = xbmcaddon.Addon(id = 'driver.dvb.hdhomerun')
__cwd__ = __settings__.getAddonInfo('path')
__resources_lib__ = xbmc.translatePath(os.path.join(__cwd__, 'resources', 'lib'))
__settings_xml__ = xbmc.translatePath(os.path.join(__cwd__, 'resources', 'settings.xml'))
if len(sys.argv) == 2 and sys.argv[1] == 'refresh_tuners':
sys.path.append(__resources_lib__)
from functions import refresh_hdhomerun_tuners
refresh_hdhomerun_tuners(__settings_xml__)
__settings__.openSettings()
|
gpl-2.0
| -2,070,411,174,875,305,700
| 45.15625
| 90
| 0.633717
| false
| 3.777494
| false
| false
| false
|
axelberndt/Raspberry-Pi-Tools
|
src/ShutdownRebootVolumeControl.py
|
1
|
6353
|
#!/usr/bin/env python3.5
# This is a combination of ShutdownRebootButton.py and VolumeRotaryControl.py. It is handy for those who use a rotary switch.
# Author: Axel Berndt
from RPi import GPIO
from time import sleep, time
from subprocess import call
import alsaaudio
GPIOpinButton = 27 # Button is on GPIO channel 27 / pin 13 of 40way connector with GND on pin 14
GPIOpinA = 23 # left pin of the rotary encoder is on GPIO 23 (Pi pin 16)
GPIOpinB = 24 # right pin of the rotary encoder is on GPIO 24 (Pi pin 18)
aDown = False # this is set True to wait for GPIO A to go down
bUp = False # this is set True to wait for GPIO B to go up
bDown = False # this is set True to wait for GPIO B to go down
pressTime = float('Inf') # this is used to keep track of the time passing between button press and release, when waiting for button press/falling it has the positive inf value to prevent unintended shutdowns
# initialize GPIO input and define interrupts
def init():
GPIO.setmode(GPIO.BCM) # set the GPIO naming/numbering convention to BCM
GPIO.setup(GPIOpinA, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input channel A
GPIO.setup(GPIOpinB, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input channel B
GPIO.setup(GPIOpinButton, GPIO.IN, pull_up_down=GPIO.PUD_UP) # setup the channel as input with a 50K Ohm pull up. A push button will ground the pin, creating a falling edge.
GPIO.add_event_detect(GPIOpinA, GPIO.BOTH, callback=rotaryInterruptA) # define interrupt for action on channel A (no bouncetime needed)
GPIO.add_event_detect(GPIOpinB, GPIO.BOTH, callback=rotaryInterruptB) # define interrupt for action on channel B (no bouncetime needed)
GPIO.add_event_detect(GPIOpinButton, GPIO.BOTH, callback=buttonInterrupt)#, bouncetime=100) # define interrupt, add the bouncetime if it works better with your button
# the callback functions when turning the encoder
# this one reacts on action on channel A
def rotaryInterruptA(GPIOpin):
A = GPIO.input(GPIOpinA) # read current value of channel A
B = GPIO.input(GPIOpinB) # read current value of channel B
global aDown, bUp, bDown # get access to some more global variables
if aDown: # if we are waiting for channel A to go down (to finish -> rotation cycle)
if not A: # check if it is down now
aDown = False # -> rotation cycle finished
elif bUp or bDown: # if a <- rotation cycle is unfinished so far
pass # don't do anything new
elif A: # if a new rotation cycle starts, i.e. nothing to go up or down
mixer = alsaaudio.Mixer() # get ALSA mixer channel 'Master'
volume = int(mixer.getvolume()[0]) # get the left channel's volume gain (right channel is the same)
if B: # if B is already up, the rotation direction is ->
aDown = True # to finish the cycle, wait for A to go down again
if volume < 100: # do not get greater than 100 (ALSA max)
volume += 1 # increase volume gain
else: # if B still has to come up, the rotation direction is <-
bUp = True # in this rotation cycle B has to come up and down again, we start with waiting for B to come up
if volume > 0: # do not get below 0 (ALSA min)
volume -= 1 # decrease volume gain
mixer.setvolume(volume) # apply the new volume gain to the mixer channel
return # done
# this callback function reacts on action on channel B
def rotaryInterruptB(GPIOpin):
B = GPIO.input(GPIOpin) # read current value of channel B
global bUp, bDown # get access to some more global variables
if B: # if B is up
if bUp: # and we have been waiting for B to come up (this is part of the <- rotation cycle)
bDown = True # wait for B to come down again
bUp = False # done with this
elif bDown: # B is down (if B: was False) and if we were waiting for B to come down
bDown = False # <- rotation cycle finished
return # done
# the callback function when button is pressed/released
def buttonInterrupt(GPIOpin):
global pressTime # get access to the global time variable
if not GPIO.input(GPIOpin): # if button falling event
if pressTime != float('Inf'): # if the button is already pressed due to a missing rise event or bouncing
return # keep the earlier pressTime value, done
pressTime = time() # get the current time
else: # if button rising event
timePassed = time() - pressTime # compute how long the button was pressed
if timePassed < 2: # if it is less than 2 seconds
pressTime = float('Inf') # waiting for next button falling, prevent unintended reboot/shutdowns by setting this variable to positive infinity
elif timePassed < 5: # if pressed for 2 up to 5 seconds
call(['sudo reboot &'], shell=True) # do reboot
else: # if pressed for 5 seconds and more
call(['shutdown -h now "System shutdown by GPIO action" &'], shell=True) # do shutdown
# the main function
def main():
try: # run the program
init() # initialize everything
while True: # idle loop
sleep(300) # wakes up once every 5 minutes = 300 seconds
except KeyboardInterrupt:
GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
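# --- Illustrative sketch (not part of the original script) --------------------
# buttonInterrupt() above maps how long the button was held to an action:
# under 2 s nothing happens, 2 to 5 s reboots, 5 s or more shuts down. This
# side-effect-free helper mirrors those thresholds for clarity.
def example_action_for_hold_time(time_passed):
    if time_passed < 2:
        return None        # too short: ignore and rearm for the next press
    elif time_passed < 5:
        return 'reboot'    # sudo reboot
    else:
        return 'shutdown'  # shutdown -h now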
# the entry point
if __name__ == '__main__':
main()
|
gpl-3.0
| 9,147,913,887,483,484,000
| 60.096154
| 210
| 0.582087
| false
| 4.354352
| false
| false
| false
|
andikleen/pmu-tools
|
interval-plot.py
|
1
|
3554
|
#!/usr/bin/env python
# plot interval CSV output from perf/toplev
# perf stat -I1000 -x, -o file ...
# toplev -I1000 -x, -o file ...
# interval-plot.py file (or stdin)
# delimiter must be ,
# this is for data that is not normalized
# TODO: move legend somewhere else where it doesn't overlap?
from __future__ import print_function
import os
import csv
import sys
import collections
import argparse
import re
import matplotlib
if os.getenv("DISPLAY") is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv_formats
import gen_level
p = argparse.ArgumentParser(
usage='plot interval CSV output from perf stat/toplev',
description='''
perf stat -I1000 -x, -o file ...
toplev -I1000 -x, -o file ...
interval-plot.py file (or stdin)
delimiter must be ,
this is for data that is not normalized.''')
p.add_argument('--xkcd', action='store_true', help='enable xkcd mode')
p.add_argument('--style', help='set mpltools style (e.g. ggplot)')
p.add_argument('file', help='CSV file to plot (or stdin)', nargs='?')
p.add_argument('--output', '-o', help='Output to file. Otherwise show.',
nargs='?')
args = p.parse_args()
if args.style:
try:
from mpltools import style
style.use(args.style)
except ImportError:
print("Need mpltools for setting styles (pip install mpltools)")
try:
import brewer2mpl
all_colors = brewer2mpl.get_map('Paired', 'Qualitative', 12).hex_colors
except ImportError:
print("Install brewer2mpl for better colors (pip install brewer2mpl)")
all_colors = ('green','orange','red','blue',
'black','olive','purple','#6960EC', '#F0FFFF',
'#728C00', '#827B60', '#F87217', '#E55451', # 16
'#F88017', '#C11B17', '#17BFC2', '#C48793') # 20
cur_colors = collections.defaultdict(lambda: all_colors)
assigned = dict()
if args.file:
inf = open(args.file, "r")
else:
inf = sys.stdin
rc = csv.reader(inf)
timestamps = dict()
value = dict()
def isnum(x):
return re.match(r'[0-9.]+', x)
val = ""
for row in rc:
r = csv_formats.parse_csv_row(row)
if r is None:
continue
ts, cpu, event, val = r.ts, r.cpu, r.ev, r.val
if event not in assigned:
level = gen_level.get_level(event)
assigned[event] = cur_colors[level][0]
cur_colors[level] = cur_colors[level][1:]
if len(cur_colors[level]) == 0:
cur_colors[level] = all_colors
value[event] = []
timestamps[event] = []
timestamps[event].append(float(ts))
try:
value[event].append(float(val.replace("%","")))
except ValueError:
value[event].append(0.0)
levels = set(map(gen_level.get_level, assigned.keys()))
if args.xkcd:
try:
plt.xkcd()
except NameError:
print("Please update matplotlib. Cannot enable xkcd mode.")
n = 1
for l in levels:
ax = plt.subplot(len(levels), 1, n)
if val.find('%') >= 0:
ax.set_ylim(0, 100)
t = []
for j in assigned.keys():
print(j, gen_level.get_level(j), l)
if gen_level.get_level(j) == l:
t.append(j)
if 'style' not in globals():
ax.plot(timestamps[j], value[j], assigned[j])
else:
ax.plot(timestamps[j], value[j])
leg = ax.legend(t, loc='upper left')
leg.get_frame().set_alpha(0.5)
n += 1
plt.xlabel('Time')
if val.find('%') >= 0:
plt.ylabel('Bottleneck %')
else:
plt.ylabel("Counter value")
if args.output:
plt.savefig(args.output)
else:
plt.show()
|
gpl-2.0
| 6,773,985,436,433,152,000
| 27.66129
| 75
| 0.612549
| false
| 3.227975
| false
| false
| false
|
wglass/kiel
|
kiel/protocol/join_group.py
|
1
|
1780
|
from .part import Part
from .request import Request
from .response import Response
from .primitives import Array, String, Bytes, Int16, Int32
api_name = "join_group"
__all__ = [
"JoinGroupRequest",
"JoinGroupResponse",
"GroupProtocol",
"Member",
]
class GroupProtocol(Part):
"""
::
GroupProtocol =>
name => String
version => Int16
subscription => Array.of(String)
user_data => Bytes
"""
parts = (
("name", String),
("version", Int16),
("subscription", Array.of(String)),
("user_data", Bytes),
)
class JoinGroupRequest(Request):
"""
::
JoinGroupRequest =>
group_id => String
session_timeout => Int32
member_id => String
protocol_type => String
group_protocols => [GroupProtocol]
"""
api = "join_group"
parts = (
("group_id", String),
("session_timeout", Int32),
("member_id", String),
("protocol_type", String),
("group_protocols", Array.of(GroupProtocol)),
)
class Member(Part):
"""
::
Member =>
member_id => String
metadata => Bytes
"""
parts = (
("member_id", String),
("metadata", Bytes),
)
class JoinGroupResponse(Response):
"""
::
JoinGroupResponse =>
error_code => Int16
generation_id => Int32
protocol => String
leader_id => String
member_id => String
members => [Member]
"""
api = "join_group"
parts = (
("error_code", Int16),
("generation_id", Int32),
("protocol", String),
("leader_id", String),
("member_id", String),
("members", Array.of(Member)),
)
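# --- Illustrative sketch (not part of the original module) --------------------
# Per the schemas above, a join_group request carries the fields below; they are
# shown as a plain dict purely for illustration (how kiel actually assembles a
# JoinGroupRequest is not shown in this file, so no constructor is assumed).
example_join_group_fields = {
    "group_id": "my-consumer-group",
    "session_timeout": 30000,        # milliseconds
    "member_id": "",                 # empty on the first join; the broker assigns one
    "protocol_type": "consumer",
    "group_protocols": [
        {"name": "roundrobin", "version": 0,
         "subscription": ["my-topic"], "user_data": b""},
    ],
}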
|
apache-2.0
| -4,484,983,534,276,010,500
| 18.139785
| 58
| 0.51573
| false
| 4.091954
| false
| false
| false
|
annavonzansen/exams
|
exams/migrations/0027_auto__del_field_orderitem_special_arrangement__del_field_historicalord.py
|
1
|
38780
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'OrderItem.special_arrangement'
db.delete_column(u'exams_orderitem', 'special_arrangement_id')
# Removing M2M table for field attached_candidates on 'OrderItem'
db.delete_table(db.shorten_name(u'exams_orderitem_attached_candidates'))
# Deleting field 'HistoricalOrderItem.special_arrangement_id'
db.delete_column(u'exams_historicalorderitem', u'special_arrangement_id')
def backwards(self, orm):
# Adding field 'OrderItem.special_arrangement'
db.add_column(u'exams_orderitem', 'special_arrangement',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['exams.SpecialArrangement'], null=True, blank=True),
keep_default=False)
# Adding M2M table for field attached_candidates on 'OrderItem'
m2m_table_name = db.shorten_name(u'exams_orderitem_attached_candidates')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('orderitem', models.ForeignKey(orm[u'exams.orderitem'], null=False)),
('candidate', models.ForeignKey(orm[u'exams.candidate'], null=False))
))
db.create_unique(m2m_table_name, ['orderitem_id', 'candidate_id'])
# Adding field 'HistoricalOrderItem.special_arrangement_id'
db.add_column(u'exams_historicalorderitem', u'special_arrangement_id',
self.gf('django.db.models.fields.IntegerField')(blank=True, null=True, db_index=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'education.school': {
'Meta': {'ordering': "('name', 'school_id')", 'object_name': 'School'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'fi'", 'max_length': '2'}),
'managers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'school_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['name']", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'education.schoolsite': {
'Meta': {'unique_together': "(('school', 'name'),)", 'object_name': 'SchoolSite'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_extra': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'Finland'", 'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'postal_code': ('django.db.models.fields.PositiveIntegerField', [], {}),
'postal_office': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.School']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'E'", 'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.answer': {
'Meta': {'object_name': 'Answer'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Assignment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Test']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.answeroption': {
'Meta': {'object_name': 'AnswerOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'max_length': '255'})
},
u'exams.assignment': {
'Meta': {'ordering': "('order',)", 'object_name': 'Assignment'},
'answer_options': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.AnswerOption']", 'null': 'True', 'blank': 'True'}),
'assignment_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'attached_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['exams.File']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.candidate': {
'Meta': {'unique_together': "(('examination', 'school', 'candidate_number'),)", 'object_name': 'Candidate', '_ormbases': [u'people.Person']},
'candidate_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'candidate_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.CandidateType']"}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['people.Person']", 'unique': 'True', 'primary_key': 'True'}),
'retrying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.School']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.SchoolSite']", 'null': 'True', 'blank': 'True'})
},
u'exams.candidatetype': {
'Meta': {'object_name': 'CandidateType'},
'code': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.candidateupload': {
'Meta': {'object_name': 'CandidateUpload'},
'by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.School']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.examination': {
'Meta': {'unique_together': "(('year', 'season'),)", 'object_name': 'Examination'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'registration_begin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'season': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['year', 'season']", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2013', 'max_length': '4'})
},
u'exams.examregistration': {
'Meta': {'unique_together': "(('subject', 'candidate'),)", 'object_name': 'ExamRegistration'},
'additional_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'candidate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Candidate']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'special_arrangements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['exams.SpecialArrangement']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'R'", 'max_length': '1'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.file': {
'Meta': {'object_name': 'File'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalcandidate': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCandidate'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'candidate_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'candidate_type_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'identity_number': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'merge_with_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'person_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retrying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'school_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'site_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalcandidatetype': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCandidateType'},
'code': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalcandidateupload': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCandidateUpload'},
u'by_user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'school_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalexamination': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalExamination'},
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'registration_begin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'season': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['year', 'season']", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2013', 'max_length': '4'})
},
u'exams.historicalorder': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrder'},
'additional_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'parent_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'site_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'c'", 'max_length': '2'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalorderitem': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrderItem'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
u'material_type_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'order_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'subject_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalspecialarrangement': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalSpecialArrangement'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalsubject': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalSubject'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'group_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicaltest': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalTest'},
'begin': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'subject_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.materialtype': {
'Meta': {'object_name': 'MaterialType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.order': {
'Meta': {'ordering': "('-date', 'site', 'examination')", 'object_name': 'Order'},
'additional_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Order']", 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.SchoolSite']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'c'", 'max_length': '2'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.MaterialType']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Order']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.specialarrangement': {
'Meta': {'object_name': 'SpecialArrangement'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '5'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.SubjectGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['exams.MaterialType']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.subjectgroup': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'SubjectGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.test': {
'Meta': {'object_name': 'Test'},
'begin': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'people.person': {
'Meta': {'object_name': 'Person'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_number': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'merge_with': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Person']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
}
}
complete_apps = ['exams']
|
gpl-2.0
| 5,900,534,965,387,721,000
| 87.339408
| 220
| 0.555982
| false
| 3.568274
| true
| false
| false
|
phelios/moneyleft
|
moneyleft/migrations/0001_initial.py
|
1
|
2230
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Entry'
db.create_table('moneyleft_entry', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('desc', self.gf('django.db.models.fields.CharField')(max_length=100)),
('amount', self.gf('django.db.models.fields.DecimalField')(decimal_places=2, max_digits=10)),
('type', self.gf('django.db.models.fields.IntegerField')()),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['moneyleft.Categories'])),
))
db.send_create_signal('moneyleft', ['Entry'])
# Adding model 'Categories'
db.create_table('moneyleft_categories', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('moneyleft', ['Categories'])
def backwards(self, orm):
# Deleting model 'Entry'
db.delete_table('moneyleft_entry')
# Deleting model 'Categories'
db.delete_table('moneyleft_categories')
models = {
'moneyleft.categories': {
'Meta': {'object_name': 'Categories'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'moneyleft.entry': {
'Meta': {'object_name': 'Entry'},
'amount': ('django.db.models.fields.DecimalField', [], {'decimal_places': '2', 'max_digits': '10'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['moneyleft.Categories']"}),
'desc': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['moneyleft']
|
apache-2.0
| 6,019,200,249,292,548,000
| 41.09434
| 114
| 0.58296
| false
| 3.68595
| false
| false
| false
|
zrecore/alexventure.com
|
alexventure/portfolio/models.py
|
1
|
1277
|
from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField( max_length = 110 )
slug = models.CharField( max_length = 110 )
published = models.IntegerField( default = 0 )
parent = models.ForeignKey( 'self', on_delete = models.CASCADE, blank = True, null = True, default = None )
# ...to string helper
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField( max_length = 110 )
slug = models.CharField( max_length = 110 )
published = models.IntegerField( default = 0 )
creation_date = models.DateTimeField( 'date created' )
published_date = models.DateTimeField( 'date published' )
edit_date = models.DateTimeField( 'date edited' )
category = models.ForeignKey( Category, on_delete = models.CASCADE )
content_file = models.CharField( max_length = 255 )
# ...to string helper
def __str__(self):
return self.title
class Tag(models.Model):
name = models.CharField( max_length = 32 )
slug = models.CharField( max_length = 32 )
# ...to string helper
def __str__(self):
return self.name
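# A hedged usage sketch, not part of the original module: it assumes a configured
# Django project and database, and the category/article names below are purely
# illustrative. It shows how the self-referencing Category.parent field and the
# Article.category relation are intended to combine.
def example_published_articles():
    web = Category.objects.create(name='Web', slug='web', published=1)
    django_cat = Category.objects.create(name='Django', slug='django', published=1, parent=web)
    return Article.objects.filter(category=django_cat, published=1).order_by('-published_date')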
|
gpl-3.0
| 204,253,478,973,647,900
| 38.90625
| 120
| 0.602976
| false
| 4.066879
| false
| false
| false
|
fletin/AutoComp
|
Comp.avs.py
|
1
|
4350
|
# ver 1.01
# supports running multiple tasks simultaneously
import os,sys
import uuid
#Generate uuid to make sure filename unique
task_uuid=str(uuid.uuid1())
#tools path
x264_path=sys.path[0]+"\\x264\\x264.exe"
ffms2_path=sys.path[0]+"\\ffms2\\ffms2.dll"
bepipe_path=sys.path[0]+"\\BePipe\\BePipe.exe"
nero_path=sys.path[0]+"\\neroaac\\neroAacEnc.exe"
mp4box_path=sys.path[0]+"\\mp4box\\mp4box.exe"
work_path=sys.path[0]
#avs filters
newfps=0
newx=848
newy=480
#x264 para
x264_preset="veryslow" # faster / normal / slow / veryslow; the slower the preset, the higher the compression ratio
x264_bitrate="2000" # kb/s *time(seconds)/8/1024/1024=MB
#x264_1passOutput="NUL" # one for no result while the other gets v2 for crf22
x264_1passOutput="\""+work_path+"\\temp\\"+task_uuid+".v.mp4\""
crf_value=24
# ffmpegsource2 function
ffms2_script="""function FFmpegSource2(string source, int "vtrack", int "atrack", bool "cache", \\
string "cachefile", int "fpsnum", int "fpsden", int "threads", \\
string "timecodes", int "seekmode", bool "overwrite", int "width", int "height", \\
string "resizer", string "colorspace", int "rffmode", int "adjustdelay", \\
bool "utf8", string "varprefix") {
vtrack = default(vtrack,-1)
atrack = default(atrack,-2)
cache = default(cache,true)
cachefile = default(cachefile,source+".ffindex")
fpsnum = default(fpsnum,-1)
fpsden = default(fpsden,1)
threads = default(threads,-1)
timecodes = default(timecodes,"")
seekmode = default(seekmode,1)
overwrite = default(overwrite,false)
width = default(width,-1)
height = default(height,-1)
resizer = default(resizer,"BICUBIC")
colorspace = default(colorspace,"")
rffmode = default(rffmode,0)
adjustdelay = default(adjustdelay,-1)
utf8 = default(utf8,false)
varprefix = default(varprefix, "")
((cache == true) && (atrack <= -2)) ? ffindex(source=source, cachefile=cachefile, \\
indexmask=0, overwrite=overwrite, utf8=utf8) : (cache == true) ? ffindex(source=source, \\
cachefile=cachefile, indexmask=-1, overwrite=overwrite, utf8=utf8) : nop
v = ffvideosource(source=source, track=vtrack, cache=cache, cachefile=cachefile, \\
fpsnum=fpsnum, fpsden=fpsden, threads=threads, timecodes=timecodes, \\
seekmode=seekmode, rffmode=rffmode, width=width, height=height, resizer=resizer, \\
colorspace=colorspace, utf8=utf8, varprefix=varprefix)
a = (atrack <= -2) ? blankclip(audio_rate=0) : ffaudiosource(source=source, \\
track=atrack, cache=cache, cachefile=cachefile, adjustdelay=adjustdelay, \\
utf8=utf8, varprefix=varprefix)
return audiodubex(v,a)
}"""
print("Input File: "+sys.argv[1]+"\n\r")
#AviSource frameserving
avspath=""
ext_name=sys.argv[1].split(".")[-1]
if ext_name.upper()=="AVS":
avspath=sys.argv[1]
else:
avspath=work_path+"\\temp\\"+task_uuid+".avs"
avsfile=open(avspath,"w+")
if ext_name.upper()=="AVI":
avsfile.write("AviSource(\""+sys.argv[1]+"\")\r\n")
else:
#avsfile.write("LoadPlugin(\""+ffms2_path+"\")\r\nAudioDub(FFVideoSource(\""+sys.argv[1]+"\"), FFAudioSource(\""+sys.argv[1]+"\"))\r\n")
avsfile.write(ffms2_script+"\r\n\r\n\r\n")
avsfile.write("LoadPlugin(\""+ffms2_path+"\")\r\nFFmpegSource2(\""+sys.argv[1]+"\")\r\n")
if newfps>0:
if newfps>20:
avsfile.write("convertfps("+str(newfps)+")\r\n")
else:
avsfile.write("changefps("+str(newfps)+")\r\n")
if (newx>0) & (newy>0):
avsfile.write("lanczosresize("+str(newx)+","+str(newy)+")\r\n")
avsfile.write("ConvertToYUY2()")
avsfile.close()
#Video Section
#x264
os.system(x264_path+" --pass 1 --stats \""+sys.path[0]+"\\temp\\"+task_uuid+".stats\" --level 5.1 --preset "+x264_preset+" --tune psnr --crf "+str(crf_value)+" --output "+x264_1passOutput+" \""+avspath+"\"")
#os.system(x264_path+" --pass 2 --stats \""+sys.path[0]+"\\temp\\temp.stats\" --level 5.1 --preset "+x264_preset+" --tune psnr --bitrate "+x264_bitrate+" --output \""+work_path+"\\temp\\v.mp4\" \""+avspath+"\"")
#Audio Section - neroaac
os.system(bepipe_path+" --script \"Import(^"+avspath+"^)\" | \""+nero_path+"\" -lc -cbr 96000 -if - -of \""+work_path+"\\temp\\"+task_uuid+".a.m4a\"")
#Muxing
os.system(mp4box_path+" -add \""+work_path+"\\temp\\"+task_uuid+".v.mp4\" -add \""+work_path+"\\temp\\"+task_uuid+".a.m4a\" \""+sys.argv[1]+".mp4\"")
#Finishing
print("Finished.")
os.system("pause")
os.system("del "+work_path+"\\temp\\*.* /q")
|
gpl-2.0
| -3,835,330,320,334,072,300
| 30.521739
| 211
| 0.665747
| false
| 2.520278
| false
| false
| false
|
timofeymukha/turbulucid
|
turbulucid/core/case.py
|
1
|
14104
|
# This file is part of turbulucid
# (c) 2018 Timofey Mukha
# The code is released under the GNU GPL Version 3 licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from vtk.util.numpy_support import numpy_to_vtk
from vtk.util.numpy_support import vtk_to_numpy
import os
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from .readers import NativeReader, LegacyReader, XMLReader
__all__ = ["Case"]
class Case:
"""A class representing a simulation case.
"""
def __init__(self, fileName, clean=False, pointData=False):
"""
Create Case from file.
Parameters
----------
fileName : str
The file to be read in. Should be data in VTK format.
clean : bool
Whether to attempt to clean the data of redundant cells.
"""
self.fileName = fileName
# Read in the data
self._blockData = self.read(clean, pointData)
# Compute the cell-centres
self._cellCentres = vtk.vtkCellCenters()
self._cellCentres.SetInputData(self._blockData.GetBlock(0))
self._cellCentres.Update()
self._cellCentres =\
dsa.WrapDataObject(self._cellCentres.GetOutput()).GetPoints()
self._cellCentres = np.array(self._cellCentres[:, :2])
self._vtkData = dsa.WrapDataObject(self._blockData.GetBlock(0))
self._boundaries = self._fill_boundary_list()
self._bounds = self._vtkData.VTKObject.GetBounds()[:4]
self._fields = self._vtkData.CellData.keys()
plot_limits = self._compute_plot_limits()
self._xlim = plot_limits[0]
self._ylim = plot_limits[1]
self._boundaryCellCoords, self._boundaryCellData = \
self._compute_boundary_cell_data()
@property
def blockData(self):
"""vtkMultiBlockDataSet : the multiblock data assembled by the
reader.
"""
return self._blockData
@property
def vtkData(self):
"""wrapped PolyData : The actual data read by the reader."""
return self._vtkData
@property
def cellCentres(self):
"""wrapped VTKArray : the cell centres of the read data """
return self._cellCentres
@property
def boundaries(self):
"""list : A list of names of the boundaries present the case."""
return self._boundaries
@property
def bounds(self):
"""tuple : (min(x), max(x), min(y), max(y))."""
return self._bounds
@property
def fields(self):
"""list of str: The names of the fields present in the case."""
return self._fields
@property
def xlim(self):
"""list of two floats: The x limits that cover the
geometry of the case, plus a small margin.
"""
return self._xlim
@property
def ylim(self):
"""list of two floats: The y limits that cover the
geometry of the case, plus a small margin.
"""
return self._ylim
def _fill_boundary_list(self):
fieldData = self.vtkData.FieldData['boundaries']
boundaryList = []
for i in range(fieldData.GetNumberOfValues()):
boundaryList.append(fieldData.GetValue(i))
return boundaryList
def __getitem__(self, item):
"""Return a cell array by name.
Parameters
----------
item : string
The name of the cell array.
Returns
-------
ndarray
Array of values of the requested field.
"""
if item not in self._fields:
raise ValueError("Field " + item + " not present in the case.")
return np.copy(np.array((self.vtkData.CellData[item])))
def __setitem__(self, item, values):
"""Add another internal field to the case.
Parameters
----------
item : string
The name of the cell array.
values : ndarray
The values of the field.
"""
if values.shape[0] != self[self.fields[0]].shape[0]:
raise ValueError("The dimensionality of the provided field "
"does not match that of the case.")
self.fields.append(item)
cellData = self._vtkData.VTKObject.GetCellData()
valuesVtk = vtk.vtkDoubleArray()
if np.ndim(values) > 1:
valuesVtk.SetNumberOfComponents(values.shape[1])
valuesVtk.SetNumberOfTuples(values.shape[0])
for i in range(values.shape[0]):
valuesVtk.SetTuple(i, values[i, :])
else:
valuesVtk.SetNumberOfComponents(1)
valuesVtk.SetNumberOfValues(values.shape[0])
for i in range(values.shape[0]):
valuesVtk.SetValue(i, values[i])
valuesVtk.SetName(item)
cellData.AddArray(valuesVtk)
# Add boundary cell data
# Add boundary data by copying from boundary cells data
for boundary in self.boundaries:
boundaryCellIds = self._vtkData.FieldData[boundary]
self._boundaryCellData[boundary][item] = self[item][boundaryCellIds, ...]
block = self.extract_block_by_name(boundary)
cellData = block.GetCellData()
valuesVtk = vtk.vtkDoubleArray()
nVals = self.boundary_cell_data(boundary)[0][:, 0].size
bCellData = self.boundary_cell_data(boundary)[1][item]
if np.ndim(values) > 1:
valuesVtk.SetNumberOfComponents(values.shape[1])
valuesVtk.SetNumberOfTuples(nVals)
for i in range(nVals):
valuesVtk.SetTuple(i, bCellData[i, :])
else:
valuesVtk.SetNumberOfComponents(1)
valuesVtk.SetNumberOfValues(nVals)
for i in range(nVals):
valuesVtk.SetValue(i, bCellData[i])
valuesVtk.SetName(item)
cellData.AddArray(valuesVtk)
def __delitem__(self, item):
"""Delete an internal field form the case.
Parameters
----------
item : str
Name of the field to delete.
"""
self.vtkData.VTKObject.GetCellData().RemoveArray(item)
self.fields.remove(item)
for boundary in self.boundaries:
del self._boundaryCellData[boundary][item]
block = self.extract_block_by_name(boundary)
block.GetCellData().RemoveArray(item)
def _compute_plot_limits(self):
""" Compute xlim and ylim."""
minX = self.bounds[0]
maxX = self.bounds[1]
minY = self.bounds[2]
maxY = self.bounds[3]
marginX = (maxX - minX)/60
marginY = (maxY - minY)/60
return (np.array([minX - marginX, maxX + marginX]),
np.array([minY - marginY, maxY + marginY]))
def _transform(self, transform):
"""Transform the geometry according to a vtkTransform filter."""
# Transform the internal field
filter = vtk.vtkTransformPolyDataFilter()
filter.SetInputData(self.blockData.GetBlock(0))
filter.SetTransform(transform)
filter.Update()
self._blockData.SetBlock(0, filter.GetOutput())
# Transform boundary data
i = 1
for boundary in self.boundaries:
filter = vtk.vtkTransformPolyDataFilter()
filter.SetTransform(transform)
filter.SetInputData(self.blockData.GetBlock(i))
filter.Update()
self.blockData.SetBlock(i, filter.GetOutput())
i += 1
# Update attributes
self._cellCentres = vtk.vtkCellCenters()
self._cellCentres.SetInputData(self.blockData.GetBlock(0))
self._cellCentres.Update()
self._cellCentres = \
dsa.WrapDataObject(self._cellCentres.GetOutput()).GetPoints()
self._cellCentres = np.array(self._cellCentres[:, :2])
self._vtkData = dsa.WrapDataObject(self._blockData.GetBlock(0))
self._bounds = self._vtkData.VTKObject.GetBounds()[:4]
plot_limits = self._compute_plot_limits()
self._xlim = plot_limits[0]
self._ylim = plot_limits[1]
self._boundaryCellCoords, self._boundaryCellData = \
self._compute_boundary_cell_data()
def _compute_boundary_cell_data(self):
from collections import OrderedDict
boundaryCellData = OrderedDict()
boundaryCellCoords = OrderedDict()
for b in self.boundaries:
boundaryCellData[b] = OrderedDict()
cellIds = self._vtkData.FieldData[b]
boundaryCellCoords[b] = self.cellCentres[cellIds, :]
for f in self.fields:
boundaryCellData[b][f] = self.__getitem__(f)[cellIds, ...]
return boundaryCellCoords, boundaryCellData
def translate(self, dx, dy):
"""Translate the geometry of the case.
Parameters
----------
dx : float
The translation along the x axis.
dy : float
The translation along the y axis.
"""
transform = vtk.vtkTransform()
transform.Translate(dx, dy, 0)
transform.Update()
self._transform(transform)
def scale(self, scaleX, scaleY):
"""Scale the geometry of the case.
The coordinates get divided by the scaling factors.
Parameters
----------
scaleX : float
The scaling factor along x.
scaleY : float
The scaling factor along y.
"""
transform = vtk.vtkTransform()
transform.Scale(1/scaleX, 1/scaleY, 0)
transform.Update()
self._transform(transform)
def rotate(self, angle):
"""Rotate the geometry of the case around the z axis.
Parameters
----------
dx : angle
Rotation angle in degrees.
"""
axis = [0, 0, 1]
transform = vtk.vtkTransform()
transform.RotateWXYZ(angle, axis[0], axis[1], axis[2])
transform.Update()
self._transform(transform)
def boundary_cell_data(self, boundary, sort=None):
"""Return cell-centre coordinates and data from cells adjacent
to a specific boundary.
Parameters
----------
boundary : str
The name of the boundary.
sort : {None, 'x', 'y'}, optional
Whether to sort the data along a coordinate. Use 'x' and
'y' to sort along x and y, respectively. Default is no
sorting.
Returns
-------
Two ndarrays
"""
points = np.copy(self._boundaryCellCoords[boundary])
data = self._boundaryCellData[boundary].copy()
if sort is None:
return points, data
elif sort == "x":
ind = np.argsort(points[:, 0])
elif sort == "y":
ind = np.argsort(points[:, 1])
points = points[ind]
for key in data:
data[key] = data[key][ind, ...]
return points, data
def extract_block_by_name(self, name):
"""Extract a block from the case by a given name."""
return self._blockData.GetBlock(self.boundaries.index(name) + 1)
def boundary_data(self, boundary, sort=None):
"""Return cell-center coordinates and data from a boundary.
Parameters
----------
boundary : str
The name of the boundary.
sort : str
Whether to sort the data along a coordinate. Use "x" and
"y" to sort along x and y, respectively. Default is no
sorting.
Returns
-------
Two ndarrays
The coordinates of the boundary face centres.
The corresponding data.
"""
blockData = self.extract_block_by_name(boundary)
cCenters = vtk.vtkCellCenters()
cCenters.SetInputData(blockData)
cCenters.Update()
points = np.array(dsa.WrapDataObject(cCenters.GetOutput()).Points)
dataVTK = dsa.WrapDataObject(blockData).CellData
data = {}
for key in dataVTK.keys():
data[key] = np.array(dataVTK[key])
if sort is None:
return points[:, [0, 1]], data
elif sort == "x":
ind = np.argsort(points[:, 0])
elif sort == "y":
ind = np.argsort(points[:, 1])
points = points[ind]
for key in data:
data[key] = data[key][ind]
return points[:, [0, 1]], data
def read(self, clean, pointData):
"""Read in the data from a file.
Parameters
----------
clean : bool
Whether to attempt cleaning the case of degenerate cells upon
read.
pointData : bool
Whether the file contains point data instead of cell data.
Cell data will be computed by interpolation.
Raises
------
ValueError
If the provided file does not exist.
"""
fileName = self.fileName
fileExt = os.path.splitext(fileName)[1]
if fileExt == ".vtm":
reader = NativeReader(fileName)
return reader.data
elif fileExt == ".vtk":
return LegacyReader(fileName, clean=clean,
pointData=pointData).data
elif (fileExt == ".vtu") or (fileExt == ".vtp"):
return XMLReader(fileName, clean=clean, pointData=pointData).data
else:
raise ValueError("Unsupported file format.", fileName, fileExt)
def write(self, writePath):
"""Save the case to a .vtm format.
Parameters
----------
writePath : str
The name of the file.
"""
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(writePath)
writer.SetInputData(self._blockData)
writer.Write()
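# A minimal usage sketch, not part of the original module. The file name, the field
# name "p" and the boundary name "inlet" are assumptions for illustration only; any
# VTK file readable by Case would do.
def _case_example():
    case = Case("postProcessing/surfaces/plane.vtp")
    print(case.fields)                        # e.g. ['U', 'p', ...]
    case.scale(0.1, 0.1)                      # coordinates get divided by the factors
    points, data = case.boundary_cell_data("inlet", sort="y")
    case["pDouble"] = 2.0 * case["p"]         # add a derived cell field
    case.write("plane_scaled.vtm")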
|
gpl-3.0
| -8,321,563,673,050,837,000
| 28.383333
| 85
| 0.573454
| false
| 4.255884
| false
| false
| false
|
bbfamily/abu
|
abupy/TradeBu/ABuTradeProxy.py
|
1
|
14496
|
# -*- encoding:utf-8 -*-
"""
Trade execution proxy module.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from contextlib import contextmanager
from functools import total_ordering
from enum import Enum
import numpy as np
import pandas as pd
from . import ABuTradeDrawer
from . import ABuTradeExecute
__author__ = '阿布'
__weixin__ = 'abu_quant'
class EOrderSameRule(Enum):
"""对order_pd中对order判断为是否相同使用的规则"""
"""order有相同的symbol和买入日期就认为是相同"""
ORDER_SAME_BD = 0
"""order有相同的symbol, 买入日期,和卖出日期,即不考虑价格,只要日期相同就相同"""
ORDER_SAME_BSD = 1
"""order有相同的symbol, 买入日期,相同的买入价格,即单子买入时刻都相同"""
ORDER_SAME_BDP = 2
"""order有相同的symbol, 买入日期, 买入价格, 并且相同的卖出日期和价格才认为是相同,即买入卖出时刻都相同"""
ORDER_SAME_BSPD = 3
@total_ordering
class AbuOrderPdProxy(object):
"""
Wraps the pd.DataFrame object built from trade orders. Intended for externally
debugging the trade results of factors and hunting down problems in a trading
strategy. Supports set-like operations on two orders_pd objects (union,
intersection, difference) as well as the comparison operators ==, !=, > and <, e.g.:
orders_pd1 = AbuOrderPdProxy(orders_pd1)
with orders_pd1.proxy_work(orders_pd2) as (order1, order2):
a = order1 | order2 # union of the two trade results
b = order1 & order2 # intersection of the two trade results
c = order1 - order2 # difference of the two trade results (in order1 but not in order2)
d = order2 - order1 # difference of the two trade results (in order2 but not in order1)
eq = order1 == order2 # whether the two trade results are identical
lg = order1 > order2 # whether order1 has more unique trades than order2
lt = order1 < order2 # whether order1 has fewer unique trades than order2
"""
def __init__(self, orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
The constructor expects a pd.DataFrame object; no type checking is done for now.
:param orders_pd: pd.DataFrame of trade orders produced by a backtest
:param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
i.e. orders must share the same symbol, buy date, and the same sell date and prices to be considered identical
"""
# copy() is needed because columns and attributes will be added to orders_pd
self.orders_pd = orders_pd.copy()
self.same_rule = same_rule
# storage for the result of union, intersection and difference operations
self.op_result = None
self.last_op_metrics = {}
@contextmanager
def proxy_work(self, orders_pd):
"""
Pass in the orders_pd to compare against, build an AbuOrderPdProxy object and
yield it to the caller; op_result is then analysed in a unified way afterwards.
:param orders_pd: pd.DataFrame of trade orders produced by a backtest
:return:
"""
# reset the stored set-operation result
self.op_result = None
# instantiate the AbuOrderPdProxy object to compare against
other = AbuOrderPdProxy(orders_pd)
try:
yield self, other
finally:
if isinstance(self.op_result, pd.DataFrame):
# if a union, intersection or difference result has been stored,
from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
metrics = AbuMetricsBase(self.op_result, None, None, None)
metrics.fit_metrics_order()
self.last_op_metrics['win_rate'] = metrics.win_rate
self.last_op_metrics['gains_mean'] = metrics.gains_mean
self.last_op_metrics['losses_mean'] = metrics.losses_mean
self.last_op_metrics['sum_profit'] = self.op_result['profit'].sum()
self.last_op_metrics['sum_profit_cg'] = self.op_result['profit_cg'].sum()
def __and__(self, other):
""" & 操作符的重载,计算两个交易集的交集"""
# self.op = 'intersection(order1 & order2)'
self.op_result = intersection_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
return self.op_result
def __or__(self, other):
""" | 操作符的重载,计算两个交易集的并集"""
# self.op = 'union(order1 | order2)'
self.op_result = union_in_2orders(self.orders_pd, other.orders_pd)
return self.op_result
def __sub__(self, other):
""" - 操作符的重载,计算两个交易集的差集"""
self.op_result = difference_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
return self.op_result
def __eq__(self, other):
""" == 操作符的重载,计算两个交易集的是否相同"""
return (self - other).empty and (other - self).empty
def __gt__(self, other):
""" > 操作符的重载,计算两个交易集的大小, 类被total_ordering装饰,可以支持lt等操作符"""
unique_cnt = find_unique_group_symbol(self.orders_pd).shape[0]
other_unique_cnt = find_unique_group_symbol(other.orders_pd).shape[0]
return unique_cnt > other_unique_cnt
def union_in_2orders(orders_pd, other_orders_pd):
"""
Union: used when analysing factor or parameter issues, to gather all distinct trades
from the two orders_pd objects while debugging a strategy. Note that two trades are
NOT considered equal here merely because the same stock was bought on the same day;
the two orders_pd objects are simply concatenated and drop_duplicates removes only
completely identical orders, i.e. the result is the union:
orders_pd | cmp_orders_pd, or orders_pd.union(cmp_orders_pd)
:param orders_pd: pd.DataFrame of trade orders produced by a backtest
:param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
:return: orders_pd | cmp_orders_pd
"""
orders_pd = orders_pd.append(other_orders_pd)
orders_pd = orders_pd.drop_duplicates()
return orders_pd
def _same_pd(order, other_orders_pd, same_rule):
"""
Return the rows of other_orders_pd that match the given order according to same_rule
:param order: a single order row from orders_pd
:param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
:param same_rule: rule used to decide whether two orders are the same
:return: the matching rows from other_orders_pd
"""
symbol = order.symbol
buy_day = order['buy_date']
buy_price = order['buy_price']
sell_day = order['sell_date']
sell_price = order['sell_price']
if same_rule == EOrderSameRule.ORDER_SAME_BD:
# match only on buy date and symbol, i.e. two trades buying the same stock on the same trading day are treated as identical, ignoring every other factor of the order
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)]
elif same_rule == EOrderSameRule.ORDER_SAME_BSD:
# match on buy date, sell date and symbol
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['sell_date'] == sell_day)]
elif same_rule == EOrderSameRule.ORDER_SAME_BDP:
# match on buy date, buy price and symbol
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['buy_price'] == buy_price)]
elif same_rule == EOrderSameRule.ORDER_SAME_BSPD:
# match on buy date, sell date, buy price, sell price and symbol
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['sell_date'] == sell_day)
& (other_orders_pd['buy_price'] == buy_price)
& (other_orders_pd['sell_price'] == sell_price)]
else:
raise TypeError('same_rule type is {}!!'.format(same_rule))
return same_pd
def intersection_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
Intersection: used when analysing factor or parameter issues, to pick out the trades
that appear in both orders_pd objects while debugging a strategy, i.e. the result is
the intersection: orders_pd & cmp_orders_pd, or orders_pd.intersection(cmp_orders_pd)
:param orders_pd: pd.DataFrame of trade orders produced by a backtest
:param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
:param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
i.e. orders must share the same symbol, buy date, and the same sell date and prices to be considered identical
:return: orders_pd & cmp_orders_pd
"""
def _intersection(order):
same_pd = _same_pd(order, other_orders_pd, same_rule)
if same_pd.empty:
# empty means no match, so the order is not part of the intersection
return False
# a match was found, intersection=1, the order belongs to the intersection
return True
orders_pd['intersection'] = orders_pd.apply(_intersection, axis=1)
return orders_pd[orders_pd['intersection'] == 1]
def difference_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
Difference: used when analysing factor or parameter issues, to pick out the trades
that differ between the two orders_pd objects while debugging a strategy. Note that
the result contains the trades that exist in orders_pd but not in cmp_orders_pd,
i.e. the difference: orders_pd - cmp_orders_pd, or orders_pd.difference(cmp_orders_pd)
:param orders_pd: pd.DataFrame of trade orders produced by a backtest
:param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
:param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
i.e. orders must share the same symbol, buy date, and the same sell date and prices to be considered identical
:return: orders_pd - cmp_orders_pd
"""
def _difference(order):
same_pd = _same_pd(order, other_orders_pd, same_rule)
if same_pd.empty:
# no match found, so the order belongs to the difference
return True
# a match was found, so the order is not part of the difference
return False
orders_pd['difference'] = orders_pd.apply(_difference, axis=1)
return orders_pd[orders_pd['difference'] == 1]
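# Illustrative sketch, not part of the original module: using the set-style helpers
# directly on two backtest results. `orders_pd_a` and `orders_pd_b` are placeholders
# for orders DataFrames produced by two different strategies or parameter sets.
def _orders_set_ops_example(orders_pd_a, orders_pd_b):
    both = intersection_in_2orders(orders_pd_a, orders_pd_b,
                                   same_rule=EOrderSameRule.ORDER_SAME_BD)
    only_a = difference_in_2orders(orders_pd_a, orders_pd_b,
                                   same_rule=EOrderSameRule.ORDER_SAME_BD)
    merged = union_in_2orders(orders_pd_a, orders_pd_b)
    return both, only_a, merged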
def find_unique_group_symbol(order_pd):
"""
Group by 'buy_date' and 'symbol', then keep only the first row of each group, same_group.iloc[0]
:param order_pd:
:return:
"""
def _find_unique_group_symbol(same_group):
# keep only the first row of the group, i.e. only one order is kept per stock per trading day
return same_group.iloc[0]
# group by 'buy_date' and 'symbol', then apply the handler to each group
order_pds = order_pd.groupby(['buy_date', 'symbol']).apply(_find_unique_group_symbol)
return order_pds
def find_unique_symbol(order_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
If a buy_date/symbol pair in order_pd has more than one trade record, filter all of them out.
Note: to keep one order out of the duplicated records instead, use find_unique_group_symbol.
:param order_pd: pd.DataFrame of trade orders produced by a backtest
:param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
i.e. orders must share the same symbol, buy date, and the same sell date and prices to be considered identical
"""
def _find_unique_symbol(order):
"""根据order的symbol和buy_date在原始order_pd中进行复合条件筛选,结果same_pd如果只有1个就唯一,否则就是重复的"""
same_pd = _same_pd(order, order_pd, same_rule)
if same_pd.empty or same_pd.shape[0] == 1:
return False
# when a symbol has multiple records on the same day, none of them is kept; they are all filtered out
return True
same_mark = order_pd.apply(_find_unique_symbol, axis=1)
return order_pd[same_mark == 0]
def trade_summary(orders, kl_pd, draw=False, show_info=True):
"""
Mainly converts a sequence of AbuOrder objects into the pd.DataFrame orders_pd, and
converts the time-ordered orders into a sequence of trade actions; optionally draws a
detailed chart for every trade and prints a simple textual summary of the metrics.
:param orders: sequence of AbuOrder objects
:param kl_pd: financial time series, a pd.DataFrame object
:param draw: whether to visualise the details of each trade
:param show_info: whether to print the textual trade summary
"""
# convert the sequence of AbuOrder objects into the pd.DataFrame orders_pd
orders_pd = ABuTradeExecute.make_orders_pd(orders, kl_pd)
# convert the time-ordered orders into a sequence of trade actions
action_pd = ABuTradeExecute.transform_action(orders_pd)
summary = ''
if draw:
# draw a detailed chart for every trade
ABuTradeDrawer.plot_his_trade(orders, kl_pd)
if show_info:
# "simple" means trading fees are not taken into account
simple_profit = 'simple profit: {} \n'.format(ABuTradeExecute.calc_simple_profit(orders, kl_pd))
summary += simple_profit
# expected profit per winning trade
mean_win_profit = 'mean win profit {} \n'.format(np.mean(orders_pd[orders_pd.result == 1]['profit']))
summary += mean_win_profit
# expected loss per losing trade
mean_loss_profit = 'mean loss profit {} \n'.format(np.mean(orders_pd[orders_pd.result == -1]['profit']))
summary += mean_loss_profit
# number of winning trades
win_cnt = 0 if len(orders_pd[orders_pd.result == 1].result.value_counts().values) <= 0 else \
orders_pd[orders_pd.result == 1].result.value_counts().values[0]
# number of losing trades
loss_cnt = 0 if len(orders_pd[orders_pd.result == -1].result.value_counts().values) <= 0 else \
orders_pd[orders_pd.result == -1].result.value_counts().values[0]
# win rate
win_rate = 'win rate ' + str('*@#')
if win_cnt + loss_cnt > 0:
win_rate = 'win rate: {}%'.format(float(win_cnt) / float(float(loss_cnt) + float(win_cnt)))
summary += win_rate
return orders_pd, action_pd, summary
|
gpl-3.0
| -5,568,852,503,388,017,000
| 35.951456
| 115
| 0.632335
| false
| 2.223997
| false
| false
| false
|
eort/OpenSesame
|
libqtopensesame/items/qtautoplugin.py
|
2
|
5912
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
import os
from libopensesame import plugins
from libqtopensesame.items.qtplugin import qtplugin
from libqtopensesame import validators
from libqtopensesame.misc.translate import translation_context
from libopensesame.exceptions import osexception
_ = translation_context(u'qtautoplugin', category=u'core')
class qtautoplugin(qtplugin):
"""A class that processes auto-plugins defined in a YAML file"""
def __init__(self, plugin_file):
qtplugin.__init__(self, plugin_file)
def init_edit_widget(self):
"""Construct the GUI controls based on info.yaml"""
qtplugin.init_edit_widget(self, False)
item_type_translate = translation_context(self.item_type,
category=u'plugin')
self.info = plugins.plugin_properties(self.item_type, _type=u'plugins')
# Process the help url, if specified
if u'help' in self.info:
self.help_url = self.info[u'help']
# Some options are required. Which options are required depends on the
# specific widget.
required = [
([u'checkbox', u'color_edit', u'combobox', u'editor', u'filepool', \
u'line_edit', u'spinbox', u'text'], [u'label']),
([u'checkbox', u'color_edit', u'combobox', u'editor', u'filepool', \
u'line_edit', u'spinbox'], [u'var']),
([u'spinbox', u'slider'], [u'min_val', u'max_val']),
([u'combobox'], [u'options']),
]
# Keywords are optional parameters that are set to some default if they
# are not specified.
keywords = {
u'info' : None,
u'min_width' : None,
u'prefix' : u'',
u'suffix' : u'',
u'left_label' : u'min.',
u'right_label' : u'max.',
u'syntax' : False
}
# This indicates whether we should pad the controls with a stretch at
# the end.
need_stretch = True
for c in self.info[u'controls']:
# Check whether all required options have been specified
if u'type' not in c:
raise osexception(
_(u'You must specify "type" for %s controls in info.yaml') \
% self.item_type)
for types, options in required:
if c[u'type'] in types:
for option in options:
if option not in c:
raise osexception(
_(u'You must specify "%s" for %s controls in info.yaml') \
% (option, c[u'type']))
if u'var' in c and not self.syntax.valid_var_name(c[u'var']):
raise osexception(
_(u'Invalid variable name (%s) specified in %s plugin info') %
(c[u'var'], self.item_type))
# Set missing keywords to None
for keyword, default in keywords.items():
if keyword not in c:
c[keyword] = default
# Translate translatable fields
c[u'label'] = item_type_translate(c[u'label'])
if c[u'info'] is not None:
c[u'info'] = item_type_translate(c[u'info'])
# Parse checkbox
if c[u'type'] == u'checkbox':
widget = self.add_checkbox_control(c[u'var'], c[u'label'],
info=c[u'info'])
# Parse color_edit
elif c[u'type'] == u'color_edit':
widget = self.add_color_edit_control(c[u'var'], c[u'label'],
info=c[u'info'], min_width=c[u'min_width'])
# Parse combobox
elif c[u'type'] == u'combobox':
widget = self.add_combobox_control(c[u'var'], c[u'label'],
c[u'options'], info=c[u'info'])
# Parse editor
elif c[u'type'] == u'editor':
widget = self.add_editor_control(c[u'var'], c[u'label'],
syntax=c[u'syntax'])
need_stretch = False
# Parse filepool
elif c[u'type'] == u'filepool':
widget = self.add_filepool_control(c[u'var'], c[u'label'],
info=c[u'info'])
# Parse line_edit
elif c[u'type'] == u'line_edit':
widget = self.add_line_edit_control(c[u'var'], c[u'label'],
info=c[u'info'], min_width=c[u'min_width'])
# Parse spinbox
elif c[u'type'] == u'spinbox':
widget = self.add_spinbox_control(c[u'var'], c[u'label'],
c[u'min_val'], c[u'max_val'], prefix=c[u'prefix'],
suffix=c[u'suffix'], info=c[u'info'])
# Parse slider
elif c[u'type'] == u'slider':
widget = self.add_slider_control(c[u'var'], c[u'label'],
c[u'min_val'], c[u'max_val'], left_label=c[u'left_label'],
right_label=c[u'right_label'], info=c[u'info'])
# Parse text
elif c[u'type'] == u'text':
widget = self.add_text(c[u'label'])
else:
raise Exception(_(u'"%s" is not a valid qtautoplugin control') \
% c[u'type'])
# Add an optional validator
if u'validator' in c:
try:
validator = getattr(validators,
u'%s_validator' % c[u'validator'])
except:
raise osexception(
u'Invalid validator: %s' % c[u'validator'])
widget.setValidator(validator(self.main_window))
# Add the widget as an item property when the 'name' option is
# specified.
if u'name' in c:
if hasattr(self, c[u'name']):
raise Exception(_(u'Name "%s" is already taken in qtautoplugin control') \
% c[u'name'])
setattr(self, c[u'name'], widget)
if need_stretch:
self.add_stretch()
self.lock = True
def apply_edit_changes(self):
"""Applies the controls. I.e. sets the variables from the controls."""
if not qtplugin.apply_edit_changes(self) or self.lock:
return False
return True
def edit_widget(self):
"""Sets the controls based on the variables."""
self.lock = True
qtplugin.edit_widget(self)
self.lock = False
return self._edit_widget
|
gpl-3.0
| 3,340,131,450,445,400,600
| 32.782857
| 79
| 0.653755
| false
| 2.953047
| false
| false
| false
|
oVirt/jenkins
|
stdci_libs/stdci_dsl/api/formatters/runners.py
|
1
|
1620
|
#!/bin/env python
"""runners.py - Set of data formatters for stdci runners"""
import logging
from yaml import safe_dump
_formatters = {}
logger = logging.getLogger(__name__)
class FormatterNotFoundError(Exception):
pass
def get_formatter(formatter_name):
"""Given formatter name, return formatter function
:param str formatter_name: Name of the required formatter
:rtype: function
:returns: Formatter function
"""
formatter_ = _formatters.get(formatter_name, None)
if formatter_ is None:
raise FormatterNotFoundError(
'Could not find formatter: {0}'.format(formatter_name)
)
return formatter_
def formatter(name):
"""Decorator function for formatter registration"""
def wrapper(function):
_formatters[name] = function
logger.debug('Registered runner data formatter: %s', name)
return function
return wrapper
@formatter('yaml_dumper')
def _dump_to_yaml_formatter(obj, template=None):
# TODO: use dict comprehension as soon as python 2.6 support is dropped
repos_fmt = {}
for repo_name, repo_url in obj.repos:
repos_fmt[repo_name] = repo_url
mounts_fmt = {}
for src, dst in obj.mounts:
mounts_fmt[src] = dst
yumrepos_fmt = '' if obj.yumrepos is None else obj.yumrepos
data = {
'script': str(obj.script),
'yumrepos': yumrepos_fmt,
'environment': obj.environment,
'mounts': mounts_fmt,
'repos': repos_fmt,
'hash': obj.hash,
'packages': obj.packages
}
return safe_dump(data, default_flow_style=False)
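# Illustrative sketch, not part of the original module: registering a custom
# formatter and looking it up by name. The 'plain_text' name and the returned
# string format are assumptions for illustration only.
@formatter('plain_text')
def _plain_text_formatter(obj, template=None):
    return 'script: {0}'.format(obj.script)
def _format_runner(obj):
    # resolve the registered formatter by name and apply it to the runner data
    return get_formatter('plain_text')(obj)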
|
gpl-3.0
| -3,148,337,800,633,962,500
| 25.129032
| 75
| 0.648148
| false
| 3.96088
| false
| false
| false
|
Juniper/contrail-horizon
|
openstack_dashboard/dashboards/project/networking/views.py
|
1
|
6510
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Networks.
"""
import logging
from django.core.urlresolvers import reverse_lazy # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import workflows
from horizon import tabs
from openstack_dashboard import api
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import forms as project_forms
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking.ports \
import tables as port_tables
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking.subnets \
import tables as subnet_tables
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import tables as project_tables
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import workflows as project_workflows
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import tabs as project_tabs
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabbedTableView):
tab_group_class = project_tabs.NetworkingTabs
template_name = 'project/networking/index.html'
class CreateView(workflows.WorkflowView):
workflow_class = project_workflows.CreateNetwork
def get_initial(self):
pass
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateNetwork
template_name = 'project/networking/update.html'
#context_object_name = 'network_id'
success_url = reverse_lazy("horizon:project:networking:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
network_id = self.kwargs['network_id']
try:
self._object = api.neutron.network_get(self.request,
network_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up']}
class DetailView(tables.MultiTableView):
table_classes = (subnet_tables.SubnetsTable, port_tables.PortsTable)
template_name = 'project/networking/detail.html'
failure_url = reverse_lazy('horizon:project:networking:index')
def get_subnets_data(self):
try:
network = self._get_data()
subnets = api.neutron.subnet_list(self.request,
network_id=network.id)
except Exception:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
for s in subnets:
s.set_id_as_name_if_empty()
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
except Exception:
ports = []
msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
for p in ports:
p.set_id_as_name_if_empty()
return ports
def _get_data(self):
if not hasattr(self, "_network"):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg, redirect=self.failure_url)
self._network = network
return self._network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["network"] = self._get_data()
return context
class ModifyPolicyView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateNetworkAttachedPolicies
#context_object_name = 'network'
def get_context_data(self, **kwargs):
context = super(ModifyPolicyView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
network = self.get_object()
context["name"] = network.name
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
network_id = self.kwargs['network_id']
try:
self._object = api.neutron.network_get(self.request,
network_id)
except Exception:
redirect = reverse_lazy("horizon:project:networking:index")
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
initial = super(ModifyPolicyView, self).get_initial()
network = self.get_object()
initial.update({'network_id': self.kwargs['network_id'],
'tenant_id': network['tenant_id'],
'name': network['name']})
msg = _('get_initial net %s') % str(initial)
LOG.error(msg)
return initial
|
apache-2.0
| 5,272,953,729,970,121,000
| 37.070175
| 93
| 0.634101
| false
| 4.249347
| false
| false
| false
|
datawire/telepresence
|
telepresence/cli.py
|
1
|
17399
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import webbrowser
from contextlib import contextmanager
from pathlib import Path
from subprocess import check_output
from traceback import format_exc
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union
from urllib.parse import quote_plus
import telepresence
from telepresence.runner import BackgroundProcessCrash, Runner
from telepresence.utilities import dumb_print, random_name
class PortMapping(object):
"""Maps local ports to listen to remote exposed ports."""
def __init__(self) -> None:
self._mapping = {} # type: Dict[int,int]
@classmethod
def parse(cls, port_strings: List[str]) -> "PortMapping":
"""Parse list of 'port' or 'local_port:remote_port' to PortMapping."""
result = PortMapping()
for port_string in port_strings:
if ":" in port_string:
local_port, remote_port = map(int, port_string.split(":"))
else:
local_port, remote_port = int(port_string), int(port_string)
result._mapping[local_port] = remote_port
return result
def merge_automatic_ports(self, ports: List[int]) -> None:
"""
Merge a list of ports to the existing ones.
The existing ones will win if the port is already there.
"""
remote = self.remote()
for port in ports:
if port in remote:
continue
self._mapping[port] = port
def remote(self) -> Set[int]:
"""Return set of remote ports."""
return set(self._mapping.values())
def local_to_remote(self) -> Set[Tuple[int, int]]:
"""Return set of pairs of local, remote ports."""
return set(self._mapping.items())
def has_privileged_ports(self) -> bool:
"""
Return true if any remote port is privileged (< 1024)
"""
return any([p < 1024 for p in self.remote()])
def safe_output(args: List[str]) -> str:
"""
Capture output from a command but try to avoid crashing
:param args: Command to run
:return: Output from the command
"""
try:
return str(check_output(args), "utf-8").strip().replace("\n", " // ")
except Exception as e:
return "(error: {})".format(e)
def report_crash(error: Any, log_path: str, logs: str) -> None:
print(
"\nLooks like there's a bug in our code. Sorry about that!\n\n" +
error + "\n"
)
if log_path != "-":
log_ref = " (see {} for the complete logs):".format(log_path)
else:
log_ref = ""
if "\n" in logs:
print(
"Here are the last few lines of the logfile" + log_ref + "\n\n" +
"\n".join(logs.splitlines()[-12:]) + "\n"
)
report = "no"
if sys.stdout.isatty():
message = (
"Would you like to file an issue in our issue tracker?"
" You'll be able to review and edit before anything is"
" posted to the public."
" We'd really appreciate the help improving our product. [Y/n]: "
)
try:
report = input(message).lower()[:1]
except EOFError:
print("(EOF)")
if report in ("y", ""):
url = "https://github.com/datawire/telepresence/issues/new?body="
body = quote_plus(
BUG_REPORT_TEMPLATE.format(
sys.argv,
telepresence.__version__,
sys.version,
safe_output(["kubectl", "version", "--short"]),
safe_output(["oc", "version"]),
safe_output(["uname", "-a"]),
error,
logs[-1000:],
)[:4000]
) # Overly long URLs won't work
webbrowser.open_new(url + body)
@contextmanager
def crash_reporting(runner: Optional[Runner] = None) -> Iterator[None]:
"""
Decorator that catches unexpected errors
"""
try:
yield
except KeyboardInterrupt:
if runner is not None:
show = runner.show
else:
show = dumb_print
show("Keyboard interrupt (Ctrl-C/Ctrl-Break) pressed")
raise SystemExit(0)
except Exception as exc:
if isinstance(exc, BackgroundProcessCrash):
error = exc.details
else:
error = format_exc()
logs = "Not available"
log_path = "-"
if runner is not None:
logs = runner.read_logs()
log_path = runner.logfile_path
runner.write("CRASH: {}".format(exc))
runner.write(error)
runner.write("(calling crash reporter...)")
report_crash(error, log_path, logs)
raise SystemExit(1)
def path_or_bool(value: str) -> Union[Path, bool]:
"""Parse value as a Path or a boolean"""
path = Path(value)
if path.is_absolute():
return path
value = value.lower()
if value in ("true", "on", "yes", "1"):
return True
if value in ("false", "off", "no", "0"):
return False
raise argparse.ArgumentTypeError(
"Value must be true, false, or an absolute filesystem path"
)
def absolute_path(value: str) -> Path:
"""Parse value as a Path or a boolean"""
path = Path(value)
if path.is_absolute():
return path
raise argparse.ArgumentTypeError(
"Value must be an absolute filesystem path"
)
def parse_args(in_args: Optional[List[str]] = None) -> argparse.Namespace:
"""Create a new ArgumentParser and parse sys.argv."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False, # can make adding changes not backwards compatible
description=(
"Telepresence: local development proxied to a remote Kubernetes "
"cluster.\n\n"
"Documentation: https://telepresence.io\n"
"Real-time help: https://d6e.co/slack\n"
"Issue tracker: https://github.com/datawire/telepresence/issues\n"
"\n" + HELP_EXAMPLES + "\n\n"
)
)
parser.add_argument(
'--version', action='version', version=telepresence.__version__
)
parser.add_argument(
"--verbose",
action='store_true',
help="Enables verbose logging for troubleshooting."
)
parser.add_argument(
"--logfile",
default="./telepresence.log",
help=(
"The path to write logs to. '-' means stdout, "
"default is './telepresence.log'."
)
)
parser.add_argument(
"--method",
"-m",
choices=["inject-tcp", "vpn-tcp", "container"],
help=(
"'inject-tcp': inject process-specific shared "
"library that proxies TCP to the remote cluster.\n"
"'vpn-tcp': all local processes can route TCP "
"traffic to the remote cluster. Requires root.\n"
"'container': used with --docker-run.\n"
"\n"
"Default is 'vpn-tcp', or 'container' when --docker-run is used.\n"
"\nFor more details see "
"https://telepresence.io/reference/methods.html"
)
)
group_deployment = parser.add_mutually_exclusive_group()
group_deployment.add_argument(
'--new-deployment',
"-n",
metavar="DEPLOYMENT_NAME",
dest="new_deployment",
help=(
"Create a new Deployment in Kubernetes where the "
"datawire/telepresence-k8s image will run. It will be deleted "
"on exit. If no deployment option is specified this will be "
" used by default, with a randomly generated name."
)
)
group_deployment.add_argument(
"--swap-deployment",
"-s",
dest="swap_deployment",
metavar="DEPLOYMENT_NAME[:CONTAINER]",
help=(
"Swap out an existing deployment with the Telepresence proxy, "
"swap back on exit. If there are multiple containers in the pod "
"then add the optional container name to indicate which container"
" to use."
)
)
group_deployment.add_argument(
"--deployment",
"-d",
metavar="EXISTING_DEPLOYMENT_NAME",
help=(
"The name of an existing Kubernetes Deployment where the " +
"datawire/telepresence-k8s image is already running."
)
)
parser.add_argument(
"--context",
default=None,
help=(
"The Kubernetes context to use. Defaults to current kubectl"
" context."
)
)
parser.add_argument(
"--namespace",
default=None,
help=(
"The Kubernetes namespace to use. Defaults to kubectl's default"
" for the current context, which is usually 'default'."
)
)
parser.add_argument(
"--serviceaccount",
dest="service_account",
default=None,
help=(
"The Kubernetes service account to use. Sets the value for a new"
" deployment or overrides the value for a swapped deployment."
)
)
parser.add_argument(
"--expose",
action='append',
metavar="PORT[:REMOTE_PORT]",
default=[],
help=(
"Port number that will be exposed to Kubernetes in the Deployment."
" Should match port exposed in the existing Deployment if using "
"--deployment or --swap-deployment. By default local port and "
"remote port are the same; if you want to listen on port 8080 "
"locally but be exposed as port 80 in Kubernetes you can do "
"'--expose 8080:80'."
)
)
parser.add_argument(
"--to-pod",
action="append",
metavar="PORT",
type=int,
default=[],
help=(
"Access localhost:PORT on other containers in the swapped "
"deployment's pod from your host or local container. For example, "
"use this to reach proxy/helper containers in the pod with "
"--swap-deployment."
)
)
parser.add_argument(
"--from-pod",
action="append",
metavar="PORT",
type=int,
default=[],
help=(
"Allow access to localhost:PORT on your host or local container "
"from other containers in the swapped deployment's pod. For "
"example, use this to let an adapter container forward requests "
"to your swapped deployment."
)
)
parser.add_argument(
"--container-to-host",
action="append",
metavar="CONTAINER_PORT[:HOST_PORT]",
default=[],
help=(
"For the container method, listen on localhost:CONTAINER_PORT in"
" the container and forward connections to localhost:HOST_PORT on"
" the host running Telepresence. Useful for allowing code running"
" in the container to connect to an IDE or debugger running on the"
" host."
)
)
parser.add_argument(
"--also-proxy",
metavar="CLOUD_HOSTNAME",
dest="also_proxy",
action='append',
default=[],
help=(
"If you are using --method=vpn-tcp, use this to add additional "
"remote IPs, IP ranges, or hostnames to proxy. Kubernetes service "
"and pods are proxied automatically, so you only need to list "
"cloud resources, e.g. the hostname of a AWS RDS. "
"When using --method=inject-tcp "
"this option is unnecessary as all outgoing communication in "
"the run subprocess will be proxied."
)
)
parser.add_argument(
"--local-cluster",
action='store_true',
help=(
"If you are using --method=vpn-tcp with a local cluster (one that"
" is running on the same computer as Telepresence) and you"
" experience DNS loops or loss of Internet connectivity while"
" Telepresence is running, use this flag to enable an internal"
" workaround that may help."
)
)
mount_group = parser.add_mutually_exclusive_group()
mount_group.add_argument(
"--docker-mount",
type=absolute_path,
metavar="PATH",
dest="docker_mount",
default=None,
help=(
"The absolute path for the root directory where volumes will be "
"mounted, $TELEPRESENCE_ROOT. "
"Requires --method container."
)
)
mount_group.add_argument(
"--mount",
type=path_or_bool,
metavar="PATH_OR_BOOLEAN",
dest="mount",
default=True,
help=(
"The absolute path for the root directory where volumes will be "
"mounted, $TELEPRESENCE_ROOT. "
"Use \"true\" to have Telepresence pick a random mount point "
"under /tmp (default). "
"Use \"false\" to disable filesystem mounting entirely."
)
)
parser.add_argument(
"--env-json",
metavar="FILENAME",
default=None,
help="Also emit the remote environment to a file as a JSON blob."
)
parser.add_argument(
"--env-file",
metavar="FILENAME",
default=None,
help=(
"Also emit the remote environment to an env file in Docker "
"Compose format. "
"See https://docs.docker.com/compose/env-file/ for more "
"information on the limitations of this format."
)
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--run-shell",
dest="runshell",
action="store_true",
help="Run a local shell that will be proxied to/from Kubernetes.",
)
group.add_argument(
"--run",
metavar=("COMMAND", "ARG"),
dest="run",
nargs=argparse.REMAINDER,
help=(
"Run the specified command arguments, e.g. "
"'--run python myapp.py'."
)
)
group.add_argument(
"--docker-run",
metavar="DOCKER_RUN_ARG",
dest="docker_run",
nargs=argparse.REMAINDER,
help=(
"Run a Docker container, by passing the arguments to 'docker run',"
" e.g. '--docker-run -i -t ubuntu:16.04 /bin/bash'. "
"Requires --method container."
)
)
args = parser.parse_args(in_args)
# Fill in defaults:
if args.method is None:
if args.docker_run is not None:
args.method = "container"
else:
args.method = "vpn-tcp"
if args.deployment is None and args.new_deployment is None and (
args.swap_deployment is None
):
args.new_deployment = random_name()
if args.docker_mount:
args.mount = False
if args.method == "container" and args.docker_run is None:
raise SystemExit(
"'--docker-run' is required when using '--method container'."
)
if args.docker_run is not None and args.method != "container":
raise SystemExit(
"'--method container' is required when using '--docker-run'."
)
if args.docker_mount is not None and args.method != "container":
raise SystemExit(
"'--method container' is required when using '--docker-mount'."
)
args.expose = PortMapping.parse(args.expose)
args.container_to_host = PortMapping.parse(args.container_to_host)
return args
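def _method_default_sketch():  # pragma: no cover
    """Hedged sketch, not part of the upstream CLI: shows the --method
    default rule applied above in isolation, using stand-in Namespace
    objects instead of a full command line."""
    ns = argparse.Namespace(method=None, docker_run=None)
    ns.method = "container" if ns.docker_run is not None else "vpn-tcp"
    assert ns.method == "vpn-tcp"        # no --docker-run, so vpn-tcp
    ns = argparse.Namespace(method=None, docker_run=["ubuntu:16.04"])
    ns.method = "container" if ns.docker_run is not None else "vpn-tcp"
    assert ns.method == "container"      # --docker-run switches to container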
HELP_EXAMPLES = """\
== Examples ==
Send an HTTP query to a Kubernetes Service called 'myservice' listening on port \
8080:
$ telepresence --run curl http://myservice:8080/
Replace an existing Deployment 'myserver' listening on port 9090 with a local \
process listening on port 9090:
$ telepresence --swap-deployment myserver --expose 9090 \
--run python3 -m http.server 9090
Use a different local port than the remote port:
$ telepresence --swap-deployment myserver --expose 9090:80 \
--run python3 -m http.server 9090
Run a Docker container instead of a local process:
$ telepresence --swap-deployment myserver --expose 80 \
--docker-run -i -t nginx:latest
== Detailed usage ==
"""
BUG_REPORT_TEMPLATE = u"""\
### What were you trying to do?
(please tell us)
### What did you expect to happen?
(please tell us)
### What happened instead?
(please tell us - the traceback is automatically included, see below.
use https://gist.github.com to pass along full telepresence.log)
### Automatically included information
Command line: `{}`
Version: `{}`
Python version: `{}`
kubectl version: `{}`
oc version: `{}`
OS: `{}`
```
{}
```
Logs:
```
{}
```
"""
|
apache-2.0
| 8,950,086,370,324,174,000
| 31.042357
| 79
| 0.579918
| false
| 4.222033
| false
| false
| false
|
mindbody/API-Examples
|
SDKs/Python/swagger_client/models/get_class_payroll_response.py
|
1
|
4756
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.class_payroll_event import ClassPayrollEvent # noqa: F401,E501
from swagger_client.models.pagination_response import PaginationResponse # noqa: F401,E501
class GetClassPayrollResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_response': 'PaginationResponse',
'class_payroll': 'list[ClassPayrollEvent]'
}
attribute_map = {
'pagination_response': 'PaginationResponse',
'class_payroll': 'ClassPayroll'
}
def __init__(self, pagination_response=None, class_payroll=None): # noqa: E501
"""GetClassPayrollResponse - a model defined in Swagger""" # noqa: E501
self._pagination_response = None
self._class_payroll = None
self.discriminator = None
if pagination_response is not None:
self.pagination_response = pagination_response
if class_payroll is not None:
self.class_payroll = class_payroll
@property
def pagination_response(self):
"""Gets the pagination_response of this GetClassPayrollResponse. # noqa: E501
Contains information about the pagination used. # noqa: E501
:return: The pagination_response of this GetClassPayrollResponse. # noqa: E501
:rtype: PaginationResponse
"""
return self._pagination_response
@pagination_response.setter
def pagination_response(self, pagination_response):
"""Sets the pagination_response of this GetClassPayrollResponse.
Contains information about the pagination used. # noqa: E501
:param pagination_response: The pagination_response of this GetClassPayrollResponse. # noqa: E501
:type: PaginationResponse
"""
self._pagination_response = pagination_response
@property
def class_payroll(self):
"""Gets the class_payroll of this GetClassPayrollResponse. # noqa: E501
Contains the class payroll events. # noqa: E501
:return: The class_payroll of this GetClassPayrollResponse. # noqa: E501
:rtype: list[ClassPayrollEvent]
"""
return self._class_payroll
@class_payroll.setter
def class_payroll(self, class_payroll):
"""Sets the class_payroll of this GetClassPayrollResponse.
Contains the class payroll events. # noqa: E501
:param class_payroll: The class_payroll of this GetClassPayrollResponse. # noqa: E501
:type: list[ClassPayrollEvent]
"""
self._class_payroll = class_payroll
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetClassPayrollResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetClassPayrollResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
bsd-2-clause
| -2,032,422,677,886,335,000
| 31.135135
| 119
| 0.609336
| false
| 4.201413
| false
| false
| false
|
Pushjet/Pushjet-Server-Api
|
controllers/subscription.py
|
1
|
1428
|
from flask import Blueprint, jsonify
from utils import Error, has_service, has_uuid, queue_zmq_message
from shared import db
from models import Subscription
from json import dumps as json_encode
from config import zeromq_relay_uri
subscription = Blueprint('subscription', __name__)
@subscription.route('/subscription', methods=['POST'])
@has_uuid
@has_service
def subscription_post(client, service):
exists = Subscription.query.filter_by(device=client).filter_by(service=service).first() is not None
if exists:
return Error.DUPLICATE_LISTEN
subscription_new = Subscription(client, service)
db.session.add(subscription_new)
db.session.commit()
if zeromq_relay_uri:
queue_zmq_message(json_encode({'subscription': subscription_new.as_dict()}))
return jsonify({'service': service.as_dict()})
@subscription.route('/subscription', methods=['GET'])
@has_uuid
def subscription_get(client):
subscriptions = Subscription.query.filter_by(device=client).all()
return jsonify({'subscriptions': [_.as_dict() for _ in subscriptions]})
@subscription.route('/subscription', methods=['DELETE'])
@has_uuid
@has_service
def subscription_delete(client, service):
l = Subscription.query.filter_by(device=client).filter_by(service=service).first()
if l is not None:
db.session.delete(l)
db.session.commit()
return Error.NONE
return Error.NOT_SUBSCRIBED
|
bsd-2-clause
| 6,787,025,586,137,599,000
| 30.733333
| 103
| 0.721989
| false
| 3.828418
| false
| false
| false
|
HyShai/youtube-dl
|
youtube_dl/downloader/f4m.py
|
1
|
12552
|
from __future__ import unicode_literals
import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree
from .common import FileDownloader
from .http import HttpFD
from ..compat import (
compat_urlparse,
)
from ..utils import (
struct_pack,
struct_unpack,
format_bytes,
encodeFilename,
sanitize_open,
xpath_text,
)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return struct_unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return struct_unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return struct_unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size - header_end)
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
self.read(1)
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
return res
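# Hedged sketch (not part of the original module): what build_fragments_list
# produces for a minimal, hand-written bootstrap info dict with one segment
# containing three fragments.
def _build_fragments_list_example():  # pragma: no cover
    boot_info = {
        'segments': [{'segment_run': [(1, 3)]}],   # segment 1 has 3 fragments
        'fragments': [{'fragments': [{'first': 1, 'ts': 0,
                                      'duration': 4000,
                                      'discontinuity_indicator': None}]}],
    }
    assert build_fragments_list(boot_info) == [(1, 1), (1, 2), (1, 3)]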
def write_unsigned_int(stream, val):
stream.write(struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(struct_pack('!I', val)[1:])
def write_flv_header(stream):
"""Writes the FLV header to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
stream.write(b'\x00\x00\x00\x09')
stream.write(b'\x00\x00\x00\x00')
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
SCRIPT_TAG = b'\x12'
FLV_TAG_HEADER_LEN = 11
if metadata:
stream.write(SCRIPT_TAG)
write_unsigned_int_24(stream, len(metadata))
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kargs):
pass
class F4mFD(FileDownloader):
"""
A downloader for f4m manifests or AdobeHDS.
"""
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
if not media:
self.report_error('Unsupported DRM')
return media
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[download] Downloading f4m manifest')
manifest = self.ydl.urlopen(man_url).read()
self.report_destination(filename)
http_dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'ratelimit': self.params.get('ratelimit', None),
'test': self.params.get('test', False),
}
)
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
if bootstrap_node.text is None:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_node.attrib['url'])
bootstrap = self.ydl.urlopen(bootstrap_url).read()
else:
bootstrap = base64.b64decode(bootstrap_node.text)
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text)
else:
metadata = None
boot_info = read_bootstrap_info(bootstrap)
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
write_flv_header(dest_stream)
write_metadata_tag(dest_stream, metadata)
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'downloaded_bytes': 0,
'frag_counter': 0,
}
start = time.time()
def frag_progress_hook(status):
frag_total_bytes = status.get('total_bytes', 0)
estimated_size = (state['downloaded_bytes'] +
(total_frags - state['frag_counter']) * frag_total_bytes)
if status['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_counter'] += 1
progress = self.calc_percent(state['frag_counter'], total_frags)
byte_counter = state['downloaded_bytes']
else:
frag_downloaded_bytes = status['downloaded_bytes']
byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_counter'], total_frags)
progress += frag_progress / float(total_frags)
eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
self.report_progress(progress, format_bytes(estimated_size),
status.get('speed'), eta)
http_dl.add_progress_hook(frag_progress_hook)
frags_filenames = []
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:
return False
with open(frag_filename, 'rb') as down:
down_data = down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
if box_type == b'mdat':
dest_stream.write(box_data)
break
frags_filenames.append(frag_filename)
dest_stream.close()
self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)
self.try_rename(tmpfilename, filename)
for frag_file in frags_filenames:
os.remove(frag_file)
fsize = os.path.getsize(encodeFilename(filename))
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
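# Hedged sketch (not part of the downloader): what the FLV helper functions
# above produce for an empty output stream with no metadata.
def _flv_header_sketch():  # pragma: no cover
    buf = io.BytesIO()
    write_flv_header(buf)           # signature, version 1, A/V flags, header size, tag size 0
    write_metadata_tag(buf, None)   # metadata is optional; None writes nothing
    return buf.getvalue()           # 13 bytes: b'FLV\x01\x05\x00\x00\x00\x09\x00\x00\x00\x00'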
|
unlicense
| -2,441,628,705,126,840,000
| 32.832884
| 88
| 0.565249
| false
| 3.888476
| false
| false
| false
|
makson96/free-engineer
|
games/doom3/game.py
|
2
|
2138
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
##This software is available to you under the terms of the GPL-3, see "/usr/share/common-licenses/GPL-3".
##Copyright:
##- Tomasz Makarewicz (makson96@gmail.com)
import os, shutil
from subprocess import check_output
recultis_dir = os.getenv("HOME") + "/.recultis/"
self_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
install_dir = recultis_dir + "doom3/"
desk_dir = str(check_output(['xdg-user-dir', 'DESKTOP']))[2:-3]
full_name = "Doom 3 BFG on RBDOOM-3-BFG engine"
description = """Doom 3: BFG is the remaster of classic Doom 3 with all expansions. It
features enhanced graphic and audio to original game. Doom 3 is one of
the best FPS games of all time. Unfortunately, it was never released
on Linux, but game engine was release open source. With many
enhancements and new features, game is now available on Linux and it
is better than ever before. Recultis uses RBDOOM-3-BFG flavor of the
engine and requires game to be present in your Steam Library.
"""
shops = ["steam"]
s_appid = "208200"
steam_link = "http://store.steampowered.com/app/"+s_appid+"/"
screenshot_path = self_dir + "../../assets/html/rbdoom3-screen.png"
icon1_name = "rbdoom-3-bfg.png"
icon_list = [icon1_name]
engine = "rbdoom-3-bfg"
runtime_version = 2
env_var = "LD_LIBRARY_PATH=$HOME/.recultis/runtime/recultis" + str(runtime_version) + ":$HOME/.recultis/runtime/recultis" + str(runtime_version) + "/custom"
launcher1_cmd = "bash -c 'cd $HOME/.recultis/doom3/; " + env_var + " ./RBDoom3BFG'"
launcher_cmd_list = [["Doom3 BFG", launcher1_cmd]]
launcher1_text = """[Desktop Entry]
Type=Application
Name=Doom 3 BFG
Comment=Play Doom 3 BFG
Exec=""" + launcher1_cmd + """
Icon=""" + icon1_name + """
Categories=Game;
Terminal=false
"""
launcher_list = [["doom3.desktop", launcher1_text]]
uninstall_files_list = []
uninstall_dir_list = []
def prepare_engine():
print("Prepare game engine")
try:
os.remove(install_dir + "RBDoom3BFG")
shutil.rmtree(install_dir + "lib")
except:
pass
shutil.copy(recultis_dir + "tmp/rbdoom-3-bfg/RBDoom3BFG", install_dir + "RBDoom3BFG")
print("Game engine ready")
|
gpl-3.0
| 4,665,695,567,504,706,000
| 34.633333
| 156
| 0.707203
| false
| 2.787484
| false
| false
| false
|
alirizakeles/tendenci
|
tendenci/apps/base/management/commands/upload_addon.py
|
1
|
1366
|
from optparse import make_option
import os
import zipfile
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
Addon upload process.
Usage:
example:
python manage.py upload_addon --zip_path /uploads/addons/addon.zip
"""
option_list = BaseCommand.option_list + (
make_option(
'--zip_path',
action='store',
dest='zip_path',
default='',
help='Path to the zip file'),
)
def handle(self, *args, **options):
path = options['zip_path']
addon_zip = zipfile.ZipFile(default_storage.open(path))
addon_name = addon_zip.namelist()[0]
addon_name = addon_name.strip('/')
addon_zip.extractall(settings.SITE_ADDONS_PATH)
print "Updating tendenci site"
os.system('python manage.py syncdb --noinput')
os.system('python manage.py migrate %s --noinput' % addon_name)
os.system('python manage.py update_settings %s' % addon_name)
os.system('python manage.py collectstatic --noinput')
print "Restarting Server"
os.system('sudo reload %s' % os.path.basename(settings.PROJECT_ROOT))
print 'Deleting zip file'
default_storage.delete(path)
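# Hedged sketch (not part of the original command): the same upload can be
# triggered programmatically from Django code; the zip path is an example.
def _example_call():  # pragma: no cover
    from django.core.management import call_command
    call_command('upload_addon', zip_path='/uploads/addons/addon.zip')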
|
gpl-3.0
| 7,041,375,665,213,064,000
| 28.695652
| 77
| 0.627379
| false
| 4.005865
| false
| false
| false
|
xS1ender/CytaPWN
|
cytapwn.py
|
1
|
7318
|
#!/usr/bin/python
#!/usr/bin/python2
#!/usr/bin/python3
# +-------------------------------------------------------------------------------------------------------------+
# | ZTE ZXHN H267N Router with <= V1.0.01_CYTA_A01 - RCE Root Exploit |
# | Copyright (c) 2017 Kropalis Thomas <xslender@protonmail.com> |
# +-------------------------------------------------------------------------------------------------------------+
# | This python script connects to ZTE ZXHN H267N running CYTA's software through telnet |
# | using the current credentials, and changes/adds/removes data and features. This script |
# | is tested mostly on a machine running Kali Linux 2017.1 and Windows 10 Prof Edition. |
# | UPDATE (12/6/17): CytaPWN will no longer support Windows; This might change in the future. |
# +-------------------------------------------------------------------------------------------------------------+
# | Tested on ZTE: |
# | [*] Model name : ZTE ZXHN H267N |
# | [*] Software Version : V1.0.0T6P1_CYTA |
# | [*] Hardware Version : V1.3 |
# | [*] Bootloader Version : V1.0.0 |
# +-------------------------------------------------------------------------------------------------------------+
# | ztexploit.py tested on Kali Linux 2017.1 (amd64) |
# +-------------------------------------------------------------------------------------------------------------+
# | TODO: Add more features - including changing WPA Key and SSID Name, full control |
# | over network's devices, compatibility for Windows. |
# +-------------------------------------------------------------------------------------------------------------+
import urllib, re, time, os, sys, requests
import urllib2, commands, telnetlib, imp
from bs4 import BeautifulSoup as bs
# -------------------------------------------------
# See if BeautifulSoup is installed, continue if
# it is and install it through pip if not
# -------------------------------------------------
# try:
# imp.find_module('BeatifulSoup()')
# from bs4 import BeautifulSoup as bs
# except ImportError:
# os.system('pip install BeatifulSoup')
# -------------------------------------------------
# Generic (hidden) 'root' account credentials.
# Hint: Use these credentials to login on Telnet
# -------------------------------------------------
username = "CytaAdmRes"
password = "d5l_cyt@_Adm1n"
# --------------------------------------------------
# Payload with root credentials for the router's
# interface. Mostly to grab needed router info.
# --------------------------------------------------
payload = {
'Frm_Username':username,
'Frm_Password':password
}
os.system('clear')
##
RED = '\033[31m'
GREEN = '\033[32m'
RESET = '\033[0;0m'
##
print "+------------------------------------------------------------------+"
print "| ZTE ZXHN H267N with <= V1.0.01_CYTA_A01 - RCE Root Exploit |"
print "| Thomas Kropalis (c) 2017 - <xslender@protonmail.com> |"
print "+------------------------------------------------------------------+"
try:
targetip = raw_input("\nEnter the address of the ZTE router:\n> ")
    if targetip[:7] != "http://":
        target = "http://"+targetip
    else:
        target = targetip
try:
sys.stdout.write(" [*] Pinging router address...\r")
sys.stdout.flush()
time.sleep(2)
ping_res = urllib.urlopen(target).getcode()
if ping_res == 200:
sys.stdout.write(" ["+GREEN+" OK "+RESET+"]\n")
else:
print("[-] "+RED+"Error"+RESET)
sys.exit()
response = urllib.urlopen(target)
html_data = response.read()
sys.stdout.write(" [*] Retrieving random login token...\r")
sys.stdout.flush()
time.sleep(3)
# Checking for random Login token
Frm_Logintoken = re.findall(r'Frm_Logintoken"\).value = "(.*)";', html_data)
if Frm_Logintoken :
sys.stdout.write(" ["+GREEN+" OK "+RESET+"]\n")
time.sleep(1)
Frm_Logintoken = str(Frm_Logintoken[0])
# Check router information
info = target
r = requests.get(target)
data = r.text
s = bs(data, "lxml")
response = urllib.urlopen(info)
html_data = response.read()
Frm_ModelName = str(s.find_all("span",class_="w250"))#"ZXHN H267N"
if Frm_ModelName :
print " [*] Model Name: "+GREEN+Frm_ModelName+RESET
Frm_SerialNumber = "0"
if Frm_SerialNumber :
print " [*] Serial Number: "+GREEN+Frm_SerialNumber+RESET
Frm_SoftwareVerExtent = "V1.0.0"
if Frm_SoftwareVerExtent :
print " [*] Hardware Version: "+GREEN+Frm_SoftwareVerExtent+RESET
Frm_HardwareVer = "V1.0.0T6P1_CYTA"
if Frm_HardwareVer :
print " [*] Software Version: "+GREEN+Frm_HardwareVer+RESET
Frm_BootVer = "V1.0.0 (Strong guess)"
if Frm_BootVer :
print " [*] Boot Loader Version: "+GREEN+Frm_BootVer+RESET
# Main menu
print"\nWelcome to CytaPWN main menu:"
print" 1. Start FTP Daemon"
print" 2. Initiate a MITM to a connected device"
print" 3. Control and administrate connected devices"
print" 4. Initiate a Telnet connection"
print" 5. About."
print" 6. Quit."
while True:
choice = raw_input("\nEnter your choice: ")
if choice == "5":
print"\n+---------------------------------------------------------------------------+"
print"| 0Day exploit for most Cyta's routers. Developed by Thomas Kropalis. |"
print"| This exploit allows full administrative control over the router and its |"
print"| connected devices. It mostly works on new routers, obtained around 2016. |"
print"+---------------------------------------------------------------------------+"
elif choice == "6":
print"Exiting.."
time.sleep(1)
sys.exit(1)
else:
print("\n["+RED+"-"+RESET+"] Invalid Option. ")
time.sleep(1)
else:
sys.stdout.write(" ["+RED+" FALSE "+RESET+"]\n")
except IOError, e:
print "Failed to connect on "+target
except (KeyboardInterrupt, SystemExit):
print "Exiting.."
|
apache-2.0
| -4,255,299,614,088,935,400
| 48.120805
| 113
| 0.410768
| false
| 4.467643
| false
| false
| false
|
wuliming/pcp
|
src/python/pcp/pmcc.py
|
1
|
23035
|
""" Convenience Classes building on the base PMAPI extension module """
#
# Copyright (C) 2013-2015 Red Hat
# Copyright (C) 2009-2012 Michael T. Werner
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from sys import stderr
from ctypes import c_int, c_uint, c_char_p, cast, POINTER
from pcp.pmapi import (pmContext, pmResult, pmValueSet, pmValue, pmDesc,
pmErr, pmOptions, timeval)
from cpmapi import (PM_CONTEXT_HOST, PM_CONTEXT_ARCHIVE, PM_INDOM_NULL,
PM_IN_NULL, PM_ID_NULL, PM_SEM_COUNTER, PM_ERR_EOL, PM_TYPE_DOUBLE)
class MetricCore(object):
"""
Core metric information that can be queried from the PMAPI
    PMAPI metrics are unique by name, and MetricCores should be also.
    Rarely, some PMAPI metrics with different names might have identical PMIDs.
    PMAPI metrics are unique by (name) and by (name, pmid) - _usually_ by (pmid)
too. Note that names here (and only here) are stored as byte strings for
direct PMAPI access. All dictionaries/caching strategies built using the
core structure use native strings (i.e., not byte strings in python3).
"""
def __init__(self, ctx, name, pmid):
self.ctx = ctx
if type(name) != type(b''):
name = name.encode('utf-8')
self.name = name
self.pmid = pmid
self.desc = None
self.text = None
self.help = None
class Metric(object):
"""
Additional metric information, such as conversion factors and values
several instances of Metric may share a MetricCore instance
"""
##
# constructor
def __init__(self, core):
self._core = core # MetricCore
self._vset = None # pmValueSet member
self._values = None
self._prevvset = None
self._prevValues = None
self._convType = core.desc.contents.type
self._convUnits = None
self._errorStatus = None
self._netValues = None # (instance, name, value)
self._netPrevValues = None # (instance, name, value)
        self._netConvValues = None # (instance, name, value)
##
# core property read methods
def _R_ctx(self):
return self._core.ctx
def _R_name(self):
return self._core.name.decode()
def _R_pmid(self):
return self._core.pmid
def _R_desc(self):
return self._core.desc
def _R_text(self):
return self._core.text
def _R_help(self):
return self._core.help
def get_vlist(self, vset, vlist_idx):
""" Return the vlist[vlist_idx] of vset[vset_idx] """
listptr = cast(vset.contents.vlist, POINTER(pmValue))
return listptr[vlist_idx]
def get_inst(self, vset, vlist_idx):
""" Return the inst for vlist[vlist_idx] of vset[vset_idx] """
return self.get_vlist(vset, vset_idx, vlist_idx).inst
def computeValues(self, inValues):
""" Extract the value for a singleton or list of instances
as a triple (inst, name, val)
"""
vset = inValues
ctx = self.ctx
instD = ctx.mcGetInstD(self.desc.contents.indom)
valL = []
for i in range(vset.numval):
instval = self.get_vlist(vset, i)
try:
name = instD[instval.inst]
except KeyError:
name = ''
outAtom = self.ctx.pmExtractValue(
vset.valfmt, instval, self.desc.type, self._convType)
if self._convUnits:
desc = (POINTER(pmDesc) * 1)()
desc[0] = self.desc
outAtom = self.ctx.pmConvScale(
self._convType, outAtom, desc, 0, self._convUnits)
value = outAtom.dref(self._convType)
valL.append((instval, name, value))
return valL
def _find_previous_instval(self, index, inst, pvset):
""" Find a metric instance in the previous resultset """
if index <= pvset.numval:
pinstval = self.get_vlist(pvset, index)
if inst == pinstval.inst:
return pinstval
for pi in range(pvset.numval):
pinstval = self.get_vlist(pvset, pi)
if inst == pinstval.inst:
return pinstval
return None
def convertValues(self, values, prevValues, delta):
""" Extract the value for a singleton or list of instances as a
triple (inst, name, val) for COUNTER metrics with the value
delta calculation applied (for rate conversion).
"""
if self.desc.sem != PM_SEM_COUNTER:
return self.computeValues(values)
if prevValues == None:
return None
pvset = prevValues
vset = values
ctx = self.ctx
instD = ctx.mcGetInstD(self.desc.contents.indom)
valL = []
for i in range(vset.numval):
instval = self.get_vlist(vset, i)
pinstval = self._find_previous_instval(i, instval.inst, pvset)
if pinstval == None:
continue
try:
name = instD[instval.inst]
except KeyError:
name = ''
outAtom = self.ctx.pmExtractValue(vset.valfmt,
instval, self.desc.type, PM_TYPE_DOUBLE)
poutAtom = self.ctx.pmExtractValue(pvset.valfmt,
pinstval, self.desc.type, PM_TYPE_DOUBLE)
if self._convUnits:
desc = (POINTER(pmDesc) * 1)()
desc[0] = self.desc
outAtom = self.ctx.pmConvScale(
PM_TYPE_DOUBLE, outAtom, desc, 0, self._convUnits)
poutAtom = self.ctx.pmConvScale(
PM_TYPE_DOUBLE, poutAtom, desc, 0, self._convUnits)
value = outAtom.dref(PM_TYPE_DOUBLE)
pvalue = poutAtom.dref(PM_TYPE_DOUBLE)
if (value >= pvalue):
valL.append((instval, name, (value - pvalue) / delta))
return valL
def _R_values(self):
return self._values
def _R_prevValues(self):
return self._prevValues
def _R_convType(self):
return self._convType
def _R_convUnits(self):
return self._convUnits
def _R_errorStatus(self):
return self._errorStatus
def _R_netConvValues(self):
return self._netConvValues
def _R_netPrevValues(self):
if not self._prevvset:
return None
self._netPrevValues = self.computeValues(self._prevvset)
return self._netPrevValues
def _R_netValues(self):
if not self._vset:
return None
self._netValues = self.computeValues(self._vset)
return self._netValues
def _W_values(self, values):
        self._prevValues = self._values
        self._values = values
        self._netPrevValues = self._netValues
        self._netValues = None
def _W_convType(self, value):
self._convType = value
def _W_convUnits(self, value):
self._convUnits = value
# interface to properties in MetricCore
ctx = property(_R_ctx, None, None, None)
name = property(_R_name, None, None, None)
pmid = property(_R_pmid, None, None, None)
desc = property(_R_desc, None, None, None)
text = property(_R_text, None, None, None)
help = property(_R_help, None, None, None)
# properties specific to this instance
values = property(_R_values, _W_values, None, None)
prevValues = property(_R_prevValues, None, None, None)
convType = property(_R_convType, _W_convType, None, None)
convUnits = property(_R_convUnits, _W_convUnits, None, None)
errorStatus = property(_R_errorStatus, None, None, None)
netValues = property(_R_netValues, None, None, None)
netPrevValues = property(_R_netPrevValues, None, None, None)
netConvValues = property(_R_netConvValues, None, None, None)
def metricPrint(self):
indomstr = self.ctx.pmInDomStr(self.desc.indom)
print(" ", "indom:", indomstr)
instD = self.ctx.mcGetInstD(self.desc.indom)
for inst, name, val in self.netValues:
print(" ", name, val)
def metricConvert(self, delta):
convertedList = self.convertValues(self._vset, self._prevvset, delta)
self._netConvValues = convertedList
return self._netConvValues
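# Hedged illustration (not part of the original module): the rate conversion
# applied by Metric.convertValues() for PM_SEM_COUNTER metrics reduces to
# (current - previous) / delta. With plain numbers:
def _rate_conversion_sketch():  # pragma: no cover
    previous, current, delta = 1000.0, 1250.0, 5.0   # two counter samples, 5s apart
    return (current - previous) / delta              # -> 50.0 units per second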
class MetricCache(pmContext):
"""
A cache of MetricCores is kept to reduce calls into the PMAPI library
this also slightly reduces the memory footprint of Metric instances
that share a common MetricCore
a cache of instance domain information is also kept, which further
reduces calls into the PMAPI and reduces the memory footprint of
Metric objects that share a common instance domain
"""
##
# overloads
def __init__(self, typed = PM_CONTEXT_HOST, target = "local:"):
pmContext.__init__(self, typed, target)
self._mcIndomD = {}
self._mcByNameD = {}
self._mcByPmidD = {}
##
# methods
def mcGetInstD(self, indom):
""" Query the instance : instance_list dictionary """
return self._mcIndomD[indom]
def _mcAdd(self, core):
""" Update the dictionary """
indom = core.desc.contents.indom
if indom not in self._mcIndomD:
if c_int(indom).value == c_int(PM_INDOM_NULL).value:
instmap = { PM_IN_NULL : b'PM_IN_NULL' }
else:
if self._type == PM_CONTEXT_ARCHIVE:
instL, nameL = self.pmGetInDomArchive(core.desc)
else:
instL, nameL = self.pmGetInDom(core.desc)
if instL != None and nameL != None:
instmap = dict(zip(instL, nameL))
else:
instmap = {}
self._mcIndomD.update({indom: instmap})
self._mcByNameD.update({core.name.decode(): core})
self._mcByPmidD.update({core.pmid: core})
def mcGetCoresByName(self, nameL):
""" Update the core (metric id, description,...) list """
coreL = []
missD = None
errL = None
# lookup names in cache
for index, name in enumerate(nameL):
if type(name) == type(b''):
name = name.decode()
# lookup metric core in cache
core = self._mcByNameD.get(name)
if not core:
# cache miss
if not missD:
missD = {}
missD.update({name: index})
coreL.append(core)
# some cache lookups missed, fetch pmids and build missing MetricCores
if missD:
idL, errL = self.mcFetchPmids(missD.keys())
for name, pmid in idL:
if pmid == PM_ID_NULL:
# fetch failed for the given metric name
if not errL:
errL = []
errL.append(name)
else:
# create core pmDesc
newcore = self._mcCreateCore(name, pmid)
# update core ref in return list
coreL[missD[name]] = newcore
return coreL, errL
def _mcCreateCore(self, name, pmid):
""" Update the core description """
newcore = MetricCore(self, name, pmid)
try:
newcore.desc = self.pmLookupDesc(pmid)
except pmErr as error:
fail = "%s: pmLookupDesc: %s" % (error.progname(), error.message())
print >> stderr, fail
raise SystemExit(1)
# insert core into cache
self._mcAdd(newcore)
return newcore
def mcFetchPmids(self, nameL):
""" Update the core metric ids. note: some names have identical pmids """
errL = None
nameA = (c_char_p * len(nameL))()
for index, name in enumerate(nameL):
if type(name) != type(b''):
name = name.encode('utf-8')
nameA[index] = c_char_p(name)
try:
pmidArray = self.pmLookupName(nameA)
if len(pmidArray) < len(nameA):
missing = "%d of %d metric names" % (len(pmidArray), len(nameA))
print >> stderr, "Cannot resolve", missing
raise SystemExit(1)
except pmErr as error:
fail = "%s: pmLookupName: %s" % (error.progname(), error.message())
print >> stderr, fail
raise SystemExit(1)
return zip(nameL, pmidArray), errL
class MetricGroup(dict):
"""
Manages a group of metrics for fetching the values of
a MetricGroup is a dictionary of Metric objects, for which data can
be fetched from a target system using a single call to pmFetch
the Metric objects are indexed by the metric name
pmFetch fetches data for a list of pmIDs, so there is also a shadow
dictionary keyed by pmID, along with a shadow list of pmIDs
"""
##
# property read methods
def _R_contextCache(self):
return self._ctx
def _R_pmidArray(self):
return self._pmidArray
def _R_timestamp(self):
return self._result.contents.timestamp
def _R_result(self):
return self._result
def _R_prevTimestamp(self):
return self._prev.contents.timestamp
def _R_prev(self):
return self._prev
##
# property write methods
def _W_result(self, pmresult):
self._prev = self._result
self._result = pmresult
##
# property definitions
contextCache = property(_R_contextCache, None, None, None)
pmidArray = property(_R_pmidArray, None, None, None)
result = property(_R_result, _W_result, None, None)
timestamp = property(_R_timestamp, None, None, None)
prev = property(_R_prev, None, None, None)
prevTimestamp = property(_R_prevTimestamp, None, None, None)
##
# overloads
def __init__(self, contextCache, inL = []):
dict.__init__(self)
self._ctx = contextCache
self._pmidArray = None
self._result = None
self._prev = None
self._altD = {}
self.mgAdd(inL)
def __setitem__(self, attr, value = []):
if attr in self:
raise KeyError("metric group with that key already exists")
else:
dict.__setitem__(self, attr, MetricGroup(self, inL = value))
##
# methods
def mgAdd(self, nameL):
""" Create the list of Metric(s) """
coreL, errL = self._ctx.mcGetCoresByName(nameL)
for core in coreL:
metric = Metric(core)
self.update({metric.name: metric})
self._altD.update({metric.pmid: metric})
n = len(self)
self._pmidArray = (c_uint * n)()
for x, key in enumerate(self.keys()):
self._pmidArray[x] = c_uint(self[key].pmid)
def mgFetch(self):
""" Fetch the list of Metric values. Save the old value. """
try:
self.result = self._ctx.pmFetch(self._pmidArray)
# update the result entries in each metric
result = self.result.contents
for i in range(self.result.contents.numpmid):
pmid = self.result.contents.get_pmid(i)
vset = self.result.contents.get_vset(i)
self._altD[pmid]._prevvset = self._altD[pmid]._vset
self._altD[pmid]._vset = vset
except pmErr as error:
if error.args[0] == PM_ERR_EOL:
raise SystemExit(0)
fail = "%s: pmFetch: %s" % (error.progname(), error.message())
print >> stderr, fail
raise SystemExit(1)
def mgDelta(self):
"""
Sample delta - used for rate conversion calculations, which
requires timestamps from successive samples.
"""
if self._prev != None:
prevTimestamp = float(self.prevTimestamp)
else:
prevTimestamp = 0.0
return float(self.timestamp) - prevTimestamp
class MetricGroupPrinter(object):
"""
Handles reporting of MetricGroups within a GroupManager.
This object is called upon at the end of each fetch when
new values are available. It is also responsible for
producing any initial (or on-going) header information
that the tool may wish to report.
"""
def report(self, manager):
""" Base implementation, all tools should override """
for group_name in manager.keys():
group = manager[group_name]
for metric_name in group.keys():
group[metric_name].metricPrint()
def convert(self, manager):
""" Do conversion for all metrics across all groups """
for group_name in manager.keys():
group = manager[group_name]
delta = group.mgDelta()
for metric_name in group.keys():
group[metric_name].metricConvert(delta)
class MetricGroupManager(dict, MetricCache):
"""
Manages a dictionary of MetricGroups which can be pmFetch'ed
inherits from MetricCache, which inherits from pmContext
"""
##
# property access methods
def _R_options(self): # command line option object
return self._options
def _W_options(self, options):
self._options = options
def _R_default_delta(self): # default interval unless command line set
return self._default_delta
def _W_default_delta(self, delta):
self._default_delta = delta
def _R_default_pause(self): # default reporting delay (archives only)
return self._default_pause
def _W_default_pause(self, pause):
self._default_pause = pause
def _W_printer(self, printer): # helper class for reporting
self._printer = printer
def _R_counter(self): # fetch iteration count, useful for printer
return self._counter
##
# property definitions
options = property(_R_options, _W_options, None, None)
default_delta = property(_R_default_delta, _W_default_delta, None, None)
default_pause = property(_R_default_pause, _W_default_pause, None, None)
printer = property(None, _W_printer, None, None)
counter = property(_R_counter, None, None, None)
##
# overloads
def __init__(self, typed = PM_CONTEXT_HOST, target = "local:"):
dict.__init__(self)
MetricCache.__init__(self, typed, target)
self._options = None
self._default_delta = timeval(1, 0)
self._default_pause = None
self._printer = None
self._counter = 0
def __setitem__(self, attr, value = []):
if attr in self:
raise KeyError("metric group with that key already exists")
else:
dict.__setitem__(self, attr, MetricGroup(self, inL = value))
@classmethod
def builder(build, options, argv):
""" Helper interface, simple PCP monitor argument parsing. """
manager = build.fromOptions(options, argv)
manager._default_delta = timeval(options.delta, 0)
manager._options = options
return manager
##
# methods
def _computeSamples(self):
""" Calculate the number of samples we are to take.
This is based on command line options --samples but also
must consider --start, --finish and --interval. If none
of these were presented, a zero return means "infinite".
"""
if self._options == None:
return 0 # loop until interrupted or PM_ERR_EOL
samples = self._options.pmGetOptionSamples()
if samples != None:
return samples
if self._options.pmGetOptionFinishOptarg() == None:
return 0 # loop until interrupted or PM_ERR_EOL
origin = self._options.pmGetOptionOrigin()
finish = self._options.pmGetOptionFinish()
delta = self._options.pmGetOptionInterval()
if delta == None:
delta = self._default_delta
period = (delta.tv_sec * 1.0e6 + delta.tv_usec) / 1e6
window = float(finish.tv_sec - origin.tv_sec)
window += float((finish.tv_usec - origin.tv_usec) / 1e6)
window /= period
return int(window + 0.5) # roundup to positive number
def _computePauseTime(self):
""" Figure out how long to sleep between samples.
This needs to take into account whether we were explicitly
asked for a delay (independent of context type, --pause),
whether this is an archive or live context, and the sampling
--interval (including the default value, if none requested).
"""
if self._default_pause != None:
return self._default_pause
if self.type == PM_CONTEXT_ARCHIVE:
self._default_pause = timeval(0, 0)
elif self._options != None:
pause = self._options.pmGetOptionInterval()
if pause != None:
self._default_pause = pause
else:
self._default_pause = self._default_delta
else:
self._default_pause = self._default_delta
return self._default_pause
def fetch(self):
""" Perform fetch operation on all of the groups. """
for group in self.keys():
self[group].mgFetch()
def run(self):
""" Using options specification, loop fetching and reporting,
pausing for the requested time interval between updates.
Transparently handles archive/live mode differences.
Note that this can be different to the sampling interval
in archive mode, but is usually the same as the sampling
interval in live mode.
"""
samples = self._computeSamples()
timer = self._computePauseTime()
try:
self.fetch()
while True:
if samples == 0 or self._counter <= samples:
self._printer.report(self)
if self._counter == samples:
break
                # We need two fetches to report rate-converted counter
                # metrics, so the actual number of output samples would be
                # less than the specified number when using the '-s' and
                # '-T' options; the '+1' below accounts for that.
self._counter += 1
timer.sleep()
self.fetch()
except SystemExit as code:
return code
except KeyboardInterrupt:
pass
return 0
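# Hedged usage sketch (not part of the original module): the typical pattern
# a PCP monitoring tool follows with these classes. The metric names are
# examples and resolving them requires a running pmcd on the local host.
def _example_tool():  # pragma: no cover
    manager = MetricGroupManager()                 # PM_CONTEXT_HOST, "local:"
    manager["kernel"] = ["kernel.all.load", "kernel.all.nprocs"]
    manager.printer = MetricGroupPrinter()         # base printer: dumps raw values
    return manager.run()                           # fetch/report loop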
|
lgpl-2.1
| -2,193,649,800,487,159,300
| 35.161695
| 82
| 0.584545
| false
| 3.997744
| false
| false
| false
|
Osmose/pontoon
|
pontoon/administration/management/commands/sync_projects.py
|
1
|
17746
|
from collections import Counter
from datetime import datetime
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from django.utils import timezone
from bulk_update.helper import bulk_update
from pontoon.administration.files import update_from_repository
from pontoon.administration.vcs import commit_to_vcs, CommitToRepositoryException
from pontoon.base.models import (
ChangedEntityLocale,
Entity,
Locale,
Project,
Resource,
Translation,
update_stats
)
from pontoon.base.utils import match_attr
from pontoon.base.vcs_models import VCSProject
class Command(BaseCommand):
args = '<project_slug project_slug ...>'
help = 'Synchronize database and remote repositories.'
def add_arguments(self, parser):
parser.add_argument(
'--no-commit',
action='store_true',
dest='no_commit',
default=False,
help='Do not commit changes to VCS'
)
def log(self, msg, *args, **kwargs):
"""Log a message to the console."""
self.stdout.write(msg.format(*args, **kwargs))
def info(self, msg, *args, **kwargs):
"""Log a message to the console if --verbosity=1 or more."""
if self.verbosity >= 1:
self.log(msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
"""Log a message to the console if --verbosity=2."""
if self.verbosity == 2:
self.log(msg, *args, **kwargs)
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.no_commit = options['no_commit']
self.log('SYNC PROJECTS: start')
projects = Project.objects.filter(disabled=False)
if args:
projects = projects.filter(slug__in=args)
if len(projects) < 1:
raise CommandError('No matching projects found.')
for project in projects:
if not project.can_commit:
self.log(u'Skipping project {0}, cannot commit to repository.'
.format(project.name))
else:
self.handle_project(project)
self.log('SYNC PROJECTS: done')
# Once we've synced, we can delete all translations scheduled
# for deletion.
Translation.deleted_objects.all().delete()
def handle_project(self, db_project):
# Pull changes from VCS and update what we know about the files.
update_from_repository(db_project)
vcs_project = VCSProject(db_project)
self.update_resources(db_project, vcs_project)
# Collect all entities across VCS and the database and get their
# keys so we can match up matching entities.
vcs_entities = self.get_vcs_entities(vcs_project)
db_entities = self.get_db_entities(db_project)
entity_keys = set().union(db_entities.keys(), vcs_entities.keys())
changeset = ChangeSet(db_project, vcs_project)
for key in entity_keys:
db_entity = db_entities.get(key, None)
vcs_entity = vcs_entities.get(key, None)
self.handle_entity(changeset, db_project, key, db_entity, vcs_entity)
# Apply the changeset to the files, commit them, and update stats
# entries in the DB.
changeset.execute()
if not self.no_commit:
self.commit_changes(db_project, changeset)
self.update_stats(db_project, vcs_project, changeset)
# Clear out the list of changed locales for entity in this
# project now that we've finished syncing.
(ChangedEntityLocale.objects
.filter(entity__resource__project=db_project)
.delete())
self.log(u'Synced project {0}', db_project.slug)
def handle_entity(self, changeset, db_project, key, db_entity, vcs_entity):
"""
Determine what needs to be synced between the database and VCS versions
of a single entity and log what needs to be changed in the changeset.
"""
if vcs_entity is None:
if db_entity is None:
# This should never happen. What? Hard abort.
raise CommandError('No entities found for key {0}'.format(key))
else:
# VCS no longer has the entity, remove it from Pontoon.
changeset.obsolete_db_entity(db_entity)
elif db_entity is None:
# New VCS entities are added to Pontoon.
changeset.create_db_entity(vcs_entity)
else:
for locale in db_project.locales.all():
if not vcs_entity.has_translation_for(locale.code):
# VCS lacks an entity for this locale, so we can't
# pull updates nor edit it. Skip it!
continue
if db_entity.has_changed(locale):
# Pontoon changes overwrite whatever VCS has.
changeset.update_vcs_entity(locale.code, db_entity, vcs_entity)
else:
# If Pontoon has nothing or has not changed, and the VCS
# still has the entity, update Pontoon with whatever may
# have changed.
changeset.update_db_entity(locale.code, db_entity, vcs_entity)
def update_resources(self, db_project, vcs_project):
"""Update the database on what resource files exist in VCS."""
relative_paths = vcs_project.resources.keys()
db_project.resource_set.exclude(path__in=relative_paths).delete()
for relative_path, vcs_resource in vcs_project.resources.items():
resource, created = db_project.resource_set.get_or_create(path=relative_path)
resource.format = Resource.get_path_format(relative_path)
resource.entity_count = len(vcs_resource.entities)
resource.save()
def update_stats(self, db_project, vcs_project, changeset):
"""
Update the Stats entries in the database for locales that had
translation updates.
"""
for resource in db_project.resource_set.all():
for locale in changeset.updated_locales:
# We only want to create/update the stats object if the resource
# exists in the current locale, UNLESS the file is asymmetric.
vcs_resource = vcs_project.resources[resource.path]
resource_exists = vcs_resource.files.get(locale) is not None
if resource_exists or resource.is_asymmetric:
update_stats(resource, locale)
def get_vcs_entities(self, vcs_project):
return {self.entity_key(entity): entity for entity in vcs_project.entities}
def get_db_entities(self, db_project):
entities = (Entity.objects
.select_related('resource')
.prefetch_related('changed_locales')
.filter(resource__project=db_project, obsolete=False))
return {self.entity_key(entity): entity for entity in entities}
def entity_key(self, entity):
"""
Generate a key for the given entity that is unique within the
project.
"""
key = entity.key or entity.string
return ':'.join([entity.resource.path, key])
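    # Illustrative note (not in the original source, values made up): for an
    # entity in "browser/chrome/browser.properties" whose key is "tabs.close",
    # the generated key is "browser/chrome/browser.properties:tabs.close".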
def commit_changes(self, db_project, changeset):
"""Commit the changes we've made back to the VCS."""
for locale in db_project.locales.all():
authors = changeset.commit_authors_per_locale.get(locale.code, [])
# Use the top translator for this batch as commit author, or
# the fake Pontoon user if there are no authors.
if len(authors) > 0:
commit_author = Counter(authors).most_common(1)[0][0]
else:
commit_author = User(first_name="Pontoon", email="pontoon@mozilla.com")
commit_message = render_to_string('commit_message.jinja', {
'locale': locale,
'project': db_project,
'authors': authors
})
try:
result = commit_to_vcs(
db_project.repository_type,
db_project.locale_directory_path(locale.code),
commit_message,
commit_author,
db_project.repository_url
)
except CommitToRepositoryException as err:
result = {'message': unicode(err)}
if result is not None:
self.log(
u'Committing project {project.name} for {locale.name} '
u'({locale.code}) failed: {reason}',
project=db_project,
locale=locale,
reason=result['message']
)
class ChangeSet(object):
"""
Stores a set of changes to be made to the database and the
translations stored in VCS. Once all the necessary changes have been
stored, execute all the changes at once efficiently.
"""
def __init__(self, db_project, vcs_project):
self.db_project = db_project
self.vcs_project = vcs_project
self.executed = False
self.changes = {
'update_vcs': [],
'update_db': [],
'obsolete_db': [],
'create_db': []
}
self.entities_to_update = []
self.translations_to_update = []
self.translations_to_create = []
self.commit_authors_per_locale = {}
self.updated_locales = set()
def update_vcs_entity(self, locale_code, db_entity, vcs_entity):
"""
Replace the translations in VCS with the translations from the
database.
"""
self.changes['update_vcs'].append((locale_code, db_entity, vcs_entity))
def create_db_entity(self, vcs_entity):
"""Create a new entity in the database."""
self.changes['create_db'].append(vcs_entity)
def update_db_entity(self, locale_code, db_entity, vcs_entity):
"""Update the database with translations from VCS."""
self.changes['update_db'].append((locale_code, db_entity, vcs_entity))
def obsolete_db_entity(self, db_entity):
"""Mark the given entity as obsolete."""
self.changes['obsolete_db'].append(db_entity.pk)
def execute(self):
"""
Execute the changes stored in this changeset. Execute can only
be called once per changeset; subsequent calls raise a
RuntimeError, even if the changes failed.
"""
if self.executed:
raise RuntimeError('execute() can only be called once per changeset.')
else:
self.executed = True
# Store locales and resources for FK relationships.
self.locales = {l.code: l for l in Locale.objects.all()}
self.resources = {r.path: r for r in self.db_project.resource_set.all()}
# Perform the changes and fill the lists for bulk creation and
# updating.
self.execute_update_vcs()
self.execute_create_db()
self.execute_update_db()
self.execute_obsolete_db()
# Apply the built-up changes to the DB
if len(self.entities_to_update) > 0:
bulk_update(self.entities_to_update, update_fields=[
'resource',
'string',
'string_plural',
'key',
'comment',
'order',
'source'
])
Translation.objects.bulk_create(self.translations_to_create)
if len(self.translations_to_update) > 0:
bulk_update(self.translations_to_update, update_fields=[
'entity',
'locale',
'string',
'plural_form',
'approved',
'approved_user_id',
'approved_date',
'fuzzy',
'extra'
])
# Track which locales were updated.
for translation in self.translations_to_update:
self.updated_locales.add(translation.locale)
def execute_update_vcs(self):
resources = self.vcs_project.resources
changed_resources = set()
for locale_code, db_entity, vcs_entity in self.changes['update_vcs']:
changed_resources.add(resources[db_entity.resource.path])
vcs_translation = vcs_entity.translations[locale_code]
db_translations = (db_entity.translation_set
.filter(approved=True, locale__code=locale_code))
            # Fuzzy is True if any approved DB translation is fuzzy,
            # False otherwise.
            vcs_translation.fuzzy = any(t.fuzzy for t in db_translations)
if len(db_translations) > 0:
last_translation = max(db_translations, key=lambda t: t.date or datetime.min)
vcs_translation.last_updated = last_translation.date
vcs_translation.last_translator = last_translation.user
# Replace existing translations with ones from the database.
vcs_translation.strings = {
db.plural_form: db.string for db in db_translations
}
# Track which translators were involved.
self.commit_authors_per_locale[locale_code] = [t.user for t in db_translations if t.user]
for resource in changed_resources:
resource.save()
def get_entity_updates(self, vcs_entity):
"""
Return a dict of the properties and values necessary to create
or update a database entity from a VCS entity.
"""
return {
'resource': self.resources[vcs_entity.resource.path],
'string': vcs_entity.string,
'string_plural': vcs_entity.string_plural,
'key': vcs_entity.key,
'comment': '\n'.join(vcs_entity.comments),
'order': vcs_entity.order,
'source': vcs_entity.source
}
def execute_create_db(self):
for vcs_entity in self.changes['create_db']:
entity = Entity(**self.get_entity_updates(vcs_entity))
entity.save() # We can't use bulk_create since we need a PK
for locale_code, vcs_translation in vcs_entity.translations.items():
for plural_form, string in vcs_translation.strings.items():
self.translations_to_create.append(Translation(
entity=entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=timezone.now() if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy
))
def execute_update_db(self):
for locale_code, db_entity, vcs_entity in self.changes['update_db']:
for field, value in self.get_entity_updates(vcs_entity).items():
setattr(db_entity, field, value)
if db_entity.is_dirty(check_relationship=True):
self.entities_to_update.append(db_entity)
# Update translations for the entity.
vcs_translation = vcs_entity.translations[locale_code]
db_translations = db_entity.translation_set.filter(locale__code=locale_code)
approved_translations = []
for plural_form, string in vcs_translation.strings.items():
# Check if we need to modify an existing translation or
# create a new one.
db_translation = match_attr(db_translations,
plural_form=plural_form,
string=string)
if db_translation:
if not db_translation.approved:
db_translation.approved = True
db_translation.approved_date = timezone.now()
db_translation.fuzzy = vcs_translation.fuzzy
db_translation.extra = vcs_translation.extra
if db_translation.is_dirty():
self.translations_to_update.append(db_translation)
if not db_translation.fuzzy:
approved_translations.append(db_translation)
else:
self.translations_to_create.append(Translation(
entity=db_entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=timezone.now() if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy,
extra=vcs_translation.extra
))
# Any existing translations that were not approved get unapproved.
for translation in db_translations:
if translation not in approved_translations:
translation.approved = False
translation.approved_user = None
translation.approved_date = None
if translation.is_dirty():
self.translations_to_update.append(translation)
def execute_obsolete_db(self):
(Entity.objects
.filter(pk__in=self.changes['obsolete_db'])
.update(obsolete=True))
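# Illustrative usage sketch (an assumption, not part of the original module):
# how a ChangeSet is typically driven by the sync code above. The
# db_project/vcs_project/entity/locale_code arguments are placeholders taken
# from the surrounding code, not an authoritative API.
def _changeset_usage_sketch(db_project, vcs_project, db_entity, vcs_entity, locale_code):
    changeset = ChangeSet(db_project, vcs_project)
    # Queue changes; nothing touches the DB or VCS until execute() runs.
    changeset.update_vcs_entity(locale_code, db_entity, vcs_entity)
    changeset.update_db_entity(locale_code, db_entity, vcs_entity)
    changeset.create_db_entity(vcs_entity)
    changeset.obsolete_db_entity(db_entity)
    # Apply everything in one pass; a second call raises RuntimeError.
    changeset.execute()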
|
bsd-3-clause
| 6,697,951,671,892,925,000
| 39.515982
| 101
| 0.575679
| false
| 4.520122
| false
| false
| false
|
sunweaver/ganetimgr
|
ganeti/utils.py
|
1
|
18377
|
import requests
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup
import json
from gevent.pool import Pool
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.contrib.auth.models import User, Group
from django.db import close_connection
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import filesizeformat
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from ganeti.models import Cluster, Instance, InstanceAction
from util.client import GanetiApiError
def memsize(value):
return filesizeformat(value * 1024 ** 2)
def disksizes(value):
return [filesizeformat(v * 1024 ** 2) for v in value]
def get_instance_data(instance, cluster, node=None):
instance.cpu_url = reverse(
'graph',
args=(cluster.slug, instance.name, 'cpu-ts')
)
instance.net_url = []
for (nic_i, link) in enumerate(instance.nic_links):
instance.net_url.append(
reverse(
'graph',
args=(
cluster.slug,
instance.name,
'net-ts',
'/eth%s' % nic_i
)
)
)
return {
'node': instance.pnode,
'name': instance.name,
'cluster': instance.cluster.slug,
'cpu': instance.cpu_url,
'network': instance.net_url,
}
def get_nodes_with_graphs(cluster_slug, nodes=None):
cluster = Cluster.objects.get(slug=cluster_slug)
instances = Instance.objects.filter(cluster=cluster)
response = []
for i in instances:
        # if nodes are given, only include instances that belong to one of them
if not nodes:
response.append(get_instance_data(i, cluster))
else:
for node in nodes:
if i.pnode == node:
response.append(get_instance_data(i, cluster, node))
return response
def prepare_clusternodes(cluster=None):
if not cluster:
# get only enabled clusters
clusters = Cluster.objects.filter(disabled=False)
else:
clusters = Cluster.objects.filter(slug=cluster)
p = Pool(15)
nodes = []
bad_clusters = []
bad_nodes = []
def _get_nodes(cluster):
try:
for node in cluster.get_cluster_nodes():
nodes.append(node)
if node['offline'] is True:
bad_nodes.append(node['name'])
except (GanetiApiError, Exception):
cluster._client = None
bad_clusters.append(cluster)
finally:
close_connection()
p.map(_get_nodes, clusters)
return nodes, bad_clusters, bad_nodes
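# Illustrative sketch (assumption): how the three values returned by
# prepare_clusternodes() are typically cached, mirroring what
# refresh_cluster_cache() does further below.
def _clusternodes_cache_sketch():
    nodes, bad_clusters, bad_nodes = prepare_clusternodes()
    cache.set('allclusternodes', nodes, 90)
    cache.set('badclusters', bad_clusters, 90)
    cache.set('badnodes', bad_nodes, 90)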
def generate_json(instance, user, locked_nodes):
jresp_list = []
i = instance
inst_dict = {}
if not i.admin_view_only:
inst_dict['name_href'] = "%s" % (
reverse(
'instance-detail',
kwargs={
'cluster_slug': i.cluster.slug, 'instance': i.name
}
)
)
inst_dict['name'] = i.name
if user.is_superuser or user.has_perm('ganeti.view_instances'):
inst_dict['cluster'] = i.cluster.slug
inst_dict['pnode'] = i.pnode
else:
inst_dict['cluster'] = i.cluster.description
inst_dict['clusterslug'] = i.cluster.slug
inst_dict['node_group_locked'] = i.pnode in locked_nodes
inst_dict['memory'] = memsize(i.beparams['maxmem'])
inst_dict['disk'] = ", ".join(disksizes(i.disk_sizes))
inst_dict['vcpus'] = i.beparams['vcpus']
inst_dict['ipaddress'] = [ip for ip in i.nic_ips if ip]
if not user.is_superuser and not user.has_perm('ganeti.view_instances'):
inst_dict['ipv6address'] = [ip for ip in i.ipv6s if ip]
# inst_dict['status'] = i.nic_ips[0] if i.nic_ips[0] else "-"
if i.admin_state == i.oper_state:
if i.admin_state:
inst_dict['status'] = "Running"
inst_dict['status_style'] = "success"
else:
inst_dict['status'] = "Stopped"
inst_dict['status_style'] = "important"
else:
if i.oper_state:
inst_dict['status'] = "Running"
else:
inst_dict['status'] = "Stopped"
if i.admin_state:
inst_dict['status'] = "%s, should be running" % inst_dict['status']
else:
inst_dict['status'] = "%s, should be stopped" % inst_dict['status']
inst_dict['status_style'] = "warning"
if i.status == 'ERROR_nodedown':
inst_dict['status'] = "Generic cluster error"
inst_dict['status_style'] = "important"
if i.adminlock:
inst_dict['adminlock'] = True
if i.isolate:
inst_dict['isolate'] = True
if i.needsreboot:
inst_dict['needsreboot'] = True
# When renaming disable clicking on instance for everyone
if hasattr(i, 'admin_lock'):
if i.admin_lock:
try:
del inst_dict['name_href']
except KeyError:
pass
if i.joblock:
inst_dict['locked'] = True
inst_dict['locked_reason'] = "%s" % ((i.joblock).capitalize())
if inst_dict['locked_reason'] in ['Deleting', 'Renaming']:
try:
del inst_dict['name_href']
except KeyError:
pass
if 'cdrom_image_path' in i.hvparams.keys():
if i.hvparams['cdrom_image_path'] and i.hvparams['boot_order'] == 'cdrom':
inst_dict['cdrom'] = True
inst_dict['nic_macs'] = ', '.join(i.nic_macs)
if user.is_superuser or user.has_perm('ganeti.view_instances'):
inst_dict['nic_links'] = ', '.join(i.nic_links)
inst_dict['network'] = []
for (nic_i, link) in enumerate(i.nic_links):
if i.nic_ips[nic_i] is None:
inst_dict['network'].append("%s" % (i.nic_links[nic_i]))
else:
inst_dict['network'].append(
"%s@%s" % (i.nic_ips[nic_i], i.nic_links[nic_i])
)
inst_dict['users'] = [
{
'user': user_item.username,
'email': user_item.email,
'user_href': "%s" % (
reverse(
"user-info",
kwargs={
'type': 'user',
'usergroup': user_item.username
}
)
)
} for user_item in i.users]
inst_dict['groups'] = [
{
'group': group.name,
'groupusers': [
"%s,%s" % (u.username, u.email) for u in group.userset
],
'group_href':"%s" % (
reverse(
"user-info",
kwargs={
'type': 'group',
'usergroup': group.name
}
)
)
} for group in i.groups
]
jresp_list.append(inst_dict)
return jresp_list
def generate_json_light(instance, user):
jresp_list = []
i = instance
inst_dict = {}
if not i.admin_view_only:
inst_dict['name_href'] = "%s" % (
reverse(
"instance-detail",
kwargs={
'cluster_slug': i.cluster.slug,
'instance': i.name
}
)
)
inst_dict['name'] = i.name
inst_dict['clusterslug'] = i.cluster.slug
inst_dict['memory'] = i.beparams['maxmem']
inst_dict['vcpus'] = i.beparams['vcpus']
inst_dict['disk'] = sum(i.disk_sizes)
if user.is_superuser or user.has_perm('ganeti.view_instances'):
inst_dict['users'] = [
{
'user': user_item.username
} for user_item in i.users
]
jresp_list.append(inst_dict)
return jresp_list
def clear_cluster_user_cache(username, cluster_slug):
cache.delete("user:%s:index:instances" % username)
cache.delete("cluster:%s:instances" % cluster_slug)
def notifyuseradvancedactions(
user,
cluster_slug,
instance,
action_id,
action_value,
new_operating_system
):
action_id = int(action_id)
if action_id not in [1, 2, 3]:
action = {'action': _("Not allowed action")}
return action
cluster = get_object_or_404(Cluster, slug=cluster_slug)
instance = cluster.get_instance_or_404(instance)
reinstalldestroy_req = InstanceAction.objects.create_action(
user,
instance,
cluster,
action_id,
action_value,
new_operating_system
)
fqdn = Site.objects.get_current().domain
url = "https://%s%s" % \
(
fqdn,
reverse(
"reinstall-destroy-review",
kwargs={
'application_hash': reinstalldestroy_req.activation_key,
'action_id': action_id
}
)
)
email = render_to_string(
"instances/emails/reinstall_mail.txt",
{
"instance": instance,
"user": user,
"action": reinstalldestroy_req.get_action_display(),
"action_value": reinstalldestroy_req.action_value,
"url": url,
"operating_system": reinstalldestroy_req.operating_system
}
)
if action_id == 1:
action_mail_text = _("re-installation")
if action_id == 2:
action_mail_text = _("destruction")
if action_id == 3:
action_mail_text = _("rename")
try:
send_mail(
_("%(pref)sInstance %(action)s requested: %(instance)s") % {
"pref": settings.EMAIL_SUBJECT_PREFIX,
"action": action_mail_text,
"instance": instance.name
},
email,
settings.SERVER_EMAIL,
[user.email]
)
# if anything goes wrong do nothing.
except:
# remove entry
reinstalldestroy_req.delete()
action = {'action': _("Could not send email")}
else:
action = {'action': _("Mail sent")}
return action
try:
from ganetimgr.settings import OPERATING_SYSTEMS_URLS
except ImportError:
OPERATING_SYSTEMS_URLS = False
else:
from ganetimgr.settings import OPERATING_SYSTEMS_PROVIDER, OPERATING_SYSTEMS_SSH_KEY_PARAM
try:
from ganetimgr.settings import OPERATING_SYSTEMS
except ImportError:
OPERATING_SYSTEMS = False
def discover_available_operating_systems():
operating_systems = {}
if OPERATING_SYSTEMS_URLS:
for url in OPERATING_SYSTEMS_URLS:
try:
raw_response = requests.get(url)
except ConnectionError:
# fail silently if url is unreachable
break
else:
if raw_response.ok:
soup = BeautifulSoup(raw_response.text)
extensions = {
'.tar.gz': 'tarball',
'.img': 'qemu',
'-root.dump': 'dump',
}
                    architectures = ['-x86_', '-amd', '-i386']
for link in soup.findAll('a'):
try:
if '.' + '.'.join(link.attrs.get('href').split('.')[-2:]) == '.tar.gz':
extension = '.tar.gz'
elif '.' + '.'.join(link.attrs.get('href').split('.')[-1:]) == '.img':
extension = '.img'
else:
extension = '.' + '.'.join(link.attrs.get('href').split('.')[-1:])
# in case of false link
except IndexError:
pass
else:
# if the file is tarball, qemu or dump then it is valid
if extension in extensions.keys() or '-root.dump' in link.attrs.get('href'):
re = requests.get(url + link.attrs.get('href') + '.dsc')
if re.ok:
name = re.text
else:
name = link.attrs.get('href')
for arch in architectures:
if arch in link.attrs.get('href'):
img_id = link.attrs.get('href').replace(extension, '').split(arch)[0]
architecture = arch
break
description = name
img_format = extensions[extension]
if link.attrs.get('href').split('-')[0] == 'nomount':
operating_systems.update({
img_id: {
'description': description,
'provider': OPERATING_SYSTEMS_PROVIDER,
'ssh_key_param': OPERATING_SYSTEMS_SSH_KEY_PARAM,
'arch': architecture,
'osparams': {
'img_id': img_id,
'img_format': img_format,
'img_nomount': 'yes',
}
}
})
else:
operating_systems.update({
img_id: {
'description': description,
'provider': OPERATING_SYSTEMS_PROVIDER,
'ssh_key_param': OPERATING_SYSTEMS_SSH_KEY_PARAM,
'arch': architecture,
'osparams': {
'img_id': img_id,
'img_format': img_format,
}
}
})
return operating_systems
else:
return {}
def get_operating_systems_dict():
if OPERATING_SYSTEMS:
return OPERATING_SYSTEMS
else:
return {}
def operating_systems():
# check if results exist in cache
response = cache.get('operating_systems')
# if no items in cache
if not response:
discovery = discover_available_operating_systems()
dictionary = get_operating_systems_dict()
operating_systems = sorted(dict(discovery.items() + dictionary.items()).items())
# move 'none' on the top of the list for ui purposes.
for os in operating_systems:
if os[0] == 'none':
operating_systems.remove(os)
operating_systems.insert(0, os)
response = json.dumps({'status': 'success', 'operating_systems': operating_systems})
# add results to cache for one day
cache.set('operating_systems', response, timeout=86400)
return response
# find os info given its img_id
def get_os_details(img_id):
oss = json.loads(operating_systems()).get('operating_systems')
for os in oss:
if os[0] == img_id:
return os[1]
return False
def refresh_cluster_cache(cluster, instance):
cluster.force_cluster_cache_refresh(instance)
for u in User.objects.all():
cache.delete("user:%s:index:instances" % u.username)
nodes, bc, bn = prepare_clusternodes()
cache.set('allclusternodes', nodes, 90)
cache.set('badclusters', bc, 90)
cache.set('badnodes', bn, 90)
def clusterdetails_generator(slug):
cluster_profile = {}
cluster_profile['slug'] = slug
cluster = Cluster.objects.get(slug=slug)
cluster_profile['description'] = cluster.description
cluster_profile['hostname'] = cluster.hostname
# We want to fetch info about the cluster per se, networks,
# nodes and nodegroups plus a really brief instances outline.
# Nodegroups
nodegroups = cluster.get_node_group_stack()
nodes = cluster.get_cluster_nodes()
# Networks
networks = cluster.get_networks()
# Instances later on...
cluster_profile['clusterinfo'] = cluster.get_cluster_info()
cluster_profile['clusterinfo']['mtime'] = str(cluster_profile['clusterinfo']['mtime'])
cluster_profile['clusterinfo']['ctime'] = str(cluster_profile['clusterinfo']['ctime'])
cluster_profile['nodegroups'] = nodegroups
cluster_profile['nodes'] = nodes
cluster_profile['networks'] = networks
return cluster_profile
def prepare_cluster_node_group_stack(cluster):
cluster_info = cluster.get_cluster_info()
len_instances = len(cluster.get_cluster_instances())
res = {}
res['slug'] = cluster.slug
res['cluster_id'] = cluster.pk
res['num_inst'] = len_instances
res['description'] = cluster.description
res['disk_templates'] = cluster_info['ipolicy']['disk-templates']
res['node_groups'] = cluster.get_node_group_stack()
return res
def prepare_tags(taglist):
tags = []
for i in taglist:
#User
if i.startswith('u'):
tags.append(
"%s:user:%s" % (
settings.GANETI_TAG_PREFIX, User.objects.get(
pk=i.replace('u_', '')
).username
)
)
#Group
if i.startswith('g'):
tags.append("%s:group:%s" % (
settings.GANETI_TAG_PREFIX,
Group.objects.get(pk=i.replace('g_','')).name
))
return list(set(tags))
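# Illustrative usage sketch (assumption): prepare_tags() maps form values such
# as 'u_<user_pk>' and 'g_<group_pk>' to Ganeti tags. With
# GANETI_TAG_PREFIX = 'ganetimgr', an input of ['u_1', 'g_2'] would yield
# something like ['ganetimgr:user:alice', 'ganetimgr:group:admins'],
# depending on the actual user and group names in the database.
def _prepare_tags_sketch():
    return prepare_tags(['u_1', 'g_2'])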
|
gpl-3.0
| -5,637,533,463,556,239,000
| 34.545455
| 109
| 0.502313
| false
| 4.262816
| false
| false
| false
|
saydulk/newfies-dialer
|
newfies/mod_utils/templatetags/utils_tags.py
|
1
|
2429
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.safestring import mark_safe
from django.template.defaultfilters import register
from django_lets_go.common_functions import word_capital
import re
from string import Template
def striphtml(data):
p = re.compile(r'<.*?>')
return mark_safe(p.sub('', data))
@register.simple_tag(name='field_html_code')
def field_html_code(field, main_class='col-md-6 col-xs-8', flag_error_text=True, flag_help_text=True):
"""
Usage: {% field_html_code field 'col-md-6 col-xs-8' %}
"""
tmp_div = Template("""
<div class="$main_class">
<div class="form-group $has_error">
<label class="control-label" for="$field_auto_id">$field_label</label>
$field
$field_errors
$field_help_text
</div>
</div>
""")
has_error = 'has-error' if field.errors else ''
field_errors = ''
if field.errors and flag_error_text:
field_errors = '<span class="help-block">%s</span>\n' % striphtml(str(field.errors)).capitalize()
field_help_text = ''
if flag_help_text:
field_help_text = '<span class="help-block">%s</span>\n' % (field.help_text.capitalize())
htmlcell = tmp_div.substitute(
main_class=main_class, has_error=has_error,
field_auto_id=field.auto_id, field_label=word_capital(field.label),
field=str(field).decode("utf-8"), field_errors=field_errors,
field_help_text=field_help_text)
return mark_safe(htmlcell)
@register.filter(name='check_url_for_template_width')
def check_url_for_template_width(current_url):
    """Return True when the given URL should be rendered with the full-width template."""
full_width_on_requested_path = [
'/dashboard/', '/sms_dashboard/', '/campaign/', '/sms_campaign/',
'user_detail_change', '/audio/', '/user_notification/',
]
if current_url == '/':
return True
else:
current_url = str(current_url)
for path in full_width_on_requested_path:
if path in current_url:
return True
return False
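# Illustrative template usage (assumption): how these helpers are typically
# combined in a Django template. The form field and the path values are
# placeholders; the tag and filter names match the registrations above.
#
#   {% load utils_tags %}
#   {% field_html_code form.phone_number 'col-md-4 col-xs-6' %}
#   {% if request.path|check_url_for_template_width %}full width{% endif %}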
|
mpl-2.0
| 6,704,997,681,144,459,000
| 31.824324
| 105
| 0.625772
| false
| 3.383008
| false
| false
| false
|
soedinglab/hh-suite
|
scripts/a3m.py
|
1
|
8020
|
#!/usr/bin/env python
class A3MFormatError(Exception):
def __init__(self, value):
self.value = "ERROR: "+value
def __str__(self):
return repr(self.value)
class A3M_Container:
RESIDUES = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
VALID_MATCH_STATES = set(RESIDUES)
VALID_INSERTION_STATES = set(RESIDUES.lower())
VALID_GAP_STATES = set("-.")
VALID_SS_CONF_STATES = set("0123456789")
VALID_SS_STATES = set("ECH")
VALID_DSSP_STATES = set("CHBEGITS-")
def __init__(self):
self.header = None
self.annotations = dict()
self.consensus = None
self.sequences = []
self.nr_match_states = None
@property
def number_sequences(self):
"""get the current number of protein sequences"""
return len(self.sequences)
def check_and_add_sequence(self, header, sequence):
try:
if (not self.check_and_add_annotation(header, sequence) and
not self.check_and_add_consensus(header, sequence)):
self.check_sequence(sequence)
self.sequences.append((header, sequence))
except A3MFormatError as e:
raise e
def check_and_add_consensus(self, header, sequence):
header_name = header[1:].split()[0]
if header_name.endswith("_consensus"):
if self.consensus:
raise A3MFormatError("Multiple definitions of consensus!")
else:
self.check_sequence(sequence)
self.consensus = (header, sequence)
return True
else:
return False
def check_and_add_annotation(self, header, sequence):
annotation_classes = [
("ss_conf", self.check_ss_conf),
("ss_pred", self.check_ss_pred),
("ss_dssp", self.check_dssp)
]
for (annotation_name, check) in annotation_classes:
if(header[1:].startswith(annotation_name)):
if(annotation_name in self.annotations):
raise A3MFormatError(
"Multiple definitions of {}!".format(annotation_name)
)
elif check(sequence):
self.annotations[annotation_name] = sequence
return True
return False
def check_match_states(self, match_states):
if not self.nr_match_states:
self.nr_match_states = match_states
if match_states == 0:
raise A3MFormatError("Sequence with zero match states!")
elif match_states != self.nr_match_states:
raise A3MFormatError(
("Sequence with diverging number "
"of match states ({} vs. {})!").format(
match_states,
self.nr_match_states
)
)
def check_ss_conf(self, sequence):
count_match_states = sum((c in self.VALID_SS_CONF_STATES
or c in self.VALID_GAP_STATES)
for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_SS_CONF_STATES
invalid_states -= self.VALID_GAP_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in predicted "
"secondary structure confidence!").format(invalid_states))
else:
return True
def check_ss_pred(self, sequence):
count_match_states = sum((c in self.VALID_SS_STATES
or c in self.VALID_GAP_STATES)
for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_SS_STATES
invalid_states -= self.VALID_GAP_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in predicted "
"secondary structure!").format(invalid_states))
else:
return True
def check_dssp(self, sequence):
count_match_states = sum(
(c in self.VALID_DSSP_STATES) for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_DSSP_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in "
"dssp annotation!").format(invalid_states))
else:
return True
def check_sequence(self, sequence):
count_match_states = sum((c in self.VALID_MATCH_STATES
or c in self.VALID_GAP_STATES)
for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_MATCH_STATES
invalid_states -= self.VALID_GAP_STATES
invalid_states -= self.VALID_INSERTION_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in "
"protein sequence!").format(invalid_states))
else:
return True
def get_sub_sequence(self, sequence, limits):
sub_sequence = []
for (start, end) in limits:
start_pos = 0
pos = -1
for i in range(len(sequence)):
if (sequence[i] in self.VALID_MATCH_STATES or
sequence[i] in self.VALID_GAP_STATES):
pos += 1
if pos + 1 == start:
start_pos = i
break
end_pos = 0
pos = -1
for i in range(len(sequence)):
if (sequence[i] in self.VALID_MATCH_STATES or
sequence[i] in self.VALID_GAP_STATES):
pos += 1
if pos + 1 == end:
end_pos = i
break
sub_sequence.append(sequence[start_pos:end_pos+1])
return "".join(sub_sequence)
def __str__(self):
content = []
if self.header:
content.append(self.header)
if self.consensus:
content.append(self.consensus[0])
content.append(self.consensus[1])
for (header, sequence) in self.sequences:
content.append(header)
content.append(sequence)
return "\n".join(content)
def split_a3m(self, limits):
new_a3m = A3M_Container()
if self.consensus:
new_consensus_sequence = self.get_sub_sequence(self.consensus[1],
limits)
new_a3m.consensus = (self.consensus[0], new_consensus_sequence)
for (header, sequence) in self.sequences:
new_sequence = self.get_sub_sequence(sequence, limits)
new_a3m.sequences.append((header, new_sequence))
return new_a3m
def read_a3m(self, fh):
lines = fh.readlines()
self.read_a3m_from_lines(lines)
fh.close()
def read_a3m_from_lines(self, lines):
sequence_header = None
sequence = []
is_first_line = True
for line in lines:
line = line.strip()
if len(line) == 0:
continue
elif line[0] == "#":
if is_first_line:
self.header = line
elif line[0] == ">":
if sequence_header:
self.check_and_add_sequence(sequence_header,
"".join(sequence))
sequence = []
sequence_header = line.rstrip()
else:
sequence.append(line.strip().strip("\x00"))
is_first_line = False
if sequence_header:
self.check_and_add_sequence(sequence_header, "".join(sequence))
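# Illustrative usage sketch (assumption): parse an A3M file, cut it down to
# match states 1-50 and print the result. The file name is a placeholder.
def _a3m_usage_sketch(path="example.a3m"):
    container = A3M_Container()
    with open(path) as fh:
        container.read_a3m(fh)
    sub_alignment = container.split_a3m([(1, 50)])
    print(sub_alignment)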
|
gpl-3.0
| 6,906,977,804,485,290,000
| 32.416667
| 77
| 0.522818
| false
| 4.261424
| false
| false
| false
|
meteotest/hurray
|
hurray/server/platform/posix.py
|
1
|
1906
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
# Modifications copyright 2016 Meteotest
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Posix implementations of platform-specific functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import fcntl
import os
from hurray.server.platform import interface
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class Waker(interface.Waker):
def __init__(self):
r, w = os.pipe()
_set_nonblocking(r)
_set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self.reader = os.fdopen(r, "rb", 0)
self.writer = os.fdopen(w, "wb", 0)
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.write(b"x")
except IOError:
pass
def consume(self):
try:
while True:
result = self.reader.read()
if not result:
break
except IOError:
pass
def close(self):
self.reader.close()
self.writer.close()
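# Illustrative usage sketch (assumption): a Waker is normally registered with
# an IOLoop poller; wake() from another thread makes the poll call return
# immediately and consume() drains the pipe afterwards.
def _waker_usage_sketch():
    import select
    waker = Waker()
    waker.wake()  # usually called from another thread
    readable, _, _ = select.select([waker.fileno()], [], [], 0)
    if waker.fileno() in readable:
        waker.consume()  # drain the pipe so it can fire again
    waker.close()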
|
bsd-3-clause
| 186,925,933,638,133,950
| 25.84507
| 80
| 0.640084
| false
| 3.708171
| false
| false
| false
|
kartikshah1/Test
|
discussion_forum/views.py
|
1
|
29002
|
"""
Views for Discussion Forum
Activity is recorded for add operations only; it can be extended easily if required.
TODO
- introduce user specific variable "follow" for thread
Whether user is following thread or not ?
- introduce 'liked', variable for Thread/Comment/Reply
- handle anonymity while serializing thread/comment/reply: instructor \
can see the User
- send notification to thread subscriber about new content
"""
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.shortcuts import get_object_or_404, render
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action, link
from rest_framework.response import Response
from discussion_forum import models
from discussion_forum import permissions
from discussion_forum import serializers
from courseware.models import Concept
ORDER_CHOICES = ['recent', 'earlier', 'popularity']
PAGINATED_BY = 5
@login_required
def forum(request):
"""
Serves forum.html template
"""
context = {"request": request}
return render(request, "discussion_forum/forum.html", context)
@login_required
def forum_admin(request):
"""
Serves forum.html template
"""
context = {"request": request}
return render(request, "discussion_forum/admin.html", context)
def apply_content_filters(order='recent', queryset=None):
""" Apply sorting_order, disable and pinned filter """
queryset = queryset.filter(disabled=False)
if order == 'earlier':
queryset = queryset.order_by('-pinned', 'created')
elif order == 'popularity':
queryset = queryset.order_by('-pinned', '-popularity', '-created')
else:
        # default order: recent
queryset = queryset.order_by('-pinned', '-created')
return queryset
def paginated_serializer(request=None, queryset=None, serializer=None):
"""
Returns the serializer containing objects corresponding to paginated page
"""
paginator = Paginator(queryset, PAGINATED_BY)
page = request.QUERY_PARAMS.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
items = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999),
# deliver last page of results.
items = paginator.page(paginator.num_pages)
serializer_context = {'request': request}
return serializer(items, context=serializer_context)
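# Illustrative sketch (assumption): the pattern used throughout this module --
# filter a queryset, then hand it to paginated_serializer() together with the
# matching Paginated*Serializer class.
def _paginated_response_sketch(request, thread):
    comments = models.Comment.objects.filter(thread=thread)
    serializer = paginated_serializer(
        request=request,
        queryset=comments,
        serializer=serializers.PaginatedCommentSerializer
    )
    return Response(serializer.data)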
def get_threads(forum=None, tag=None, request=None, search_term=None):
"""
Return threads according to the specifications.
Returns HTTP_400_BAD_REQUEST if any error occurs.
"""
if tag:
queryset = models.Thread.objects.filter(tags__pk=tag.pk)
else:
queryset = models.Thread.objects.filter(forum=forum)
if search_term:
queryset = queryset.filter(content__contains=search_term)
order = request.QUERY_PARAMS.get('order')
queryset = apply_content_filters(order=order, queryset=queryset)
serializer = paginated_serializer(
request=request,
queryset=queryset,
serializer=serializers.PaginatedThreadSerializer
)
response = serializer.data
for result in response["results"]:
thread = models.Thread.objects.get(pk=result["id"])
result["subscribed"] = thread.subscription.is_subscribed(request.user)
return Response(response)
class DiscussionForumViewSet(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
Methods for this ViewSet. Only retrieve and update are allowed
"""
model = models.DiscussionForum
serializer_class = serializers.DiscussionForumSettingSerializer
permission_classes = [permissions.IsForumAdminOrReadOnly]
paginate_by = 2
def retrieve(self, request, pk=None):
""" Returns discussion_forum object along with tags """
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
serializer = serializers.DiscussionForumSerializer(forum)
return Response(serializer.data)
@action(methods=['POST'], permission_classes=(permissions.IsForumAdmin, ))
def add_tag(self, request, pk=None):
"""
Add tag to this DiscussionForum
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
serializer = serializers.ForumTagSerializer(data=request.DATA)
if serializer.is_valid():
tag = models.Tag(
forum=forum,
title=serializer.data['title'],
tag_name=serializer.data['tag_name'],
auto_generated=False
)
tag.save()
return Response(serializers.TagSerializer(tag).data)
else:
content = {"detail": "tag-name should be unique across forum-tags"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@link(permission_classes=([permissions.IsForumUser]))
def activity(self, request, pk):
"""
Returns activities of particular discussion_forum
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
activities = models.Activity.objects.filter(forum=forum)
activities = activities.order_by('-happened_at')
serializer = serializers.ActivitySerializer(activities, many=True)
return Response(serializer.data)
@link(permission_classes=([permissions.IsForumUser]))
def user_setting(self, request, pk):
"""
Returns the user_setting for currently loggedIn user
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
setting = get_object_or_404(
models.UserSetting,
forum=forum,
user=request.user
)
serializer = serializers.UserSettingSerializer(setting)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def threads(self, request, pk):
"""
Return list of threads in a particular order
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
return get_threads(forum=forum, tag=None, request=request)
@link(permission_classes=((permissions.IsForumUser, )))
def search_threads(self, request, pk):
"""
Return list of threads in a particular order
"""
        search_term = request.GET.get('search', None)
        forum = get_object_or_404(models.DiscussionForum, pk=pk)
        self.check_object_permissions(request, forum)
        return get_threads(forum=forum,
                           tag=None,
                           request=request,
                           search_term=search_term)
@action(methods=['POST'], permission_classes=((permissions.IsForumUser,)))
def add_thread(self, request, pk=None):
"""
Add a new post to the forum
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
serializer = serializers.ForumThreadSerializer(data=request.DATA)
try:
user_setting = models.UserSetting.objects.get(
forum=forum,
user=request.user
)
except:
content = {'detail': 'Not enough permissions'}
return Response(content, status=status.HTTP_401_UNAUTHORIZED)
if serializer.is_valid():
thread = models.Thread(
forum=forum,
author=request.user,
author_badge=user_setting.badge,
title=serializer.data['title'],
content=serializer.data['content'],
anonymous=serializer.data['anonymous'],
)
thread.save()
forum.thread_count += 1
forum.save()
subscribe = serializer.data['subscribe']
if subscribe:
thread.subscription.subscribe(request.user)
models.Activity.activity(
forum=forum,
user=request.user,
operation=models.ActivityOperation.add,
object_type=models.ActivityObject.thread,
object_id=thread.pk
)
serializer = serializers.ThreadSerializer(thread)
return Response(serializer.data)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@link(permission_classes=((permissions.IsForumModerator, )))
def review_content(self, request, pk=None):
"""
Returns list of disabled content to user
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
content_set = models.Content.objects.filter(forum=forum)
content_set = content_set.filter(
Q(spam_count__gt=forum.review_threshold) | Q(disabled=True))
serializer = paginated_serializer(
request=request,
queryset=content_set,
serializer=serializers.PaginatedContentSerializer
)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumAdmin, )))
def users(self, request, pk=None):
"""
Retuns list of all moderator's UserSetting object
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
queryset = models.UserSetting.objects.filter(forum=forum)
utype = request.QUERY_PARAMS.get('type')
if utype == "moderators":
queryset = queryset.filter(
Q(super_user=True) | Q(moderator=True)
)
elif utype == "search":
search_str = request.QUERY_PARAMS.get('query')
queryset = queryset.filter(user__username__icontains=search_str)
serializer = paginated_serializer(
request=request,
queryset=queryset,
serializer=serializers.PaginatedUserSettingSerializer
)
return Response(serializer.data)
class TagViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Tag
serializer_class = serializers.TagSerializer
permission_classes = [permissions.IsForumAdminOrReadOnly]
# Modified IsForumAdminOrReadOnly permission to restrict admin from \
# deleting auto_generated tags
@link(permission_classes=((permissions.IsForumUser, )))
def threads(self, request, pk=None):
""" Return list of threads in a particular order """
tag = get_object_or_404(models.Tag, pk=pk)
self.check_object_permissions(request, tag)
return get_threads(forum=tag.forum, tag=tag, request=request)
class UserSettingViewSet(
mixins.UpdateModelMixin,
        # mixins.DestroyModelMixin: automatically deleted on dropping course
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.UserSetting
serializer_class = serializers.UserSettingSerializer
permission_classes = [permissions.IsOwnerOrModeratorReadOnly]
@action(methods=['POST'],
permission_classes=((permissions.IsForumModerator, )))
def update_badge(self, request, pk=None):
"""
Updates badge for a Current User. Only course moderator can update \
badge
"""
user_setting = get_object_or_404(models.UserSetting, pk=pk)
self.check_object_permissions(request, user_setting)
# Checking for current user's permission
try:
current_user_setting = models.UserSetting.objects.get(
forum=user_setting.forum,
user=request.user)
except:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
if not current_user_setting.moderator:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
serializer = serializers.BadgeSerializer(data=request.DATA)
if serializer.is_valid():
user_setting.badge = serializer.data['badge']
user_setting.save()
serializer = serializers.UserSettingSerializer(user_setting)
return Response(serializer.data)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'],
permission_classes=((permissions.IsForumAdmin, )))
def update_moderation_permission(self, request, pk=None):
"""
Update moderator value of UserSetting for this object. Only \
        allowed for Super Users.
"""
user_setting = get_object_or_404(models.UserSetting, pk=pk)
self.check_object_permissions(request, user_setting)
# Checking for current user's permission
try:
current_user_setting = models.UserSetting.objects.get(
forum=user_setting.forum,
user=request.user)
except:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
if not current_user_setting.super_user:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
serializer = serializers.BooleanSerializer(data=request.DATA)
if serializer.is_valid():
user_setting.moderator = serializer.data['mark']
user_setting.save()
serializer = serializers.UserSettingSerializer(user_setting)
return Response(serializer.data)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
class ContentViewSet(mixins.DestroyModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Content
serializer_class = serializers.ContentSerializer
permission_classes = [permissions.IsOwnerOrModerator]
def destroy(self, request, pk=None):
"""
Downcast to appropriate class member and delete that content
"""
try:
content = models.Content.objects.get_subclass(id=pk)
self.check_object_permissions(request, content)
content.delete()
response = {"detail": "Content deleted."}
return Response(response, status=status.HTTP_204_NO_CONTENT)
except:
response = {"detail": "invalid delete request"}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
@link(permission_classes=((permissions.IsForumUser, )))
def upvote(self, request, pk=None):
"""
Do upvote for content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.vote_up(request.user)
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def downvote(self, request, pk=None):
"""
Do downvote for content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.vote_down(request.user)
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def mark_spam(self, request, pk=None):
"""
Mark content as spam. If spam count exceeds threshold then content \
gets disabled
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.mark_spam(request.user)
if content.disabled:
return Response({"detail": "Content is disabled."})
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def pin_content(self, request, pk=None):
"""
Pin the content
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.pinned = not content.pinned
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def disable(self, request, pk=None):
"""
Disable the content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.disabled = True
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def enable(self, request, pk=None):
"""
        Enable the content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.disabled = False
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def reset_spam_flags(self, request, pk=None):
"""
Reset spam_count and spammers and enable the content
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.reset_spam_flags()
content.disabled = False
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
class ThreadViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Thread
serializer_class = serializers.ThreadSerializer
permission_classes = [permissions.IsOwnerOrModeratorOrReadOnly]
def retrieve(self, request, pk=None):
"""
Send a single thread instance. Perform make_hit operation.
If thread is disabled then it sends HTTP_404_NOT_FOUND
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
thread.make_hit()
if thread.disabled:
content = {'detail': 'Content is disabled'}
return Response(content, status=status.HTTP_404_NOT_FOUND)
else:
return Response(serializers.ThreadSerializer(thread).data)
@link(permission_classes=((permissions.IsForumUser, )))
def comments(self, request, pk=None):
"""
Returns list of comments
"""
web_request = request._request
if 'order' in web_request.GET.keys():
order = web_request.GET['order']
else:
order = 'earlier'
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
comments = models.Comment.objects.filter(thread=thread)
comments = apply_content_filters(queryset=comments, order=order)
serializer = paginated_serializer(
request=request,
queryset=comments,
serializer=serializers.PaginatedCommentSerializer
)
if serializer.data["previous"] is None:
thread.make_hit()
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def get_tag_list(self, request, pk=None):
## Get all concept Names for this course
queryset = Concept.objects.filter(is_published=True).filter(group__course_id=pk)
data = {}
data['results'] = queryset.values("id", "title")
return Response(data)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def add_comment(self, request, pk=None):
"""
Add a new comment for to the Thread
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
serializer = serializers.ForumContentSerializer(data=request.DATA)
try:
user_setting = models.UserSetting.objects.get(
forum=thread.forum,
user=request.user
)
except:
content = {'detail': 'Not enough permissions'}
return Response(content, status=status.HTTP_401_UNAUTHORIZED)
if serializer.is_valid():
comment = models.Comment(
thread=thread,
forum=thread.forum,
author=request.user,
author_badge=user_setting.badge,
content=serializer.data['content'],
anonymous=serializer.data['anonymous']
)
comment.save()
thread.children_count += 1
thread.save()
models.Activity.activity(
forum=thread.forum,
user=request.user,
operation=models.ActivityOperation.add,
object_type=models.ActivityObject.comment,
object_id=comment.pk
)
return Response(serializers.CommentSerializer(comment).data)
else:
content = {"detail": "inconsistent data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def add_tag(self, request, pk=None):
"""
Adds a new tag to this thread
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
serializer = serializers.IntegerSerializer(data=request.DATA)
if serializer.is_valid():
tag_id = serializer.data['value']
tag = get_object_or_404(models.Tag, pk=tag_id)
if tag.forum == thread.forum:
thread.tags.add(tag)
serializer = serializers.TagSerializer(
thread.tags.all(),
many=True
)
return Response(serializer.data)
content = {"detail": "un-identified tag"}
return Response(content, status.HTTP_400_BAD_REQUEST)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def remove_tag(self, request, pk=None):
"""
Removes tag from this thread
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
serializer = serializers.IntegerSerializer(data=request.DATA)
if serializer.is_valid():
tag_id = serializer.data['value']
tag = get_object_or_404(models.Tag, pk=tag_id)
if tag.forum == thread.forum:
thread.tags.remove(tag)
serializer = serializers.TagSerializer(
thread.tags.all(),
many=True
)
return Response(serializer.data)
content = {"detail": "un-identified tag"}
return Response(content, status.HTTP_400_BAD_REQUEST)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@link(permission_classes=((permissions.IsForumUser, )))
def subscribe(self, request, pk=None):
"""
Subscribe to this thread notifications
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
thread.subscription.subscribe(request.user)
response = {"success": "your subscribed to thread notifications"}
response["subscribed"] = True
return Response(response)
@link(permission_classes=((permissions.IsForumUser, )))
def unsubscribe(self, request, pk=None):
"""
Subscribe to this thread notifications
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
thread.subscription.unsubscribe(request.user)
response = {"success": "you will no longer recieve notifications"}
response["subscribed"] = False
return Response(response)
class CommentViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Comment
serializer_class = serializers.CommentSerializer
permission_classes = [permissions.IsOwnerOrModeratorOrReadOnly]
@link(permission_classes=((permissions.IsForumUser, )))
def replies(self, request, pk=None):
"""
Returns list of replies in discussion_forum
"""
web_request = request._request
if 'order' in web_request.GET.keys():
order = web_request.GET['order']
else:
order = 'earlier'
comment = get_object_or_404(models.Comment, pk=pk)
self.check_object_permissions(request, comment)
replies = models.Reply.objects.filter(comment=comment)
replies = apply_content_filters(queryset=replies, order=order)
serializer = paginated_serializer(
request=request,
queryset=replies,
serializer=serializers.PaginatedReplySerializer
)
return Response(serializer.data)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def add_reply(self, request, pk=None):
"""
Add a new reply for to the comment
"""
comment = get_object_or_404(models.Comment, pk=pk)
self.check_object_permissions(request, comment)
serializer = serializers.ForumContentSerializer(data=request.DATA)
try:
user_setting = models.UserSetting.objects.get(
forum=comment.forum,
user=request.user
)
except:
content = {'detail': 'Not enough permissions'}
return Response(content, status=status.HTTP_401_UNAUTHORIZED)
if serializer.is_valid():
reply = models.Reply(
thread=comment.thread,
comment=comment,
forum=comment.forum,
author=request.user,
author_badge=user_setting.badge,
content=serializer.data['content'],
anonymous=serializer.data['anonymous']
)
reply.save()
comment.children_count += 1
comment.save()
models.Activity.activity(
forum=comment.forum,
user=request.user,
operation=models.ActivityOperation.add,
object_type=models.ActivityObject.reply,
object_id=reply.pk
)
return Response(serializers.ReplySerializer(reply).data)
else:
content = {"detail": "inconsistent data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
class ReplyViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Reply ViewSet.
Allowed methods are retrieve, content update and delete
"""
model = models.Reply
serializer_class = serializers.ReplySerializer
permission_classes = [permissions.IsOwnerOrModeratorOrReadOnly]
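# Illustrative wiring sketch (assumption, not part of this module): these
# viewsets are normally registered with a DRF router in urls.py. The URL
# prefixes below are placeholders.
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'forum', DiscussionForumViewSet)
#   router.register(r'threads', ThreadViewSet)
#   router.register(r'comments', CommentViewSet)
#   router.register(r'replies', ReplyViewSet)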
|
mit
| -2,295,580,879,099,539,200
| 36.373711
| 111
| 0.623716
| false
| 4.400243
| false
| false
| false
|
gobins/python-madclient
|
madclient/openstack/common/apiclient/base.py
|
1
|
17430
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
########################################################################
#
# THIS MODULE IS DEPRECATED
#
# Please refer to
# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
# the discussion leading to this deprecation.
#
# We recommend checking out the python-openstacksdk project
# (https://launchpad.net/python-openstacksdk) instead.
#
########################################################################
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from oslo_utils import strutils
import six
from six.moves.urllib import parse
from openstack.common._i18n import _
from openstack.common.apiclient import exceptions
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
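# Illustrative sketch (assumption): getid() lets callers pass either a raw id
# or a Resource-like object interchangeably, e.g.
#
#   getid("d2b4f1a0")  -> "d2b4f1a0"
#   getid(server)      -> server.uuid if set, otherwise server.id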
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
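# Illustrative usage sketch (assumption): registering and running a hook on a
# HookableMixin subclass; the hook type string and class name are placeholders.
def _hook_usage_sketch():
    def log_args(*args, **kwargs):
        print("pre-parse hook called with %s %s" % (args, kwargs))

    class ExampleClient(HookableMixin):
        pass

    ExampleClient.add_hook('__pre_parse_args__', log_args)
    ExampleClient.run_hooks('__pre_parse_args__', 'arg1', flag=True)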
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key=None, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key] if response_key is not None else body
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key=None):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
"""
body = self.client.get(url).json()
data = body[response_key] if response_key is not None else body
return self.resource_class(self, data, loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key=None, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
data = body[response_key] if response_key is not None else body
if return_raw:
return data
return self.resource_class(self, data)
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and sent in the
PATCH request body
:param response_key: the key to be looked up in the response dictionary,
e.g., 'servers'. If response_key is None, the entire response
body will be used.
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
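# Illustrative sketch (hypothetical names, not defined elsewhere in this
# module): a concrete manager built on BaseManager typically sets
# ``resource_class`` and wraps the protected helpers with resource-specific
# URLs.
#
#     class Server(Resource):
#         pass
#
#     class ServerManager(BaseManager):
#         resource_class = Server
#
#         def list(self):
#             # GET /servers, unwrapping the 'servers' key from the body
#             return self._list('/servers', response_key='servers')
#
#         def get(self, server_id):
#             # GET /servers/<id>, unwrapping the 'server' key
#             return self._get('/servers/%s' % server_id, response_key='server')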
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
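# Illustrative sketch of find()/findall(), assuming a hypothetical
# ``flavors`` manager that subclasses ManagerWithFind. Filtering happens
# client-side: the whole collection is listed, then matched attribute by
# attribute.
#
#     flavors.findall(vcpus=2)         # every flavor whose vcpus == 2
#     flavors.find(name='m1.small')    # exactly one match, otherwise raises
#                                      # exceptions.NotFound / NoUniqueMatch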
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer to collections in both URLs (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URLs could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in six.iteritems(kwargs.copy()):
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
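# Illustrative sketch of a CrudManager subclass; ``Entity`` and
# ``EntityManager`` are hypothetical names. With ``collection_key`` and
# ``key`` set, the inherited methods build URLs such as '/entities' and
# '/entities/{entity_id}' and wrap request bodies as {'entity': {...}}.
#
#     class Entity(Resource):
#         pass
#
#     class EntityManager(CrudManager):
#         resource_class = Entity
#         collection_key = 'entities'
#         key = 'entity'
#
#     manager = EntityManager(client)
#     manager.create(name='example')     # POST   /entities
#     manager.get(entity_id='123')       # GET    /entities/123
#     manager.list()                     # GET    /entities
#     manager.delete(entity_id='123')    # DELETE /entities/123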
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion.
"""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
Some clients, such as novaclient, lazy-load resource details;
this method fetches the full record on demand.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
self._add_details(
{'x_request_id': self.manager.client.last_request_id})
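# Illustrative sketch of the lazy-loading flow (hypothetical names): a
# Resource created with loaded=False fetches its full record the first time
# a missing attribute is read.
#
#     server = Server(server_manager, {'id': 'abc'}, loaded=False)
#     server.name   # not in __dict__ -> __getattr__() -> get() -> retry lookup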
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
|
apache-2.0
| -13,708,069,029,369,212
| 31.763158
| 79
| 0.572117
| false
| 4.176851
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/language_constant_service/transports/grpc.py
|
1
|
10459
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import language_constant
from google.ads.googleads.v8.services.types import language_constant_service
from .base import LanguageConstantServiceTransport, DEFAULT_CLIENT_INFO
class LanguageConstantServiceGrpcTransport(LanguageConstantServiceTransport):
"""gRPC backend transport for LanguageConstantService.
Service to fetch language constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
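# Illustrative sketch (an assumption, not prescriptive): constructing the
# transport directly with application default credentials. In normal use the
# generated service client builds the transport for you.
#
#     import google.auth
#     creds, _ = google.auth.default()
#     transport = LanguageConstantServiceGrpcTransport(
#         host="googleads.googleapis.com",
#         credentials=creds,
#     )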
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
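# Illustrative sketch: a pre-built channel may also be passed to __init__,
# in which case the credentials-related arguments are ignored.
#
#     channel = LanguageConstantServiceGrpcTransport.create_channel(
#         "googleads.googleapis.com", credentials=creds,
#     )
#     transport = LanguageConstantServiceGrpcTransport(channel=channel)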
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_language_constant(
self,
) -> Callable[
[language_constant_service.GetLanguageConstantRequest],
language_constant.LanguageConstant,
]:
r"""Return a callable for the get language constant method over gRPC.
Returns the requested language constant.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetLanguageConstantRequest],
~.LanguageConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_language_constant" not in self._stubs:
self._stubs[
"get_language_constant"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.LanguageConstantService/GetLanguageConstant",
request_serializer=language_constant_service.GetLanguageConstantRequest.serialize,
response_deserializer=language_constant.LanguageConstant.deserialize,
)
return self._stubs["get_language_constant"]
__all__ = ("LanguageConstantServiceGrpcTransport",)
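# Illustrative sketch of invoking the RPC through this transport; the
# resource name format and the criterion id below are assumptions used only
# for illustration.
#
#     request = language_constant_service.GetLanguageConstantRequest(
#         resource_name="languageConstants/1000",
#     )
#     language = transport.get_language_constant(request)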
|
apache-2.0
| -6,344,020,487,974,135,000
| 41.173387
| 98
| 0.607611
| false
| 4.802112
| false
| false
| false
|