| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
linrio/WhetherOrNotMe
|
testme.py
|
1
|
2028
|
# -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
import random
import tensorflow as tf
def read_data(img_path, image_h=64, image_w=64):
image_data = []
label_data = []
image = cv2.imread(img_path)
#cv2.namedWindow("Image")
#cv2.imshow("Image",image)
#cv2.waitKey(0)
h,w,_ = image.shape
longest_edge = max(h,w)
top, bottom, left, right = (0, 0, 0, 0)
dh,dw = (0,0)
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
image_pad = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
image = cv2.resize(image_pad, (image_h, image_w))
image_data.append(image)
label_data.append(img_path)
image_data = np.array(image_data)
train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05,
random_state=random.randint(0, 100))
X = tf.placeholder(tf.float32,[None, 64, 64, 3])
Y = tf.placeholder(tf.float32, [None, 2])
return Y
#img_path = '4833.jpg'
#print(read_data(img_path))
x_data = np.float32(np.random.rand(2,100))
y_data = np.dot([0.100, 0.200], x_data) + 0.300
b = tf.Variable(tf.zeros([1]), name='B')
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0), name='W')
y = tf.add(tf.matmul(W, x_data, name='MatMul'), b, name='add')
loss = tf.reduce_mean(tf.square(tf.subtract(y, y_data, name='Sub'), name='Square'), name='ReduceMean')
optimizer = tf.train.GradientDescentOptimizer(0.001, name='Optimizer')
train = optimizer.minimize(loss, name='minimize')
summaries = [tf.summary.histogram('W',W), tf.summary.histogram('b', b), tf.summary.scalar('loss', loss)]
summary_op = tf.summary.merge(summaries)
print(summary_op)
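# --- Added illustration (not in the original file): a minimal TF1 session
# loop that actually runs the `train`/`loss` ops built above. Assumes
# TensorFlow 1.x, where tf.Session and tf.placeholder still exist.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(201):
        _, loss_val = sess.run([train, loss])
        if step % 50 == 0:
            print(step, loss_val)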
|
apache-2.0
| 568,228,649,818,764,700
| 33
| 105
| 0.594675
| false
| 2.909613
| false
| false
| false
|
hylandnp/CS7012_DISTRIBUTED_PROJECT
|
reducer.py
|
1
|
7038
|
# Listen and respond to SNMP GET/GETNEXT queries
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp, udp6, unix
from pyasn1.codec.ber import encoder, decoder
from pysnmp.proto import api
import time, bisect
from src.map_reduce import Mapper, Reducer, Shuffler
from collections import defaultdict
import word_count
import thread
import socket
import json
mapper_1 = "10.0.0.1"
mapper_2 = "10.0.0.2"
reducer = "10.0.0.3"
manager = "10.0.0.4"
class SysDescr:
name = (1,3,6,1,2,1,1,1,0)
def __eq__(self, other): return self.name == other
def __ne__(self, other): return self.name != other
def __lt__(self, other): return self.name < other
def __le__(self, other): return self.name <= other
def __gt__(self, other): return self.name > other
def __ge__(self, other): return self.name >= other
def __call__(self, protoVer):
test = "it it it it ti ti ti ti"
ans = self.word_count(test.strip().split(" "))
# print(str(ans).strip('[]'))
return api.protoModules[protoVer].OctetString(
'Job finished Captain'
)
def group_by_word(self, words):
result = defaultdict(list)
for (word, c) in words:
result[word].append(c)
return result
def map_word(self, word):
return word, 1
def reduce_count(self, word, sequence):
return word, sum(sequence)
def word_count(self, document):
self.mapper = Mapper(self.map_word, document)
self.shuffler = Shuffler(self.group_by_word, self.mapper.run())
self.reducer = Reducer(self.reduce_count, self.shuffler.run().iteritems())
return self.reducer.run()
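    # Added trace for clarity (illustrative, not in the original): given the
    # document ["it", "ti", "it"], the pipeline above produces
    #   map_word      -> [("it", 1), ("ti", 1), ("it", 1)]
    #   group_by_word -> {"it": [1, 1], "ti": [1]}
    #   reduce_count  -> [("it", 2), ("ti", 1)]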
class Uptime:
name = (1,3,6,1,2,1,1,3,0)
birthday = time.time()
def __eq__(self, other): return self.name == other
def __ne__(self, other): return self.name != other
def __lt__(self, other): return self.name < other
def __le__(self, other): return self.name <= other
def __gt__(self, other): return self.name > other
def __ge__(self, other): return self.name >= other
def __call__(self, protoVer):
return api.protoModules[protoVer].TimeTicks(
(time.time()-self.birthday)*100
)
mibInstr = (
SysDescr(), Uptime(), # sorted by object name
)
mibInstrIdx = {}
for mibVar in mibInstr:
mibInstrIdx[mibVar.name] = mibVar
def cbFun(transportDispatcher, transportDomain, transportAddress, wholeMsg):
while wholeMsg:
msgVer = api.decodeMessageVersion(wholeMsg)
if msgVer in api.protoModules:
pMod = api.protoModules[msgVer]
else:
print('Unsupported SNMP version %s' % msgVer)
return
reqMsg, wholeMsg = decoder.decode(
wholeMsg, asn1Spec=pMod.Message(),
)
rspMsg = pMod.apiMessage.getResponse(reqMsg)
rspPDU = pMod.apiMessage.getPDU(rspMsg)
reqPDU = pMod.apiMessage.getPDU(reqMsg)
varBinds = []; pendingErrors = []
errorIndex = 0
# GETNEXT PDU
if reqPDU.isSameTypeWith(pMod.GetNextRequestPDU()):
# Produce response var-binds
for oid, val in pMod.apiPDU.getVarBinds(reqPDU):
errorIndex = errorIndex + 1
# Search next OID to report
nextIdx = bisect.bisect(mibInstr, oid)
if nextIdx == len(mibInstr):
# Out of MIB
varBinds.append((oid, val))
pendingErrors.append(
(pMod.apiPDU.setEndOfMibError, errorIndex)
)
else:
# Report value if OID is found
varBinds.append(
(mibInstr[nextIdx].name, mibInstr[nextIdx](msgVer))
)
elif reqPDU.isSameTypeWith(pMod.GetRequestPDU()):
for oid, val in pMod.apiPDU.getVarBinds(reqPDU):
if oid in mibInstrIdx:
varBinds.append((oid, mibInstrIdx[oid](msgVer)))
else:
# No such instance
varBinds.append((oid, val))
pendingErrors.append(
(pMod.apiPDU.setNoSuchInstanceError, errorIndex)
)
break
else:
# Report unsupported request type
pMod.apiPDU.setErrorStatus(rspPDU, 'genErr')
pMod.apiPDU.setVarBinds(rspPDU, varBinds)
# Commit possible error indices to response PDU
for f, i in pendingErrors:
f(rspPDU, i)
transportDispatcher.sendMessage(
encoder.encode(rspMsg), transportDomain, transportAddress
)
return wholeMsg
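# Added summary of cbFun for clarity: decode the incoming SNMP message, build
# a response PDU, answer GETNEXT by bisecting mibInstr for the next OID and
# GET by direct lookup in mibInstrIdx, flag any other PDU type as genErr,
# commit pending error indices, then encode and send the response.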
transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbFun)
# UDP/IPv4
transportDispatcher.registerTransport(
udp.domainName, udp.UdpSocketTransport().openServerMode(('10.0.0.3', 1161))
)
## Local domain socket
#transportDispatcher.registerTransport(
# unix.domainName, unix.UnixSocketTransport().openServerMode('/tmp/snmp-agent')
#)
IP = "10.0.0.3"
port = 1162
def listen_for_data():
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
sock.bind((IP, port))
while 1:
data1_recv = ""
data2_recv = ""
data, addr = sock.recvfrom(8192)
addr1 = addr
try:
while(data):
if addr == addr1:
data1_recv = data1_recv + data
print "get data from 1"
else:
data2_recv = data2_recv + data
print "get data from 2"
sock.settimeout(3)
data, addr = sock.recvfrom(8192)
except socket.timeout:
sock.close()
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
sock.bind((IP, port))
print "reducer got everything"
data1_dict = json.loads(data1_recv)
data2_dict = json.loads(data2_recv)
data_result = data1_dict.copy()
for key in data2_dict.keys():
if key in data_result:
data_result[key] = data_result[key] + data2_dict[key]
else:
data_result[key] = data2_dict[key]
reducer = Reducer(word_count.reduce_count, data_result.iteritems())
result = reducer.run()
print result
file_out = open("result.txt","w")
for word in result:
file_out.write(str(word) + "\n")
file_out.close()
def listen_for_snmp():
transportDispatcher.jobStarted(1)
try:
# Dispatcher will never finish as job#1 never reaches zero
transportDispatcher.runDispatcher()
except:
transportDispatcher.closeDispatcher()
raise
try:
thread.start_new_thread(listen_for_data,())
thread.start_new_thread(listen_for_snmp,())
except:
    print "Error: unable to start thread"
    raise
while 1:
pass
|
mit
| 280,178,073,929,246,180
| 31.583333
| 82
| 0.583262
| false
| 3.680962
| false
| false
| false
|
okanokumus/Programming
|
Python/IntroductionPython/variable_types.py
|
1
|
1599
|
# https://www.tutorialspoint.com/python/python_variable_types.htm
# Python variables do not need explicit declaration to reserve memory space.
# The declaration happens automatically when you assign a value to a variable.
# The equal sign (=) is used to assign values to variables.
number = 5
weight = 58.9
name = 'python'
print(weight)
print(number)
print(name)
# Multiple assignment
a = b = c = number
print(a * b * c)
string = 'Hello World !!!'
print(string[1:7])    # Prints characters 2 through 7
print(string[5:])     # Prints the string starting from the 6th character
print(string * 2)     # Prints the string two times
print(string + "python")
# Python Lists
list = ['abcd', 786, 2.23, 'john', 70.2]  # note: 'list' shadows the built-in name
tinylist = [123, 'john']
print (list) # Prints complete list
print (list[1:3]) # Prints elements starting from 2nd till 3rd
print (list[2:]) # Prints elements starting from 3rd element
print (tinylist * 2) # Prints list two times
print (list + tinylist) # Prints concatenated lists
# Python Tuples
# The main differences between lists and tuples are: Lists are enclosed in brackets ( [ ] ) and
# their elements and size can be changed, while tuples are enclosed in parentheses ( ( ) ) and
# cannot be updated. Tuples can be thought of as read-only lists.
tuple = ('abcd', 786, 2.23, 'john', 70.2)  # note: 'tuple' shadows the built-in name
tinytuple = (123, 'john')
print(tuple) # Prints the complete tuple
print(tuple[1:3]) # Prints elements starting from 2nd till 3rd
print(tuple[2:]) # Prints elements starting from 3rd element
print(tinytuple * 2) # Prints the tuple two times
print(tuple + tinytuple) # Prints concatenated tuples
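# Added illustration (not in the original tutorial): tuples really are
# read-only, so assigning to an element raises a TypeError.
try:
    tinytuple[0] = 456
except TypeError as err:
    print(err)  # 'tuple' object does not support item assignment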
|
gpl-3.0
| -2,425,228,713,200,995,300
| 33.782609
| 95
| 0.699812
| false
| 3.423983
| false
| false
| false
|
cthoyt/onto2nx
|
src/onto2nx/ontospy/core/entities.py
|
1
|
12106
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
from itertools import count
import rdflib
from .utils import *
class RDF_Entity(object):
"""
Pythonic representation of an RDF resource - normally not instantiated but used for
inheritance purposes
<triples> : a structure like this:
[(rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/OnlineChatAccount'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.Literal(u'An online chat account.')),
(rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/OnlineChatAccount'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#subClassOf')]
"""
_ids = count(0)
def __repr__(self):
return "<OntoSpy: RDF_Entity object for uri *%s*>" % (self.uri)
def __init__(self, uri, rdftype=None, namespaces=None):
"""
Init ontology object. Load the graph in memory, then setup all necessary attributes.
"""
self.id = next(self._ids)
self.uri = uri # rdflib.Uriref
self.qname = self.__buildQname(namespaces)
self.locale = inferURILocalSymbol(self.uri)[0]
self.slug = None
self.rdftype = rdftype
self.triples = None
self.rdfgraph = rdflib.Graph()
self.namespaces = namespaces
self._children = []
self._parents = []
# self.siblings = []
def serialize(self, format="turtle"):
""" xml, n3, turtle, nt, pretty-xml, trix are built in"""
if self.triples:
if not self.rdfgraph:
self._buildGraph()
return self.rdfgraph.serialize(format=format)
else:
return None
def printSerialize(self, format="turtle"):
printDebug("\n" + self.serialize(format))
def __buildQname(self, namespaces):
""" extracts a qualified name for a uri """
return uri2niceString(self.uri, namespaces)
def _buildGraph(self):
"""
transforms the triples list into a proper rdflib graph
(which can be used later for querying)
"""
for n in self.namespaces:
self.rdfgraph.bind(n[0], rdflib.Namespace(n[1]))
if self.triples:
for terzetto in self.triples:
self.rdfgraph.add(terzetto)
# methods added to RDF_Entity even though they apply only to some subs
def ancestors(self, cl=None, noduplicates=True):
""" returns all ancestors in the taxonomy """
if not cl:
cl = self
if cl.parents():
bag = []
for x in cl.parents():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.ancestors(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return []
def descendants(self, cl=None, noduplicates=True):
""" returns all descendants in the taxonomy """
if not cl:
cl = self
if cl.children():
bag = []
for x in cl.children():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.descendants(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return []
def parents(self):
"""wrapper around property"""
return self._parents
def children(self):
"""wrapper around property"""
return self._children
def getValuesForProperty(self, aPropURIRef):
"""
generic way to extract some prop value eg
In [11]: c.getValuesForProperty(rdflib.RDF.type)
Out[11]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#Class'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#Class')]
"""
return list(self.rdfgraph.objects(None, aPropURIRef))
def bestLabel(self, prefLanguage="en", qname_allowed=True, quotes=True):
"""
facility for extracting the best available label for an entity.
This checks RDFS.label, SKOS.prefLabel and finally the qname local component
"""
test = self.getValuesForProperty(rdflib.RDFS.label)
out = ""
if test:
out = firstEnglishStringInList(test)
else:
test = self.getValuesForProperty(rdflib.namespace.SKOS.prefLabel)
if test:
out = firstEnglishStringInList(test)
else:
if qname_allowed:
out = self.locale
if quotes and out:
return addQuotes(out)
else:
return out
def bestDescription(self, prefLanguage="en"):
"""
facility for extracting the best available description for an entity.
This checks RDFS.comment, DCTERMS.description, DC.description and finally SKOS.definition
"""
test_preds = [rdflib.RDFS.comment, rdflib.namespace.DCTERMS.description, rdflib.namespace.DC.description,
rdflib.namespace.SKOS.definition]
for pred in test_preds:
test = self.getValuesForProperty(pred)
if test:
return addQuotes(firstEnglishStringInList(test))
return ""
class Ontology(RDF_Entity):
"""
Pythonic representation of an OWL ontology
"""
def __repr__(self):
return "<OntoSpy: Ontology object for uri *%s*>" % (self.uri)
def __init__(self, uri, rdftype=None, namespaces=None, prefPrefix=""):
"""
Init ontology object. Load the graph in memory, then setup all necessary attributes.
"""
super(Ontology, self).__init__(uri, rdftype, namespaces)
# self.uri = uri # rdflib.Uriref
self.prefix = prefPrefix
self.slug = "ontology-" + slugify(self.qname)
self.classes = []
self.properties = []
self.skosConcepts = []
def annotations(self, qname=True):
"""
wrapper that returns all triples for an onto.
By default resources URIs are transformed into qnames
"""
if qname:
return sorted([(uri2niceString(x, self.namespaces)), (uri2niceString(y, self.namespaces)), z] for x, y, z in
self.triples)
else:
return sorted(self.triples)
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printGenericTree()
self.printTriples()
self.stats()
def stats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("Classes.....: %d" % len(self.classes))
printDebug("Properties..: %d" % len(self.properties))
class OntoClass(RDF_Entity):
"""
Python representation of a generic class within an ontology.
Includes methods for representing and querying RDFS/OWL classes
domain_of_inferred: a list of dict
[{<Class *http://xmlns.com/foaf/0.1/Person*>:
[<Property *http://xmlns.com/foaf/0.1/currentProject*>,<Property *http://xmlns.com/foaf/0.1/familyName*>,
etc....]},
{<Class *http://www.w3.org/2003/01/geo/wgs84_pos#SpatialThing*>:
[<Property *http://xmlns.com/foaf/0.1/based_near*>, etc...]},
]
"""
def __init__(self, uri, rdftype=None, namespaces=None):
"""
...
"""
super(OntoClass, self).__init__(uri, rdftype, namespaces)
self.slug = "class-" + slugify(self.qname)
self.domain_of = []
self.range_of = []
self.domain_of_inferred = []
self.range_of_inferred = []
self.ontology = None
self.queryHelper = None # the original graph the class derives from
def __repr__(self):
return "<Class *%s*>" % (self.uri)
def instances(self): # = all instances
return self.all()
def all(self):
out = []
if self.queryHelper:
qres = self.queryHelper.getClassInstances(self.uri)
out = [x[0] for x in qres]
return out
def count(self):
if self.queryHelper:
return self.queryHelper.getClassInstancesCount(self.uri)
else:
return 0
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("----------------")
printDebug("Parents......: %d" % len(self.parents()))
printDebug("Children.....: %d" % len(self.children()))
printDebug("Ancestors....: %d" % len(self.ancestors()))
printDebug("Descendants..: %d" % len(self.descendants()))
printDebug("Domain of....: %d" % len(self.domain_of))
printDebug("Range of.....: %d" % len(self.range_of))
printDebug("Instances....: %d" % self.count())
printDebug("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
self.printTriples()
self.printStats()
# self.printGenericTree()
class OntoProperty(RDF_Entity):
"""
Python representation of a generic RDF/OWL property.
rdftype is one of:
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#ObjectProperty')
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#DatatypeProperty')
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#AnnotationProperty')
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Property')
"""
def __init__(self, uri, rdftype=None, namespaces=None):
"""
...
"""
super(OntoProperty, self).__init__(uri, rdftype, namespaces)
self.slug = "prop-" + slugify(self.qname)
self.rdftype = inferMainPropertyType(rdftype)
self.domains = []
self.ranges = []
self.ontology = None
def __repr__(self):
return "<Property *%s*>" % (self.uri)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("----------------")
printDebug("Parents......: %d" % len(self.parents()))
printDebug("Children.....: %d" % len(self.children()))
printDebug("Ancestors....: %d" % len(self.ancestors()))
printDebug("Descendants..: %d" % len(self.descendants()))
printDebug("Has Domain...: %d" % len(self.domains))
printDebug("Has Range....: %d" % len(self.ranges))
printDebug("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
self.printTriples()
self.printStats()
# self.printGenericTree()
class OntoSKOSConcept(RDF_Entity):
"""
Python representation of a generic SKOS concept within an ontology.
@todo: complete methods..
"""
def __init__(self, uri, rdftype=None, namespaces=None):
"""
...
"""
super(OntoSKOSConcept, self).__init__(uri, rdftype, namespaces)
self.slug = "concept-" + slugify(self.qname)
self.instance_of = []
self.ontology = None
self.queryHelper = None # the original graph the class derives from
def __repr__(self):
return "<SKOS Concept *%s*>" % (self.uri)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printDebug("----------------")
printDebug("Parents......: %d" % len(self.parents()))
printDebug("Children.....: %d" % len(self.children()))
printDebug("Ancestors....: %d" % len(self.ancestors()))
printDebug("Descendants..: %d" % len(self.descendants()))
printDebug("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
self.printTriples()
self.printStats()
self.printGenericTree()
|
gpl-3.0
| -5,157,829,616,975,822,000
| 32.076503
| 120
| 0.566331
| false
| 3.878885
| true
| false
| false
|
UASLab/ImageAnalysis
|
scripts/archive/5b-solver5.py
|
1
|
23350
|
#!/usr/bin/python
# 1. Iterate through all the image pairs and triangulate the match points.
# 2. Set the 3d location of features to triangulated position (possibly
# averaged if the feature is included in multiple matches
# 3. Compute new camera poses with solvePnP() using triangulated point locations
# 4. Repeat
import sys
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import json
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
import transformations
# constants
d2r = math.pi / 180.0
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--strategy', default='my_triangulate',
choices=['my_triangulate', 'triangulate', 'dem'], help='projection strategy')
parser.add_argument('--iterations', type=int, help='stop after this many solver iterations')
parser.add_argument('--target-mre', type=float, help='stop when mre meets this threshold')
parser.add_argument('--plot', action='store_true', help='plot the solution state')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
print "unique features:", len(matches_direct)
# compute keypoint usage map
proj.compute_kp_usage_new(matches_direct)
# setup SRTM ground interpolator
ref = proj.ned_reference_lla
sss = SRTM.NEDGround( ref, 2000, 2000, 30 )
start_mre = -1.0
# iterate through the matches list and triangulate the 3d location for
# all feature points, given the associated camera poses. Returns a
# new matches_dict with updated point positions
import LineSolver
def my_triangulate(matches_direct, cam_dict):
IK = np.linalg.inv( proj.cam.get_K() )
for match in matches_direct:
#print match
points = []
vectors = []
for m in match[1:]:
image = proj.image_list[m[0]]
cam2body = image.get_cam2body()
body2ned = image.rvec_to_body2ned(cam_dict[image.name]['rvec'])
uv_list = [ image.uv_list[m[1]] ] # just one uv element
vec_list = proj.projectVectors(IK, body2ned, cam2body, uv_list)
points.append( cam_dict[image.name]['ned'] )
vectors.append( vec_list[0] )
#print ' ', image.name
#print ' ', uv_list
#print ' ', vec_list
p = LineSolver.ls_lines_intersection(points, vectors, transpose=True).tolist()
#print p, p[0]
match[0] = [ p[0][0], p[1][0], p[2][0] ]
# iterate through the project image list and triangulate the 3d
# location for all feature points, given the current camera pose.
# Returns a new matches_dict with updated point positions
def triangulate(matches_direct, cam_dict):
IK = np.linalg.inv( proj.cam.get_K() )
match_pairs = proj.generate_match_pairs(matches_direct)
# zero the match NED coordinate and initialize the corresponding
# count array
counters = []
for match in matches_direct:
match[0] = np.array( [0.0, 0.0, 0.0] )
counters.append( 0)
for i, i1 in enumerate(proj.image_list):
#rvec1, tvec1 = i1.get_proj()
rvec1 = cam_dict[i1.name]['rvec']
tvec1 = cam_dict[i1.name]['tvec']
R1, jac = cv2.Rodrigues(rvec1)
PROJ1 = np.concatenate((R1, tvec1), axis=1)
for j, i2 in enumerate(proj.image_list):
matches = match_pairs[i][j]
if (j <= i) or (len(matches) == 0):
continue
# distance between two cameras
ned1 = np.array(cam_dict[i1.name]['ned'])
ned2 = np.array(cam_dict[i2.name]['ned'])
dist = np.linalg.norm(ned2 - ned1)
if dist < 40:
# idea: the closer together two poses are, the greater
# the triangulation error will be relative to small
# attitude errors. If we only compare more distant
# camera views the solver will be more stable.
continue
#rvec2, tvec2 = i2.get_proj()
rvec2 = cam_dict[i2.name]['rvec']
tvec2 = cam_dict[i2.name]['tvec']
R2, jac = cv2.Rodrigues(rvec2)
PROJ2 = np.concatenate((R2, tvec2), axis=1)
uv1 = []; uv2 = []; indices = []
for pair in matches:
p1 = i1.kp_list[ pair[0] ].pt
p2 = i2.kp_list[ pair[1] ].pt
uv1.append( [p1[0], p1[1], 1.0] )
uv2.append( [p2[0], p2[1], 1.0] )
# pair[2] is the index back into the matches_direct structure
indices.append( pair[2] )
pts1 = IK.dot(np.array(uv1).T)
pts2 = IK.dot(np.array(uv2).T)
points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])
points /= points[3]
#print "points:\n", points[0:3].T
# fixme: need to update result, sum_dict is no longer used
print "%s vs %s" % (i1.name, i2.name)
for k, p in enumerate(points[0:3].T):
match = matches_direct[indices[k]]
match[0] += p
counters[indices[k]] += 1
# divide each NED coordinate (sum of triangulated point locations)
# of matches_direct_dict by the count of references to produce an
# average NED coordinate for each match.
for i, match in enumerate(matches_direct):
if counters[i] > 0:
match[0] /= counters[i]
else:
print 'invalid match from images too close to each other:', match
for j in range(1, len(match)):
match[j] = [-1, -1]
# return the new match structure
return matches_direct
# Iterate through the project image list and run solvePnP on each
# image's feature set to derive new estimated camera locations
cam1 = []
def solvePnP(matches_direct):
# start with a clean slate
for image in proj.image_list:
image.img_pts = []
image.obj_pts = []
# build a new cam_dict that is a copy of the current one
cam_dict = {}
for image in proj.image_list:
cam_dict[image.name] = {}
rvec, tvec = image.get_proj()
ned, ypr, quat = image.get_camera_pose()
cam_dict[image.name]['rvec'] = rvec
cam_dict[image.name]['tvec'] = tvec
cam_dict[image.name]['ned'] = ned
# iterate through the match dictionary and build a per image list of
# obj_pts and img_pts
for match in matches_direct:
ned = match[0]
for p in match[1:]:
image = proj.image_list[ p[0] ]
kp = image.kp_list[ p[1] ]
image.img_pts.append( kp.pt )
image.obj_pts.append( ned )
camw, camh = proj.cam.get_image_params()
for image in proj.image_list:
# print image.name
if len(image.img_pts) < 4:
continue
scale = float(image.width) / float(camw)
K = proj.cam.get_K(scale)
rvec, tvec = image.get_proj()
(result, rvec, tvec) \
= cv2.solvePnP(np.float32(image.obj_pts),
np.float32(image.img_pts),
K, None,
rvec, tvec, useExtrinsicGuess=True)
# The idea of using the Ransac version of solvePnP() is to
# look past outliers instead of being affected by them. We
# don't use the outlier information at this point in the
# process for outlier rejection. However, it appears that
# this process leads to divergence, not convergence.
# (rvec, tvec, inliers) \
# = cv2.solvePnPRansac(np.float32(image.obj_pts),
# np.float32(image.img_pts),
# K, None,
# rvec, tvec, useExtrinsicGuess=True)
#print "rvec=", rvec
#print "tvec=", tvec
Rned2cam, jac = cv2.Rodrigues(rvec)
#print "Rraw (from SolvePNP):\n", Rraw
ned = image.camera_pose['ned']
#print "original ned = ", ned
#tvec = -np.matrix(R[:3,:3]) * np.matrix(ned).T
#print "tvec =", tvec
pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(tvec)
newned = pos.T[0].tolist()[0]
#print "new ned =", newned
# Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
# so solvePnP returns this combination. We can extract Rned by
# premultiplying by cam2body aka inv(body2cam).
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
#print "R (after M * R):\n", R
ypr = image.camera_pose['ypr']
#print "original ypr = ", ypr
Rbody2ned = np.matrix(Rned2body).T
IRo = transformations.euler_matrix(ypr[0]*d2r, ypr[1]*d2r, ypr[2]*d2r, 'rzyx')
IRq = transformations.quaternion_matrix(image.camera_pose['quat'])
#print "Original IR:\n", IRo
#print "Original IR (from quat)\n", IRq
#print "IR (from SolvePNP):\n", IR
(yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
#print "ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
#image.set_camera_pose( pos.T[0].tolist(), [yaw/d2r, pitch/d2r, roll/d2r] )
#print "Proj =", np.concatenate((R, tvec), axis=1)
cam_dict[image.name] = {}
cam_dict[image.name]['rvec'] = rvec
cam_dict[image.name]['tvec'] = tvec
cam_dict[image.name]['ned'] = newned
return cam_dict
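# Added note on the pose algebra above (used again when saving results):
# solvePnP follows OpenCV's convention x_cam = Rned2cam . x_ned + tvec, so
#   camera position (ned) = -(Rned2cam.T) . tvec
#   Rbody2ned             = (cam2body . Rned2cam).T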
# return a 3d affine transformation between fitted camera locations and
# original camera locations.
def get_recenter_affine(cam_dict):
src = [[], [], [], []] # current camera locations
dst = [[], [], [], []] # original camera locations
for image in proj.image_list:
if image.feature_count > 0:
newned = cam_dict[image.name]['ned']
src[0].append(newned[0])
src[1].append(newned[1])
src[2].append(newned[2])
src[3].append(1.0)
origned, ypr, quat = image.get_camera_pose()
dst[0].append(origned[0])
dst[1].append(origned[1])
dst[2].append(origned[2])
dst[3].append(1.0)
#print image.name, '%s -> %s' % (origned, newned)
A = transformations.superimposition_matrix(src, dst, scale=True)
print "Affine 3D:\n", A
return A
# transform the camera ned positions with the provided affine matrix
# to keep all the camera poses best fitted to the original camera
# locations. Also rotate the camera poses by the rotational portion
# of the affine matrix to update the camera alignment.
def transform_cams(A, cam_dict):
# construct an array of camera positions
src = [[], [], [], []]
for image in proj.image_list:
new = cam_dict[image.name]['ned']
src[0].append(new[0])
src[1].append(new[1])
src[2].append(new[2])
src[3].append(1.0)
# extract the rotational portion of the affine matrix
scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
R = transformations.euler_matrix(*angles)
#print "R:\n", R
# full transform the camera ned positions to best align with
# original locations
update_cams = A.dot( np.array(src) )
#print update_cams[:3]
for i, p in enumerate(update_cams.T):
key = proj.image_list[i].name
if not key in cam_dict:
cam_dict[key] = {}
ned = [ p[0], p[1], p[2] ]
# print "ned:", ned
cam_dict[key]['ned'] = ned
# adjust the camera projection matrix (rvec) to rotate by the
# amount of the affine transformation as well
rvec = cam_dict[key]['rvec']
tvec = cam_dict[key]['tvec']
Rcam, jac = cv2.Rodrigues(rvec)
# print "Rcam:\n", Rcam
Rcam_new = R[:3,:3].dot(Rcam)
# print "Rcam_new:\n", Rcam_new
rvec, jac = cv2.Rodrigues(Rcam_new)
cam_dict[key]['rvec'] = rvec
tvec = -np.matrix(Rcam_new) * np.matrix(ned).T
cam_dict[key]['tvec'] = tvec
# transform all the match point locations
def transform_points( A, pts_dict ):
src = [[], [], [], []]
for key in pts_dict:
p = pts_dict[key]
src[0].append(p[0])
src[1].append(p[1])
src[2].append(p[2])
src[3].append(1.0)
dst = A.dot( np.array(src) )
result_dict = {}
for i, key in enumerate(pts_dict):
result_dict[key] = [ dst[0][i], dst[1][i], dst[2][i] ]
return result_dict
# mark items that exceed the cutoff reprojection error for deletion
def mark_outliers(result_list, cutoff, matches_direct):
print " marking outliers..."
mark_count = 0
for line in result_list:
# print "line:", line
if line[0] > cutoff:
print " outlier index %d-%d err=%.2f" % (line[1], line[2],
line[0])
#if args.show:
# draw_match(line[1], line[2])
match = matches_direct[line[1]]
match[line[2]+1] = [-1, -1]
mark_count += 1
# mark matches not referencing images in the main group
def mark_non_group(main_group, matches_direct):
# construct set of image indices in main_group
group_dict = {}
for image in main_group:
for i, i1 in enumerate(proj.image_list):
if image == i1:
group_dict[i] = True
#print 'group_dict:', group_dict
print " marking non group..."
mark_sum = 0
for match in matches_direct:
for j, p in enumerate(match[1:]):
if not p[0] in group_dict:
match[j+1] = [-1, -1]
mark_sum += 1
print 'marked:', mark_sum, 'matches for deletion'
# delete marked matches
def delete_marked_matches(matches_direct):
print " deleting marked items..."
for i in reversed(range(len(matches_direct))):
match = matches_direct[i]
has_bad_elem = False
for j in reversed(range(1, len(match))):
p = match[j]
if p == [-1, -1]:
has_bad_elem = True
match.pop(j)
if len(match) < 4:
print "deleting match that is now in less than 3 images:", match
matches_direct.pop(i)
# any image with fewer than 25 matches has all of its matches marked for
# deletion
def mark_weak_images(matches_direct):
# count how many features show up in each image
for i in proj.image_list:
i.feature_count = 0
for i, match in enumerate(matches_direct):
for j, p in enumerate(match[1:]):
if p[1] != [-1, -1]:
image = proj.image_list[ p[0] ]
image.feature_count += 1
# make a dict of all images with less than 25 feature matches
weak_dict = {}
for i, img in enumerate(proj.image_list):
if img.feature_count < 25:
weak_dict[i] = True
if img.feature_count > 0:
print 'new weak image:', img.name
img.feature_count = 0 # will be zero very soon
print 'weak images:', weak_dict
# mark any features in the weak images list
mark_sum = 0
for i, match in enumerate(matches_direct):
#print 'before:', match
for j, p in enumerate(match[1:]):
if p[0] in weak_dict:
match[j+1] = [-1, -1]
mark_sum += 1
#print 'after:', match
def plot(surface0, cam0, surface1, cam1):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = []; ys = []; zs = []
for p in surface0:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='r', marker='.')
xs = []; ys = []; zs = []
for p in surface1:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='b', marker='.')
xs = []; ys = []; zs = []
for p in cam0:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='y', marker='^')
xs = []; ys = []; zs = []
for p in cam1:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='b', marker='^')
plt.show()
# temporary testing ....
# match_pairs = proj.generate_match_pairs(matches_direct)
# group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
# mark_non_group(group_list[0], matches_direct)
# quit()
# iterate through the match dictionary and build a simple list of
# starting surface points
surface0 = []
for match in matches_direct:
ned = match[0]
surface0.append( [ned[1], ned[0], -ned[2]] )
cam0 = []
for image in proj.image_list:
ned, ypr, quat = image.get_camera_pose()
cam0.append( [ned[1], ned[0], -ned[2]] )
# iterate through the image list and build the camera pose dictionary
# (and a simple list of camera locations for plotting)
# cam_dict = {}
# for image in proj.image_list:
# rvec, tvec, ned = image.get_proj()
# cam_dict[image.name] = {}
# cam_dict[image.name]['rvec'] = rvec
# cam_dict[image.name]['tvec'] = tvec
# cam_dict[image.name]['ned'] = ned
count = 0
while True:
# find the 'best fit' camera poses for the triangulation averaged
# together.
cam_dict = solvePnP(matches_direct)
# measure our current mean reprojection error and trim mre
# outliers from the match set (any points with mre 4x stddev) as
# well as any weak images with < 25 matches.
(result_list, mre, stddev) \
= proj.compute_reprojection_errors(cam_dict, matches_direct)
if start_mre < 0.0: start_mre = mre
print "mre = %.4f stddev = %.4f features = %d" % (mre, stddev, len(matches_direct))
cull_outliers = False
if cull_outliers:
mark_outliers(result_list, mre + stddev*4, matches_direct)
mark_weak_images(matches_direct)
delete_marked_matches(matches_direct)
# after outlier deletion, re-evalute matched pairs and connection
# cycles.
match_pairs = proj.generate_match_pairs(matches_direct)
group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
mark_non_group(group_list[0], matches_direct)
delete_marked_matches(matches_direct)
else:
# keep accounting structures happy
mark_weak_images(matches_direct)
# get the affine transformation required to bring the new camera
# locations back into a best fit with the original camera
# locations
A = get_recenter_affine(cam_dict)
# thought #1: if we are triangulating, this could be done once at the
# end to fix up the solution, not every iteration? But it doesn't
# seem to harm the triangulation.
# thought #2: if we are projecting onto the dem surface, we
# probably shouldn't transform the cams back to the original
# because this could perpetually pull things out of convergence
transform_cams(A, cam_dict)
if args.strategy == 'my_triangulate':
# run the triangulation step (modifies NED coordinates in
# place). This computes a best fit for all the feature
# locations based on the current best camera poses.
my_triangulate(matches_direct, cam_dict)
elif args.strategy == 'triangulate':
# run the triangulation step (modifies NED coordinates in
# place). This computes a best fit for all the feature
# locations based on the current best camera poses.
triangulate(matches_direct, cam_dict)
elif args.strategy == 'dem':
# project the keypoints back onto the DEM surface from the
# updated camera poses.
proj.fastProjectKeypointsTo3d(sss, cam_dict)
# estimate new world coordinates for each match point
for match in matches_direct:
sum = np.array( [0.0, 0.0, 0.0] )
for p in match[1:]:
sum += proj.image_list[ p[0] ].coord_list[ p[1] ]
ned = sum / len(match[1:])
# print "avg =", ned
match[0] = ned.tolist()
else:
print 'unknown triangulation strategy, script will probably fail to do anything useful'
surface1 = []
for match in matches_direct:
ned = match[0]
print ned
surface1.append( [ned[1], ned[0], -ned[2]] )
# transform all the feature points by the affine matrix (modifies
# matches_direct NED coordinates in place)
# fixme: transform_points(A, matches_direct)
# fixme: transform camera locations and orientations as well
# run solvePnP now on the updated points (hopefully this will
# naturally reorient the cameras as needed.)
# 9/6/2016: shouldn't be needed since transform_points() now rotates
# the camera orientation as well?
# cam_dict = solvePnP(newpts_dict)
cam1 = []
for key in cam_dict:
p = cam_dict[key]['ned']
cam1.append( [ p[1], p[0], -p[2] ] )
if args.plot:
plot(surface0, cam0, surface1, cam1)
count += 1
# test stop conditions
if args.iterations:
if count >= args.iterations:
print 'Stopping (by request) after', count, 'iterations.'
break
elif args.target_mre:
if mre <= args.target_mre:
print 'Stopping (by request) with mre:', mre
break
else:
print 'No stop condition specified, running one iteration and stopping.'
break
(result_list, mre, stddev) \
= proj.compute_reprojection_errors(cam_dict, matches_direct)
print 'Start mre:', start_mre, 'end mre:', mre
result=raw_input('Update matches and camera poses? (y/n):')
if result == 'y' or result == 'Y':
print 'Writing direct matches...'
pickle.dump(matches_direct, open(args.project+"/matches_direct", "wb"))
print 'Updating and saving camera poses...'
for image in proj.image_list:
pose = cam_dict[image.name]
Rned2cam, jac = cv2.Rodrigues(pose['rvec'])
pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(pose['tvec'])
ned = pos.T[0].tolist()[0]
# Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
# so solvePnP returns this combination. We can extract Rned by
# premultiplying by cam2body aka inv(body2cam).
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
Rbody2ned = np.matrix(Rned2body).T
(yaw, pitch, roll) \
= transformations.euler_from_matrix(Rbody2ned, 'rzyx')
# print "ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
print 'orig:', image.get_camera_pose()
image.set_camera_pose( ned, [yaw/d2r, pitch/d2r, roll/d2r] )
print 'new: ', image.get_camera_pose()
image.save_meta()
|
mit
| -8,557,831,333,197,678,000
| 36.479936
| 97
| 0.592248
| false
| 3.378672
| false
| false
| false
|
asherkhb/coge
|
web/gobe/gobe.py
|
1
|
7785
|
#!/usr/bin/python
import web
import os
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
import simplejson
import urllib
TMPDIR = "../tmp/GEvo/"
if not os.path.exists(TMPDIR):
TMPDIR = os.path.join(os.path.dirname(__file__), TMPDIR)
DBTMPL = os.path.join(TMPDIR, "%s.sqlite")
def getdb(dbname):
db = sqlite3.connect(DBTMPL % dbname)
db.row_factory = sqlite3.Row
return db
class info(object):
def GET(self, dbname):
web.header('Content-type', 'text/javascript')
db = getdb(dbname)
c = db.cursor()
c2 = db.cursor()
c.execute("SELECT * FROM image_info order by display_id")
c2.execute("SELECT min(xmin) as min, max(xmax) as max, image_id FROM image_data WHERE type='anchor' GROUP BY image_id ORDER BY image_id")
result = {}
for i, (row, anchor) in enumerate(zip(c, c2)):
result[row['iname']] = dict(
title=row['title'],
i=i,
img_width=row['px_width'],
bpmin=row['bpmin'],
bpmax=row['bpmax'],
idx=row['id'],
xmin=anchor['min'],
xmax=anchor['max']
)
return simplejson.dumps(result)
class follow(object):
def GET(self, dbname):
web.header('content-type', 'text/javascript')
db = getdb(dbname)
c = db.cursor()
c2 = db.cursor()
img = web.input(img=None).img
bbox = map(float, web.input().bbox.split(","))
ids = []
pair_ids = []
used_pairs = []
def get_pair_data(pair_id):
c.execute("""SELECT xmin, xmax, ymin, ymax, image_id, image_track FROM image_data WHERE id = ?""", (pair_id,))
p = c.fetchone()
return dict(
pair_id=pair_id,
pair_image_id=p['image_id'],
pair_track=p['image_track'],
pair_bbox=(p['xmin'], p['ymin'], p['xmax'], p['ymax']))
def get_pairs(img_id, bbox):
c.execute("""SELECT id, pair_id, image_id, xmin, xmax, ymin, ymax FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
? - 1 > ymin AND ? + 1 < ymax AND image_id = ? AND pair_id != -99 AND type = 'HSP'""", \
(bbox[2], bbox[0], bbox[3], bbox[1], img_id))
results = c.fetchall()
if not results: return None
pairs = []
for r in results:
d = dict(id=r['id'], bbox=(r['xmin'], r['ymin'], r['xmax'], r['ymax']), image_id=r['image_id'])
d.update(get_pair_data(r['pair_id']))
pairs.append(d)
return pairs
def get_pairs_for_bbox_image(xmin, xmax, img_id, exclude_track):
c.execute("""SELECT id, pair_id, image_id, xmin, xmax, ymin, ymax
FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
image_id = ? AND pair_id != -99 AND image_track != ? AND type = 'HSP'""", \
(xmax, xmin, img_id, exclude_track))
web.debug("""SELECT id, pair_id, image_id, xmin, xmax, ymin, ymax
FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
image_id = ? AND pair_id != -99 AND image_track != ? AND type = 'HSP'""")
web.debug((xmax, xmin, img_id, exclude_track))
results = c.fetchall()
pairs = []
for r in results:
d = dict(id=r['id'], bbox=(r['xmin'], r['ymin'], r['xmax'], r['ymax']), image_id=r['image_id'])
d.update(get_pair_data(r['pair_id']))
pairs.append(d)
return pairs
pairs = get_pairs(img, bbox)
i = 0
while True:
L = len(pairs)
if i == L: break
pair = pairs[i]
new_pairs = get_pairs(pair['pair_image_id'], pair['pair_bbox'])
for np in (_np for _np in new_pairs if not _np in pairs):
new2 = get_pairs_for_bbox_image(np['bbox'][0], np['bbox'][2], np['image_id'], np['pair_track'])
if new2 == []: continue
# extend element-wise so each new pair dict becomes a top-level entry in
# pairs; appending the whole list would nest it and break the dict lookups
pairs.extend([p2 for p2 in new2 if p2 not in pairs])
pairs.extend([np for np in new_pairs if not np in pairs])
i += 1
if L == len(pairs): break
web.debug(pairs)
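        # Added note: the loop above is a breadth-first expansion over pair
        # links, starting from the HSPs under the requested bbox and collecting
        # transitively connected HSPs across images until no new pairs appear.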
class query(object):
def GET(self, dbname):
db = getdb(dbname)
c = db.cursor()
img = web.input(img=None).img
if web.input(bbox=None).bbox:
bbox = map(float, web.input().bbox.split(","))
c.execute("""SELECT * FROM image_data WHERE ? + 1 > xmin AND ? - 1 < xmax AND
? - 1 > ymin AND ? + 1 < ymax AND image_id = ? AND pair_id != -99 AND type = 'HSP'""", \
(bbox[2], bbox[0], bbox[3], bbox[1], img))
elif web.input(all=None).all:
c.execute("""SELECT distinct(image_track) as image_track FROM image_data WHERE ?
BETWEEN ymin AND ymax AND image_id = ? ORDER BY
ABS(image_track) DESC""", (float(web.input().y), img))
track = c.fetchone()['image_track']
web.debug(track)
c.execute("""SELECT id, xmin, xmax, ymin, ymax, image_id, image_track, pair_id, color, link FROM image_data
WHERE ( (image_track = ?) or (image_track = (? * -1) ) )
and image_id = ? and pair_id != -99 and type = 'HSP'""", (track, track, img))
else: # point query.
x = float(web.input().x)
y = float(web.input().y)
c.execute("""SELECT * FROM image_data WHERE ? + 3 > xmin AND ? - 3
< xmax AND ? BETWEEN ymin and ymax and image_id = ?""",
(x, x, y, img))
c2 = db.cursor()
# now iterate over the cursor
results = []
for result in c:
c2.execute("""SELECT id, xmin, xmax, ymin, ymax, image_id,
image_track, pair_id, color FROM image_data where
id = ?""", (result['pair_id'], ));
pair = c2.fetchone()
try:
anno = result['annotation']
if anno.startswith('http'):
anno = urllib.urlopen(anno).read()
except:
anno = ""
f1pts = []
f2pts = []
for k in ('xmin', 'ymin', 'xmax', 'ymax'):
f1pts.append(int(round(result[k])))
if pair:
f2pts.append(int(round(pair[k])))
f1pts.extend([result['id'], result['image_track']])
if pair:
f2pts.extend([pair['id'], pair['image_track']])
results.append(dict(
# TODO: tell eric to add 'CoGe' to the start of his links.
link=result['link'],
annotation = anno,
# TODO has_pair
has_pair= bool(pair),
color=(result['color'] or (pair and pair['color'])).replace('#', '0x'),
features={
'key%i' % result['image_id']: f1pts,
'key%i' % (pair and pair['image_id'] or 999): f2pts}
))
web.header('Content-type', 'text/javascript')
return simplejson.dumps({'resultset':results})
urls = (
# the first pattern is always the sqlite db name. e.g.: /GEVo_WxUonWBr/info
'/([^\/]+)/info/', 'info',
'/([^\/]+)/follow/', 'follow',
'/([^\/]+)/query/', 'query',
)
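# Added example requests (the db name here is hypothetical; paths follow the
# urls map above):
#   GET /GEvo_abc123/info/                          -> per-image metadata JSON
#   GET /GEvo_abc123/query/?img=1&x=10.0&y=20.0     -> point query
#   GET /GEvo_abc123/query/?img=1&bbox=0,0,100,100  -> bounding-box query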
app = web.application(urls, locals())
application = app.wsgifunc()
if __name__ == "__main__":
app.run()
|
bsd-2-clause
| 5,173,759,947,039,488,000
| 37.925
| 145
| 0.479897
| false
| 3.705378
| false
| false
| false
|
mjasher/gac
|
GAC/flopy/modpath/mpbas.py
|
1
|
6164
|
"""
mpbas module. Contains the ModpathBas class. Note that the user can access
the ModpathBas class as `flopy.modpath.ModpathBas`.
Additional information for this MODFLOW/MODPATH package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?bas6.htm>`_.
"""
import numpy as np
from numpy import empty, array
from flopy.mbase import Package
from flopy.utils import util_2d, util_3d
class ModpathBas(Package):
"""
MODPATH Basic Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modpath.mp.Modpath`) to which
this package will be added.
hnoflo : float
Head value assigned to inactive cells (default is -9999.).
hdry : float
Head value assigned to dry cells (default is -8888.).
def_face_ct : int
Number of default iface codes to read (default is 0).
bud_label : str or list of strs
MODFLOW budget item to which a default iface is assigned.
def_iface : int or list of ints
Cell face (iface) on which to assign flows from MODFLOW budget file.
laytyp : int or list of ints
MODFLOW layer type (0 is convertible, 1 is confined).
ibound : array of ints, optional
The ibound array (the default is 1).
prsity : array of floats, optional
The porosity array (the default is 0.30).
prsityCB : array of floats, optional
The porosity array for confining beds (the default is 0.30).
extension : str, optional
File extension (default is 'mpbas').
Attributes
----------
heading : str
Text string written to top of package input file.
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modpath.Modpath()
>>> mpbas = flopy.modpath.ModpathBas(m)
"""
def __init__(self, model, hnoflo=-9999., hdry=-8888.,
def_face_ct=0, bud_label=None, def_iface=None,
laytyp=0, ibound=1, prsity=0.30, prsityCB=0.30,
extension='mpbas', unitnumber = 86):
"""
Package constructor.
"""
Package.__init__(self, model, extension, 'MPBAS', unitnumber)
nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
self.parent.mf.get_name_file_entries()
self.heading1 = '# MPBAS for Modpath, generated by Flopy.'
self.heading2 = '#'
self.hnoflo = hnoflo
self.hdry = hdry
self.def_face_ct = def_face_ct
self.bud_label = bud_label
self.def_iface = def_iface
self.laytyp = laytyp
self.__ibound = util_3d(model, (nlay, nrow, ncol), np.int, ibound,
name='ibound', locat=self.unit_number[0])
self.prsity = prsity
self.prsityCB = prsityCB
self.prsity = util_3d(model,(nlay,nrow,ncol),np.float32,\
prsity,name='prsity',locat=self.unit_number[0])
self.prsityCB = util_3d(model,(nlay,nrow,ncol),np.float32,\
prsityCB,name='prsityCB',locat=self.unit_number[0])
self.parent.add_package(self)
def getibound(self):
"""
Return the ibound array.
Returns
-------
ibound : numpy.ndarray (nlay, nrow, ncol)
ibound object.
"""
return self.__ibound.array
def setibound(self, ibound):
"""
Set the ibound array.
"""
model = self.parent
nrow, ncol, nlay, nper = model.nrow_ncol_nlay_nper
self.__ibound = util_3d(model, (nlay, nrow, ncol), np.int, ibound,
name='ibound', locat=self.unit_number[0])
return
ibound = property(getibound, setibound)
def write_file(self):
"""
Write the package input file.
"""
nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
ModflowDis = self.parent.mf.get_package('DIS')
# Open file for writing
f_bas = open(self.fn_path, 'w')
f_bas.write('#{0:s}\n#{1:s}\n'.format(self.heading1,self.heading2))
f_bas.write('{0:16.6f} {1:16.6f}\n'\
.format(self.hnoflo, self.hdry))
f_bas.write('{0:4d}\n'\
.format(self.def_face_ct))
if self.def_face_ct > 0:
for i in range(self.def_face_ct):
f_bas.write('{0:20s}\n'.format(self.bud_label[i]))
f_bas.write('{0:2d}\n'.format(self.def_iface[i]))
#f_bas.write('\n')
flow_package = self.parent.mf.get_package('BCF6')
if flow_package is not None:
lc = util_2d(self.parent,(nlay,),np.int,\
flow_package.laycon.get_value(),name='bas - laytype',\
locat=self.unit_number[0])
else:
flow_package = self.parent.mf.get_package('LPF')
if flow_package is not None:
lc = util_2d(self.parent,(nlay,),\
np.int,flow_package.laytyp.get_value(),\
name='bas - laytype',locat=self.unit_number[0])
else:
flow_package = self.parent.mf.get_package('UPW')
if flow_package is not None:
lc = util_2d(self.parent,(nlay,),\
np.int,flow_package.laytyp.get_value(),\
name='bas - laytype', locat=self.unit_number[0])
# need to reset lc fmtin
lc.set_fmtin('(40I2)')
f_bas.write(lc.string)
# from modpath bas--uses keyword array types
f_bas.write(self.__ibound.get_file_entry())
# from MT3D bas--uses integer array types
#f_bas.write(self.ibound.get_file_entry())
f_bas.write(self.prsity.get_file_entry())
f_bas.write(self.prsityCB.get_file_entry())
f_bas.close()
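# Added usage sketch (mirrors the class docstring example; the output path is
# derived from the model, so nothing here is hard-coded):
#   >>> import flopy
#   >>> m = flopy.modpath.Modpath()
#   >>> mpbas = flopy.modpath.ModpathBas(m, hnoflo=-9999., prsity=0.30)
#   >>> mpbas.write_file()   # writes the MPBAS input file to self.fn_path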
|
gpl-2.0
| 3,510,775,014,214,753,300
| 34.922156
| 91
| 0.540234
| false
| 3.496313
| false
| false
| false
|
dattalab/d_code
|
events/eventRoutines.py
|
1
|
23750
|
"""Event arrays are 2D label arrays (time x ROI) that are generated from an
array of fluorescent traces of the same size.
Uses the following inequality to determine if an event occurred at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
See the findEvents() docstring for more info.
These routines are used to create and analyze event arrays. Note that
some of the event utility functions return masked numpy arrays. This
is because generally, there are different number of events in each
cell during each trial. Anywhere there wasn't an event is a 'np.nan'
value, and the mask will ensure that it isn't used to calculate things
like mean(), min(), max() etc.
"""
import numpy as np
import traces as tm
from sklearn.mixture import GMM
import scipy.ndimage as nd
import mahotas
__all__ = ['findEvents', 'findEventsGMM', 'findEventsBackground',
'getCounts', 'getStartsAndStops', 'getDurations', 'getAvgAmplitudes', 'getWeightedEvents',
'fitGaussianMixture1D', 'getGMMBaselines']
#----------------------------------------EVENT FINDING FUNCTIONS AND WRAPPERS-----------------------------------
def findEvents(traces, stds, std_threshold=2.5, falling_std_threshold=None, baselines=None, boxWidth=3, minimum_length=2, alpha=None):
"""Core event finding routine with flexible syntax.
Uses the following inequality to determine if an event occurred at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
By default, the baseline is 0.0 (the dF/F traces have been baselined). This baseline can be
explicitly specified using the `baselines` parameter. If `baselines` is a 1d array, it is a
global correction value. If `baselines` is exactly the same size as `traces`, the routine
assumes that the baselines have been explicitly specified across all cells, trials and frames.
If `baselines` is of size (time x trials), then the routine assumes that the baseline value has
been determined for the whole population on a trial by trial basis. This is done in the routines
`findEventsBackground` and `findEventsGMM`.
The `alpha` parameter is here for flexibility. It allows for the scaling of the threshold of detection
on a cell by cell, frame by frame basis independent of the noise of a cell or its baseline value.
If specified it must be the exact same size as `traces`. By default it is set to 1.0.
The routine returns an event array exactly the same size as `traces`, where each event is labeled with
a unique number (an integer). The background is labeled with '0'. This can be used in all the utility
routines below.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:param: alpha - optional scaling parameter for adjusting thresholds
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
time, cells, trials = traces.shape
events = np.zeros_like(traces)
# broadcasting of baselines. ends up as time x cells x trials. this is really annoying,
# but relying on numpy to broadcast things was tricky and problematic. idea here is to
# get baselines identical to traces
if baselines is None: # no baseline correction, default
full_baselines = np.zeros_like(traces)
elif baselines.shape == (time,): # one global correction
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines
elif baselines.shape == (time, cells): # full, but only one trial
full_baselines = baselines[:,:,None]
elif baselines.shape == (time, trials): # modeled on a trial by trial basis
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines[:,trial]
# this is a check to prevent a dip in the global population baseline from flagging cells as responders
# basically, if the estimated baseline falls below zero, we fall back to the implicit background
# value of 0.0
full_baselines[full_baselines<0.0] = 0.0
# alpha is a scaling factor for event detection. if used it has to be the same size and shape as traces.
# no broadcasting is done here. it scales the threshold for detection so by default it is 1.0 everywhere.
if alpha is None:
alpha = np.ones_like(full_baselines)
# smooth traces and baselines
if boxWidth != 0:
    traces_smoothed = nd.convolve1d(traces, np.array([1]*boxWidth)/float(boxWidth), axis=0)
    baselines_smoothed = nd.convolve1d(full_baselines, np.array([1]*boxWidth)/float(boxWidth), axis=0)
else:
    # without smoothing, fall back to the raw arrays so the names below exist
    traces_smoothed = traces
    baselines_smoothed = full_baselines
# detect events
for trial in range(trials):
for cell in range(cells):
events[:,cell,trial] = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(std_threshold) * alpha[:,cell,trial])
# filter for minimum length
events = mahotas.label(events, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
for single_event in range(1, events.max()+1):
if (events == single_event).sum() <= minimum_length:
events[events == single_event] = 0
events = events>0
# if a falling std is specified, extend events until they drop below that threshold
if falling_std_threshold is not None:
for trial in range(trials):
for cell in range(cells):
falling_thresh_events = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(falling_std_threshold) * alpha[:,cell,trial])
for event_end in np.argwhere(np.diff(events[:,cell,trial].astype(int)) == -1):
j = event_end
while (j<time) and ((events[j,cell,trial]) or (falling_thresh_events[j])):
events[j,cell,trial] = events[j-1,cell,trial]
j = j + 1
# finally label the event array and return it.
events = mahotas.label(events>0, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
return np.squeeze(events)
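# Added usage sketch (synthetic data, illustrative only):
#   traces = np.zeros((100, 3)); traces[40:50, 1] = 1.0   # one bright cell
#   stds = np.full(3, 0.1)
#   events = findEvents(traces, stds)
#   # events has the same shape as traces; background is 0 and each detected
#   # event carries a unique integer label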
def findEventsGMM(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using a mixture of gaussians model.
The major idea here is to model the ROI brightness values within
each trial as a mixture of two gaussians -
one for the 'baseline' and one for all the 'bright' responding
pixels. At each time point, the ROI brightnesses are fit with
with this GMM. The means of the two distributions are initialized
to the background 'cell' and all points brighter than the mean of
all ROIs. After fitting, the smaller of the two means at every
point is taken to be the 'background'. This generally is very
close to the average of the entire frame, but is generally smaller
during full field events, because the larger gaussian 'sucks up'
the spurious bright pixels.
See getGMMBaselines() for more information.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = getGMMBaselines(traces) # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
def findEventsBackground(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using the background..
Here, we estimate the population baseline for all the cells as the
'background cell', or cell 0. It is generally a fair estimation
of the general response of the field of view, but is imperfect due
to segmentation errors.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = traces[:,0,:].copy() # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
#----------------------------------------EVENT UTILITY FUNCTIONS-----------------------------------
def getStartsAndStops(event_array):
"""This routine takes an event_array and returns the starting and
stopping times for all events in the array.
:param: event_array - 2d or 3d numpy event array (time x cells, or time x cells x trials))
:returns: masked numpy arrays, one for starting times and stopping times.
size is cells x max event number or cells x trials x max event number.
masked array is to account for the variable number of events in each cell
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
starts = np.zeros((cells, trials, int(max_num_events)))
stops = np.zeros((cells, trials, int(max_num_events)))
starts[:] = np.nan
stops[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
starts[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[0]
stops[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[-1]
starts = np.ma.array(starts, mask=np.isnan(starts))
starts = np.squeeze(starts)
stops = np.ma.array(stops, mask=np.isnan(stops))
stops = np.squeeze(stops)
return starts, stops
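# Quick hedged sketch of the masked return values (numpy imported locally to
# keep the example self-contained):
def _starts_stops_demo():
    import numpy as np
    events = np.zeros((10, 2), dtype=int)   # time x cells, already labeled
    events[2:5, 0] = 1                      # one event (label 1) in cell 0
    starts, stops = getStartsAndStops(events)
    return starts, stops                    # starts[0] == 2, stops[0] == 4; cell 1 is masked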
def getCounts(event_array, time_range=None):
"""This routine takes an event_array and optionally a time range
and returns the number of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 1d or 2d numpy array of counts (cells or cells x trials)
"""
if time_range is not None:
event_array = event_array[time_range[0]:time_range[1],:] # note that this works for 2 or 3d arrays...
if event_array.ndim == 2:
event_array = event_array[:,:,np.newaxis]
time, cells, trials = event_array.shape
counts = np.zeros((cells,trials))
for trial in range(trials):
for cell in range(cells):
counts[cell, trial] = np.unique(event_array[:,cell,trial]).size - 1
return np.squeeze(counts)
def getDurations(event_array, time_range=None):
"""This routine takes an event_array (time x cells) and returns
the duration of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event durations. size is cells x largest number of events.
masked entries are to account for variable number of events
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
durations = np.zeros((cells, trials, int(max_num_events)))
durations[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
durations[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).size
durations = np.ma.array(durations, mask=np.isnan(durations))
durations = np.squeeze(durations)
return durations
def getAvgAmplitudes(event_array, trace_array, time_range=None):
"""This routine takes an event_array (time x cells) and
corresponding trace array and returns the average amplitudes of
events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: trace_array - 2 or 3d numpy trace array, same shape as event_array
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event average amplitudes. size is cells x largest number of events.
          masked entries account for the variable number of events
"""
event_array = np.atleast_3d(event_array)
trace_array= np.atleast_3d(trace_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
amps = np.zeros((cells, trials, int(max_num_events)))
amps[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
amps[cell, trial, i] = trace_array[event_array == event_id].mean()
amps = np.ma.array(amps, mask=np.isnan(amps))
amps = np.squeeze(amps)
return amps  # already masked and squeezed above
def getWeightedEvents(event_array, trace_array):
"""This routine takes an event array and corresponding trace array
and replaces the event labels with the average amplitude of the
event.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: trace_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:returns: 2d numpy array same shape and size of event_array, zero where there
weren't events, and the average event amplitude for the event otherwise.
"""
weighted_events = np.zeros_like(event_array, dtype=float)
for i in np.unique(event_array)[1:]:
weighted_events[event_array==i] = trace_array[event_array==i].mean()
return weighted_events
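# Tiny hedged illustration of getWeightedEvents on a hand-made labeled array:
def _weighted_events_demo():
    import numpy as np
    events = np.array([[0, 1, 1, 0, 2, 2]]).T       # time x cells (one cell)
    traces = np.array([[0., 2., 4., 0., 1., 3.]]).T
    return getWeightedEvents(events, traces)        # column: 0, 3, 3, 0, 2, 2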
#----------------------------------------GMM UTILITY FUNCTIONS-----------------------------------
def fitGaussianMixture1D(data, n=2, set_mean_priors=True):
"""Routine for fitting a 1d array to a mixture of `n` gaussians.
if 'set_mean_priors' is True (the default), we initialize the GMM
model with means equal to the first point (the 'background' cell)
and all ROIs larger than the mean. Otherwise, we have random means.
After fitting, we return the means, stds, and weights of the GMM,
along with the BIC, AIC, and the model itself.
:param: data - 1d array of data to fit
:param: n - number of gaussians to fit, defaults to 2
:param: set_mean_priors - boolean, if true, initializes the means of a mixture of 2 gaussians
:returns: tuple of (means, stds, weights, BIC, AIC, GMM model object)
"""
if set_mean_priors:
g = GMM(n_components=n, init_params='wc', n_init=5)
g.means_ = np.zeros((n, 1))
g.means_[0,0] = data[0] # first datapoint is the background value... should be near 0.0
g.means_[1,0] = data[data > data[0]].mean()
else:
g = GMM(n_components=n, n_init=5)
g.fit(data)
return (np.squeeze(g.means_.flatten()),
np.squeeze(np.sqrt(g.covars_).flatten()),
np.squeeze(g.weights_).flatten(),
g.bic(data),
g.aic(data),
g)
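# A small hedged sketch on synthetic data (assumes the GMM class imported at
# module level accepts 1d input, exactly as getGMMBaselines below passes it):
def _gmm_fit_demo():
    import numpy as np
    np.random.seed(0)
    data = np.concatenate([np.random.randn(50) * 0.05,          # dim background
                           1.0 + np.random.randn(10) * 0.05])   # bright responders
    data[0] = 0.0   # first point stands in for the background 'cell'
    means, sds, weights, bic, aic, model = fitGaussianMixture1D(data, 2)
    return means.min()   # the smaller mean is the baseline estimate, near 0.0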
def getGMMBaselines(traces):
"""Wrapper for fitGaussianMixture1D() for findEventsGMM().
:param: traces - 2 or 3d numpy array of dF/F (time x cells, or time x cells x trials)
:returns: 1 or 2d numpy array of estimated baseline (time or time x trials).
"""
traces = np.atleast_3d(traces) # time x cells x trials
time, cells, trials = traces.shape
gmmBaselines = np.zeros((time, trials)) # one baseline estimation for each trial
for trial in range(trials):
for frame in range(time):
means, stds, weights, bic, aic, model = fitGaussianMixture1D(traces[frame,:,trial], 2)
gmmBaselines[frame, trial] = means.min()
return gmmBaselines
#----------------------------------------DEPRECATED EVENT FINDING FUNCTIONS-----------------------------------
def findEventsAtThreshold(traces, stds, rising_threshold, falling_threshold=0.75, first_mode='rising', second_mode='falling', boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
Routine to find events based on the method in Dombeck et al., 2007.
Relies on the multi-dimensional findLevels function in traceRoutines.
Finds all two sets of points in `traces` that cross threshold multiples
of `stds`. The first_mode and second_mode parameters determine if the
crossings are rising, or falling. The trace is filtered with a flat
kernel of width `boxWidth` and successive crossings are paired. Any
crossings less that `distance_cutoff` apart are discarded.
This routine is called by findEventsDombeck().
:param: traces - 2 or 3d numpy array of dF/F traces (time x cells, or time x cells x trial)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: rising_threshold - float used for first crossings
:param: falling_threshold - float used for second crossings
:param: boxWidth - filter size
:param: distance_cutoff - eliminate crossings pairs closer than this- eliminates noise
:returns: 2d or 3d array same size and dimension as traces, labeled with event number
"""
# ensure that we have at least one 'trial' dimension.
if traces.ndim == 2:
traces = np.atleast_3d(traces)
stds = np.atleast_2d(stds)
time, cells, trials = traces.shape
# normally tm.findLevels works with a single number, but if the shapes are right then it will broadcast correctly with a larger array
first_crossings = tm.findLevelsNd(traces, np.array(stds)*rising_threshold, mode=first_mode, axis=0, boxWidth=boxWidth)
second_crossings = tm.findLevelsNd(traces, np.array(stds)*falling_threshold, mode=second_mode, axis=0, boxWidth=boxWidth)
events = np.zeros_like(traces)
i=1
for cell in range(cells):
for trial in range(trials):
rising_event_locations = np.where(first_crossings[:,cell,trial])[0] # peel off the tuple
falling_event_locations = np.where(second_crossings[:,cell,trial])[0] # peel off the tuple
possible_pairs = []
for r in rising_event_locations:
if possible_pairs:
prev_rising = zip(*possible_pairs)[0]
prev_falling = zip(*possible_pairs)[1]
if r <= prev_falling[-1]:
continue
try:
f = falling_event_locations[np.searchsorted(falling_event_locations, r)]
possible_pairs.append([r,f])
except IndexError:
possible_pairs.append([r,time])
for pair in possible_pairs:
if pair[1]-pair[0] > distance_cutoff:
events[pair[0]:pair[1], cell, trial] = i
i = i+1
return np.squeeze(events)
def findEventsDombeck(traces, stds, false_positive_rate=0.05, lower_sigma=1, upper_sigma=5, boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
This routine uses findEventsAtThreshold() at a range of thresholds to
detect both positive and negative going events, and calculates a false
positive rate based on the percentage of total events that are negative
(see Dombeck et al. 2007). It then picks the threshold closest to
the specified false positive rate and returns that event array for
positive going events.
The falling value is hardcoded at 0.75 * std of baseline, as per Dombeck et al. 2007.
:param: traces - 2 or 3d numpy array of traces (time x cells or time x cells x trials)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: false_positive_rate - float value of desired false positive rate (0.05 = 5%)
:param: lower_sigma - starting point for scan
:param: upper_sigma - stopping point for scan
:param: boxWidth - window size for pre-smoothing
:param: distance_cutoff - minimum length of event
:returns: events array for traces at desired false positive rate
"""
all_events = []
for sigma in np.arange(lower_sigma, upper_sigma, 0.125):
pos_events = findEventsAtThreshold(traces, stds, sigma, 0.75, first_mode='rising', second_mode='falling', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
neg_events = findEventsAtThreshold(traces, stds, -sigma, -0.75, first_mode='falling', second_mode='rising', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
temp_false_positive_rate = neg_events.max() / (pos_events.max() + neg_events.max())
all_events.append((sigma, pos_events.max(), neg_events.max(), temp_false_positive_rate, pos_events, neg_events))
closest_to_false_pos = np.argmin(np.abs(np.array(zip(*all_events)[3])-false_positive_rate)) # get all false positive rates, find index closest to 0.05
print 'Using sigma cutoff of: ' + str(all_events[closest_to_false_pos][0]) # get the right sigma
return all_events[closest_to_false_pos][4] # pos events are 4th in tuple
|
mit
| 7,388,882,840,149,779,000
| 49.211416
| 179
| 0.666695
| false
| 3.80792
| false
| false
| false
|
romses/FitView
|
fitparse/records.py
|
1
|
11248
|
import math
import struct
try:
from itertools import izip_longest
except:
from itertools import zip_longest as izip_longest
from numbers import Number
class RecordBase(object):
# namedtuple-like base class. Subclasses must define __slots__
__slots__ = ()
# TODO: switch back to namedtuple, and don't use default arguments as None
# and see if that gives us any performance improvements
def __init__(self, *args, **kwargs):
# NOTE: izip_longest replaces the old map(None, l1, l2) idiom, which no longer exists in py3k
for slot_name, value in izip_longest(self.__slots__, args):
# map(None, self.__slots__, args):
setattr(self, slot_name, value)
for slot_name, value in kwargs.items():
setattr(self, slot_name, value)
class MessageHeader(RecordBase):
__slots__ = ('is_definition', 'local_mesg_num', 'time_offset')
def __repr__(self):
return '<MessageHeader: %s -- local mesg: #%d%s>' % (
'definition' if self.is_definition else 'data',
self.local_mesg_num,
', time offset: %d' % self.time_offset
if self.time_offset else '', )
class DefinitionMessage(RecordBase):
__slots__ = ('header', 'endian', 'mesg_type', 'mesg_num', 'field_defs')
type = 'definition'
@property
def name(self):
return self.mesg_type.name if self.mesg_type else 'unknown_%d' % self.mesg_num
def __repr__(self):
return '<DefinitionMessage: %s (#%d) -- local mesg: #%d, field defs: [%s]>' % (
self.name,
self.mesg_num,
self.header.local_mesg_num,
', '.join([fd.name for fd in self.field_defs]), )
class FieldDefinition(RecordBase):
__slots__ = ('field', 'def_num', 'base_type', 'size')
@property
def name(self):
return self.field.name if self.field else 'unknown_%d' % self.def_num
@property
def type(self):
return self.field.type if self.field else self.base_type
def __repr__(self):
return '<FieldDefinition: %s (#%d) -- type: %s (%s), size: %d byte%s>' % (
self.name,
self.def_num,
self.type.name,
self.base_type.name,
self.size,
's' if self.size != 1 else '', )
class DataMessage(RecordBase):
__slots__ = ('header', 'def_mesg', 'fields')
type = 'data'
def get(self, field_name, as_dict=False):
# SIMPLIFY: get rid of as_dict
for field_data in self.fields:
if field_data.is_named(field_name):
return field_data.as_dict() if as_dict else field_data
def get_value(self, field_name):
# SIMPLIFY: get rid of this completely
field_data = self.get(field_name)
if field_data:
return field_data.value
def get_values(self):
# SIMPLIFY: get rid of this completely
return dict((f.name if f.name else f.def_num, f.value)
for f in self.fields)
@property
def name(self):
return self.def_mesg.name
@property
def mesg_num(self):
# SIMPLIFY: get rid of this
return self.def_mesg.mesg_num
@property
def mesg_type(self):
# SIMPLIFY: get rid of this
return self.def_mesg.mesg_type
def as_dict(self):
# TODO: rethink this format
return {
'name': self.name,
'fields': [f.as_dict() for f in self.fields],
}
def __iter__(self):
# Sort by whether this is a known field, then its name
return iter(
sorted(
self.fields, key=lambda fd: (int(fd.field is None), fd.name)))
def __repr__(self):
return '<DataMessage: %s (#%d) -- local mesg: #%d, fields: [%s]>' % (
self.name,
self.mesg_num,
self.header.local_mesg_num,
', '.join(
["%s: %s" % (fd.name, fd.value) for fd in self.fields]), )
def __str__(self):
# SIMPLIFY: get rid of this
return '%s (#%d)' % (self.name, self.mesg_num)
class FieldData(RecordBase):
__slots__ = ('field_def', 'field', 'parent_field', 'value', 'raw_value',
'units')
def __init__(self, *args, **kwargs):
super(FieldData, self).__init__(*args, **kwargs)
if not self.units and self.field:
# Default to units on field, otherwise None.
# NOTE: Not a property since you may want to override this in a data processor
self.units = self.field.units
@property
def name(self):
return self.field.name if self.field else 'unknown_%d' % self.def_num
# TODO: Some notion of flags
def is_named(self, name):
if self.field:
if name in (self.field.name, self.field.def_num):
return True
if self.parent_field:
if name in (self.parent_field.name, self.parent_field.def_num):
return True
if self.field_def:
if name == self.field_def.def_num:
return True
return False
@property
def def_num(self):
# Prefer to return the def_num on the field
# since field_def may be None if this field is dynamic
return self.field.def_num if self.field else self.field_def.def_num
@property
def base_type(self):
# Try field_def's base type; if it doesn't exist, this is a
# dynamically added field, so field won't be None
return self.field_def.base_type if self.field_def else self.field.base_type
@property
def is_base_type(self):
return self.field.is_base_type if self.field else True
@property
def type(self):
return self.field.type if self.field else self.base_type
@property
def field_type(self):
return self.field.field_type if self.field else 'field'
def as_dict(self):
return {
'name': self.name,
'def_num': self.def_num,
'base_type': self.base_type.name,
'type': self.type.name,
'units': self.units,
'value': self.value,
'raw_value': self.raw_value,
}
def __repr__(self):
return '<FieldData: %s: %s%s, def num: %d, type: %s (%s), raw value: %s>' % (
self.name,
self.value,
' [%s]' % self.units if self.units else '',
self.def_num,
self.type.name,
self.base_type.name,
self.raw_value, )
def __str__(self):
return '%s: %s%s' % (
self.name,
self.value,
' [%s]' % self.units if self.units else '', )
class BaseType(RecordBase):
__slots__ = ('name', 'identifier', 'fmt', 'parse')
values = None # In case we're treated as a FieldType
@property
def size(self):
return struct.calcsize(self.fmt)
@property
def type_num(self):
return self.identifier & 0x1F
def __repr__(self):
return '<BaseType: %s (#%d [0x%X])>' % (
self.name,
self.type_num,
self.identifier, )
class FieldType(RecordBase):
__slots__ = ('name', 'base_type', 'values')
def __repr__(self):
return '<FieldType: %s (%s)>' % (self.name, self.base_type)
class MessageType(RecordBase):
__slots__ = ('name', 'mesg_num', 'fields')
def __repr__(self):
return '<MessageType: %s (#%d)>' % (self.name, self.mesg_num)
class FieldAndSubFieldBase(RecordBase):
__slots__ = ()
@property
def base_type(self):
return self.type if self.is_base_type else self.type.base_type
@property
def is_base_type(self):
return isinstance(self.type, BaseType)
def render(self, raw_value):
if self.type.values and (raw_value in self.type.values):
return self.type.values[raw_value]
return raw_value
class Field(FieldAndSubFieldBase):
__slots__ = ('name', 'type', 'def_num', 'scale', 'offset', 'units',
'components', 'subfields')
field_type = 'field'
class SubField(FieldAndSubFieldBase):
__slots__ = ('name', 'def_num', 'type', 'scale', 'offset', 'units',
'components', 'ref_fields')
field_type = 'subfield'
class ReferenceField(RecordBase):
__slots__ = ('name', 'def_num', 'value', 'raw_value')
class ComponentField(RecordBase):
__slots__ = ('name', 'def_num', 'scale', 'offset', 'units', 'accumulate',
'bits', 'bit_offset')
field_type = 'component'
def render(self, raw_value):
if raw_value is None:
return None
# If it's a tuple, then it's a byte array and unpack it as such
# (only type that uses this is compressed speed/distance)
if isinstance(raw_value, tuple):
unpacked_num = 0
# Unpack byte array as little endian
for value in reversed(raw_value):
unpacked_num = (unpacked_num << 8) + value
raw_value = unpacked_num
# Mask and shift like a normal number
if isinstance(raw_value, Number):
raw_value = (raw_value >> self.bit_offset) & ((1 << self.bits) - 1)
return raw_value
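# Hedged illustration of the mask-and-shift path in ComponentField.render
# (the field values below are hypothetical, chosen only to show a 4-bit
# component located at bit offset 8):
def _component_render_demo():
    comp = ComponentField(name='demo', def_num=0, scale=None, offset=None,
                          units=None, accumulate=False, bits=4, bit_offset=8)
    return comp.render(0x0A42)   # (0x0A42 >> 8) & 0xF == 0xA == 10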
# The default base type
BASE_TYPE_BYTE = BaseType(
name='byte',
identifier=0x0D,
fmt='B',
parse=lambda x: None if all(b == 0xFF for b in x) else x)
BASE_TYPES = {
0x00: BaseType(
name='enum',
identifier=0x00,
fmt='B',
parse=lambda x: None if x == 0xFF else x),
0x01: BaseType(
name='sint8',
identifier=0x01,
fmt='b',
parse=lambda x: None if x == 0x7F else x),
0x02: BaseType(
name='uint8',
identifier=0x02,
fmt='B',
parse=lambda x: None if x == 0xFF else x),
0x83: BaseType(
name='sint16',
identifier=0x83,
fmt='h',
parse=lambda x: None if x == 0x7FFF else x),
0x84: BaseType(
name='uint16',
identifier=0x84,
fmt='H',
parse=lambda x: None if x == 0xFFFF else x),
0x85: BaseType(
name='sint32',
identifier=0x85,
fmt='i',
parse=lambda x: None if x == 0x7FFFFFFF else x),
0x86: BaseType(
name='uint32',
identifier=0x86,
fmt='I',
parse=lambda x: None if x == 0xFFFFFFFF else x),
0x07: BaseType(
name='string',
identifier=0x07,
fmt='s',
parse=lambda x: x.split(b'\x00')[0] or None),
0x88: BaseType(
name='float32',
identifier=0x88,
fmt='f',
parse=lambda x: None if math.isnan(x) else x),
0x89: BaseType(
name='float64',
identifier=0x89,
fmt='d',
parse=lambda x: None if math.isnan(x) else x),
0x0A: BaseType(
name='uint8z',
identifier=0x0A,
fmt='B',
parse=lambda x: None if x == 0x0 else x),
0x8B: BaseType(
name='uint16z',
identifier=0x8B,
fmt='H',
parse=lambda x: None if x == 0x0 else x),
0x8C: BaseType(
name='uint32z',
identifier=0x8C,
fmt='I',
parse=lambda x: None if x == 0x0 else x),
0x0D: BASE_TYPE_BYTE,
}
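# A minimal decode sketch (hedged: this mirrors how a FIT parser would combine
# fmt with struct and then apply parse; the byte string below is made up):
def _base_type_demo():
    uint16 = BASE_TYPES[0x84]
    raw, = struct.unpack('<' + uint16.fmt, b'\x2a\x00')   # little-endian 42
    assert uint16.parse(raw) == 42
    assert uint16.parse(0xFFFF) is None   # 0xFFFF is the 'invalid' sentinel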
|
bsd-3-clause
| -8,365,860,220,540,263,000
| 28.291667
| 89
| 0.55192
| false
| 3.534884
| false
| false
| false
|
crossgovernmentservices/csd-notes
|
app/blueprints/sso/views.py
|
1
|
1740
|
# -*- coding: utf-8 -*-
"""
Single Sign-On views
"""
from urllib.parse import unquote, urlparse, urlunparse
from flask import (
Blueprint,
redirect,
request,
session,
url_for
)
from flask_security.utils import login_user, logout_user
from app.extensions import (
user_datastore,
oidc
)
sso = Blueprint('sso', __name__)
def sanitize_url(url):
if url:
parts = list(urlparse(url))
parts[0] = ''
parts[1] = ''
parts[3] = ''
url = urlunparse(parts[:6])
return url
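# A minimal hedged sketch of what sanitize_url strips (standard urllib.parse
# semantics): blanking scheme and netloc means a crafted `next` parameter
# cannot redirect the user off-site.
def _sanitize_url_demo():
    assert sanitize_url('https://evil.example/notes?page=2') == '/notes?page=2'
    assert sanitize_url('/notes?page=2') == '/notes?page=2'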
@sso.route('/login')
def login():
"login redirects to Dex for SSO login/registration"
next_url = sanitize_url(unquote(request.args.get('next', '')))
if next_url:
session['next_url'] = next_url
return redirect(oidc.login('dex'))
@sso.route('/logout')
def logout():
logout_user()
return redirect(url_for('base.index'))
@sso.route('/callback')
@oidc.callback
def oidc_callback():
user_info = oidc.authenticate('dex', request)
user = user_datastore.get_user(user_info['email'])
if not user:
user = create_user(user_info)
login_user(user)
next_url = url_for('base.index')
if 'next_url' in session:
next_url = session['next_url']
del session['next_url']
return redirect(next_url)
def create_user(user_info):
email = user_info['email']
name = user_info.get('nickname', user_info.get('name'))
user = add_role('USER', user_datastore.create_user(
email=email,
full_name=name))
user_datastore.commit()
return user
def add_role(role, user):
user_role = user_datastore.find_or_create_role(role)
user_datastore.add_role_to_user(user, user_role)
return user
|
mit
| 8,933,051,197,988,040,000
| 17.913043
| 66
| 0.616667
| false
| 3.359073
| false
| false
| false
|
CFIS-Octarine/octarine
|
planning/ObsStatus.py
|
1
|
9227
|
from __future__ import absolute_import
import argparse
import logging
import math
import sys
import tempfile
import time
import ephem
import matplotlib
import requests
from astropy.io.votable import parse
matplotlib.use('Agg')
from matplotlib.pyplot import figure, close
from matplotlib.patches import Rectangle
from matplotlib.backends.backend_pdf import PdfPages
from src.daomop import (storage)
from src.planning import parameters
dbimages = 'vos:jkavelaars/CFIS/dbimages'
storage.DBIMAGES = dbimages
parameters.RUNIDS = ['17AP30', '17AP31']
def query_for_observations(mjd, observable, runids):
"""Do a QUERY on the TAP service for all observations that are part of runid,
where taken after mjd and have calibration 'observable'.
Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables
mjd : float
observable: str ( 2 or 1 )
runid: tuple eg. ('13AP05', '13AP06')
"""
data = {"QUERY": ("SELECT Observation.target_name as TargetName, "
"COORD1(CENTROID(Plane.position_bounds)) AS RA,"
"COORD2(CENTROID(Plane.position_bounds)) AS DEC, "
"Plane.time_bounds_lower AS StartDate, "
"Plane.time_exposure AS ExposureTime, "
"Observation.instrument_name AS Instrument, "
"Plane.energy_bandpassName AS Filter, "
"Observation.observationID AS dataset_name, "
"Observation.proposal_id AS ProposalID, "
"Observation.proposal_pi AS PI "
"FROM caom2.Observation AS Observation "
"JOIN caom2.Plane AS Plane ON "
"Observation.obsID = Plane.obsID "
"WHERE ( Observation.collection = 'CFHT' ) "
"AND Plane.time_bounds_lower > %d "
"AND Plane.calibrationLevel=%s "
"AND Observation.proposal_id IN %s " ) %
( mjd, observable, str(runids)),
"REQUEST": "doQuery",
"LANG": "ADQL",
"FORMAT": "votable"}
result = requests.get(storage.TAP_WEB_SERVICE, params=data, verify=False)
assert isinstance(result, requests.Response)
logging.debug("Doing TAP Query using url: %s" % (str(result.url)))
tmpFile = tempfile.NamedTemporaryFile()
with open(tmpFile.name, 'w') as outfile:
outfile.write(result.text)
try:
vot = parse(tmpFile.name).get_first_table()
except:
print result.text
raise
vot.array.sort(order='StartDate')
t = vot.array
tmpFile.close()
logging.debug("Got {} lines from tap query".format(len(t)))
return t
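# Hedged example call (requires network access to the CADC TAP service; the
# MJD and run IDs below are illustrative only):
def _query_demo():
    return query_for_observations(56000.0, 1, ('17AP30', '17AP31'))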
def create_ascii_table(obsTable, outfile):
"""Given a table of observations create an ascii log file for easy parsing.
Store the result in outfile (could/should be a vospace dataNode)
obsTable: astropy.votable.array object
outfile: str (target_name of the vospace dataNode to store the result to)
"""
logging.info("writing text log to %s" % ( outfile))
stamp = "#\n# Last Updated: " + time.asctime() + "\n#\n"
header = "| %20s | %20s | %20s | %20s | %20s | %20s | %20s |\n" % (
"EXPNUM", "OBS-DATE", "FIELD", "EXPTIME(s)", "RA", "DEC", "RUNID")
bar = "=" * (len(header) - 1) + "\n"
if outfile[0:4] == "vos:":
tmpFile = tempfile.NamedTemporaryFile(suffix='.txt')
fout = tmpFile
else:
fout = open(outfile, 'w')
t2 = None
fout.write(bar + stamp + bar + header)
populated = storage.list_dbimages(dbimages=dbimages)
for i in range(len(obsTable) - 1, -1, -1):
row = obsTable.data[i]
if row['dataset_name'] not in populated:
storage.populate(row['dataset_name'])
sDate = str(ephem.date(row.StartDate +
2400000.5 -
ephem.julian_date(ephem.date(0))))[:20]
t1 = time.strptime(sDate, "%Y/%m/%d %H:%M:%S")
if t2 is None or math.fabs(time.mktime(t2) - time.mktime(t1)) > 3 * 3600.0:
fout.write(bar)
t2 = t1
ra = str(ephem.hours(math.radians(row.RA)))
dec = str(ephem.degrees(math.radians(row.DEC)))
line = "| %20s | %20s | %20s | %20.1f | %20s | %20s | %20s |\n" % (
str(row.dataset_name),
str(ephem.date(row.StartDate + 2400000.5 -
ephem.julian_date(ephem.date(0))))[:20],
row.TargetName[:20],
row.ExposureTime, ra[:20], dec[:20], row.ProposalID[:20] )
fout.write(line)
fout.write(bar)
if outfile[0:4] == "vos:":
fout.flush()
storage.copy(tmpFile.name, outfile)
fout.close()
return
def create_sky_plot(obstable, outfile, night_count=1, stack=True):
"""Given a VOTable that describes the observation coverage provide a PDF of the skycoverge.
obstable: vostable.arrary
stack: BOOL (true: stack all the observations in a series of plots)
"""
# camera dimensions
width = 0.98
height = 0.98
if outfile[0:4] == 'vos:':
tmpFile = tempfile.NamedTemporaryFile(suffix='.pdf')
pdf = PdfPages(tmpFile.name)
else:
pdf = PdfPages(outfile)
saturn = ephem.Saturn()
uranus = ephem.Uranus()
t2 = None
fig = None
proposalID = None
limits = {'13A': ( 245, 200, -20, 0),
'13B': ( 0, 45, 0, 20)}
for row in reversed(obstable.data):
date = ephem.date(row.StartDate + 2400000.5 - ephem.julian_date(ephem.date(0)))
sDate = str(date)
# Saturn only a problem in 2013A fields
saturn.compute(date)
sra = math.degrees(saturn.ra)
sdec = math.degrees(saturn.dec)
uranus.compute(date)
ura = math.degrees(uranus.ra)
udec = math.degrees(uranus.dec)
t1 = time.strptime(sDate, "%Y/%m/%d %H:%M:%S")
if t2 is None or (math.fabs(time.mktime(t2) - time.mktime(
t1)) > 3 * 3600.0 and opt.stack) or proposalID is None or proposalID != row.ProposalID:
if fig is not None:
pdf.savefig()
close()
proposalID = row.ProposalID
fig = figure(figsize=(7, 2))
ax = fig.add_subplot(111, aspect='equal')
ax.set_title("Data taken on %s-%s-%s" % ( t1.tm_year, t1.tm_mon, t1.tm_mday), fontdict={'fontsize': 8})
ax.axis(limits.get(row.ProposalID[0:3], (0, 20, 0, 20))) # appropriate only for 2013A fields
ax.grid()
ax.set_xlabel("RA (deg)", fontdict={'fontsize': 8})
ax.set_ylabel("DEC (deg)", fontdict={'fontsize': 8})
t2 = t1
ra = row.RA - width / 2.0
dec = row.DEC - height / 2.0
color = 'b'
if 'W' in row['TargetName']:
color = 'g'
ax.add_artist(Rectangle(xy=(ra, dec), height=height, width=width,
edgecolor=color, facecolor=color,
lw=0.5, fill='g', alpha=0.33))
ax.add_artist(Rectangle(xy=(sra, sdec), height=0.3, width=0.3,
edgecolor='r',
facecolor='r',
lw=0.5, fill='k', alpha=0.33))
ax.add_artist(Rectangle(xy=(ura, udec), height=0.3, width=0.3,
edgecolor='b',
facecolor='b',
lw=0.5, fill='b', alpha=0.33))
if ax is not None:
ax.axis((270, 215, -20, 0))
pdf.savefig()
close()
pdf.close()
if outfile[0:4] == "vos:":
storage.copy(tmpFile.name, outfile)
tmpFile.close()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Query the CADC for OSSOS observations.")
parser.add_argument('date', nargs='?', action='store',
default=parameters.SURVEY_START)
parser.add_argument('--runid', nargs='*', action='store',
default=parameters.RUNIDS)
parser.add_argument('--cal', action='store', default=1)
parser.add_argument('--outfile', action='store',
default='vos:OSSOS/ObservingStatus/obsList')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--stack', action='store_true', default=False,
help=( "Make single status plot that stacks"
" data accross multiple nights, instead of nightly sub-plots." ))
opt = parser.parse_args()
runids = tuple(opt.runid)
if opt.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
try:
mjd_yesterday = ephem.date(ephem.julian_date(ephem.date(opt.date))) - 2400000.5
except Exception as e:
logging.error("you said date = %s" % (opt.date))
logging.error(str(e))
sys.exit(-1)
obs_table = query_for_observations(mjd_yesterday, opt.cal, runids)
create_ascii_table(obs_table, opt.outfile + ".txt")
# create_sky_plot(obs_table, opt.outfile + ".pdf", stack=opt.stack)
|
gpl-3.0
| -8,040,165,882,080,599,000
| 34.08365
| 115
| 0.563672
| false
| 3.491109
| false
| false
| false
|
enthought/uchicago-pyanno
|
pyanno/ui/annotations_view.py
|
1
|
9266
|
# Copyright (c) 2011, Enthought, Ltd.
# Author: Pietro Berkes <pberkes@enthought.com>
# License: Modified BSD license (2-clause)
from traits.has_traits import HasTraits, on_trait_change
from traits.trait_numeric import Array
from traits.trait_types import (Instance, Int, ListFloat, Button, Event, File,
Any)
from traits.traits import Property
from traitsui.api import View, VGroup
from traitsui.editors.file_editor import FileEditor
from traitsui.editors.range_editor import RangeEditor
from traitsui.editors.tabular_editor import TabularEditor
from traitsui.group import HGroup, VGrid, Group
from traitsui.handler import ModelView
from traitsui.item import Item, Spring, Label
from traitsui.menu import OKCancelButtons
from pyanno.annotations import AnnotationsContainer
from pyanno.ui.appbase.wx_utils import is_display_small
from pyanno.ui.arrayview import Array2DAdapter
from pyanno.plots.hinton_plot import HintonDiagramPlot
from pyanno.util import labels_frequency, MISSING_VALUE, PyannoValueError
import numpy as np
import logging
logger = logging.getLogger(__name__)
WIDTH_CELL = 60
MAX_WIDTH = 1000
W_MARGIN = 150
class DataView(HasTraits):
data = Array(dtype=object)
def traits_view(self):
ncolumns = len(self.data[0])
w_table = min(WIDTH_CELL * ncolumns, MAX_WIDTH)
w_view = min(w_table + W_MARGIN, MAX_WIDTH)
return View(
Group(
Item('data',
editor=TabularEditor
(
adapter=Array2DAdapter(ncolumns=ncolumns,
format='%s',
show_index=True)),
show_label=False,
width=w_table,
padding=10),
),
title='Annotations',
width=w_view,
height=800,
resizable=True,
buttons=OKCancelButtons
)
class AnnotationsView(ModelView):
""" Traits UI Model/View for annotations."""
# reference to main application
application = Any
### Model-related traits ###
# container for annotations and their metadata
annotations_container = Instance(AnnotationsContainer)
# this can be set by the current model (could be different from the
# number of classes in the annotations themselves)
nclasses = Int(1)
frequency = ListFloat
@on_trait_change('annotations_container,annotations_updated,nclasses')
def _update_frequency(self):
nclasses = max(self.nclasses, self.annotations_container.nclasses)
try:
frequency = labels_frequency(
self.annotations_container.annotations,
nclasses).tolist()
except PyannoValueError as e:
logger.debug(e)
frequency = np.zeros((nclasses,)).tolist()
self.frequency = frequency
self.frequency_plot = HintonDiagramPlot(
data=self.frequency,
title='Observed label frequencies')
### Traits UI definitions ###
# event raised when annotations are updated
annotations_updated = Event
## frequency plot definition
frequency_plot = Instance(HintonDiagramPlot)
## edit data button opens annotations editor
edit_data = Button(label='Edit annotations...')
# save current annotations
save_data = Button(label='Save annotations...')
def _edit_data_fired(self):
data_view = DataView(data=self.annotations_container.raw_annotations)
data_view.edit_traits(kind='livemodal', parent=self.info.ui.control)
self.annotations_container = AnnotationsContainer.from_array(
data_view.data,
name = self.annotations_container.name
)
if self.application is not None:
self.application.main_window.set_annotations(
self.annotations_container)
def _save_data_fired(self):
save_filename = SaveAnnotationsDialog.open()
if save_filename is not None:
self.annotations_container.save_to(save_filename, set_name=True)
if self.application is not None:
self.application.main_window.set_annotations(
self.annotations_container)
### View definition ###
_name = Property
def _get__name(self):
return self.annotations_container.name
_nitems = Property
def _get__nitems(self):
return self.annotations_container.nitems
_nclasses = Property
def _get__nclasses(self):
return self.annotations_container.nclasses
_labels = Property
def _get__labels(self):
return str(self.annotations_container.labels)
_nannotators = Property
def _get__nannotators(self):
return str(self.annotations_container.nannotators)
def traits_view(self):
if is_display_small():
w_view = 350
else:
w_view = 450
info_group = VGroup(
Item('_name',
label='Annotations name:',
style='readonly',
padding=0),
VGrid(
Item('_nclasses',
label='Number of classes:',
style='readonly',
width=10),
Item('_labels',
label='Labels:',
style='readonly'),
Item('_nannotators',
label='Number of annotators:',
style='readonly', width=10),
Item('_nitems',
label='Number of items:',
style='readonly'),
padding=0
),
padding=0
)
body = VGroup(
info_group,
Item('_'),
HGroup(
VGroup(
Spring(),
Item('frequency_plot',
style='custom',
resizable=False,
show_label=False,
width=w_view
),
Spring()
),
Spring(),
VGroup(
Spring(),
Item('edit_data',
enabled_when='annotations_are_defined',
show_label=False),
Item('save_data',
enabled_when='annotations_are_defined',
show_label=False),
Spring()
)
),
Spring(),
Item('_'),
)
traits_view = View(body)
return traits_view
class SaveAnnotationsDialog(HasTraits):
filename = File
def _filename_default(self):
import os
home = os.getenv('HOME') or os.getenv('HOMEPATH')
return os.path.join(home, 'annotations.txt')
@staticmethod
def open():
dialog = SaveAnnotationsDialog()
dialog_ui = dialog.edit_traits(kind='modal')
if dialog_ui.result:
# user pressed 'OK'
return dialog.filename
else:
return None
traits_view = View(
Item('filename', label='Save to:',
editor=FileEditor(allow_dir=False,
dialog_style='save',
entries=0),
style='simple'
),
width = 400,
resizable = True,
buttons = ['OK', 'Cancel']
)
class CreateNewAnnotationsDialog(HasTraits):
nannotators = Int(8)
nitems = Int(100)
@staticmethod
def create_annotations_dialog():
dialog = CreateNewAnnotationsDialog()
dialog_ui = dialog.edit_traits(kind='modal')
if dialog_ui.result:
# user pressed 'Ok'
annotations = np.empty((dialog.nitems, dialog.nannotators),
dtype=int)
annotations.fill(MISSING_VALUE)
return annotations
else:
return None
def traits_view(self):
view = View(
VGroup(
Item(
'nannotators',
editor=RangeEditor(mode='spinner', low=3, high=1000),
label='Number of annotators:'
),
Item(
'nitems',
editor=RangeEditor(mode='spinner', low=2, high=1000000),
label='Number of items'
),
),
buttons = ['OK', 'Cancel']
)
return view
#### Testing and debugging ####################################################
def main():
""" Entry point for standalone testing/debugging. """
from pyanno.modelBt_loopdesign import ModelBtLoopDesign
model = ModelBtLoopDesign.create_initial_state(5)
annotations = model.generate_annotations(2)
anno = AnnotationsContainer.from_array(annotations, name='blah')
model_view = AnnotationsView(annotations_container=anno, model=HasTraits())
model_view.configure_traits()
return model, annotations, model_view
if __name__ == '__main__':
m, a, mv = main()
|
bsd-2-clause
| -7,870,631,480,164,798,000
| 29.281046
| 79
| 0.54878
| false
| 4.548846
| false
| false
| false
|
try-dash-now/idash
|
lib/winTelnet.py
|
1
|
17448
|
__author__ = 'Sean Yu'
'''created @2015/9/14'''
'''a windows telnet session'''
from telnetlib import Telnet as spawn
import socket
import select
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end of record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
from dut import dut
import threading
import os
import time
import re
class winTelnet(dut, object):#, spawn
def __del__(self):
self.SessionAlive= False
time.sleep(0.1)
if self.sock:
self.write('exit')
self.write('exit')
self.write('exit')
self.send(']',Ctrl=True)
self.write('quit')
#self.sock.close()
def __init__(self, name, attr =None,logger=None, logpath= None, shareData=None):
dut.__init__(self, name,attr,logger, logpath , shareData)
try:
host=""
port=23
reHostOnly= re.compile('\s*telnet\s+([\d.\w\-_]+)\s*',re.I)
reHostPort = re.compile('\s*telnet\s+([\d.\w]+)\s+(\d+)', re.I )
command = self.attribute.get('CMD')
m1=re.match(reHostOnly, command)
m2=re.match(reHostPort, command)
if m2:
host= m2.group(1)
port= int(m2.group(2))
elif m1:
host= m1.group(1)
#import socket
#timeout = 30
#self.sock = socket.create_connection((host, port), timeout)
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
timeout=0.5
self.timeout = timeout
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
self.iacseq = '' # Buffer for IAC sequence.
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = ''
self.option_callback = None
self._has_poll = hasattr(select, 'poll')
if host is not None and self.is_simulation() == False:
self.open(str(host), port, timeout)
th =threading.Thread(target=self.ReadOutput)
th.start()
time.sleep(1)
if self.attribute.has_key('LOGIN'):
self.login()
self.debuglevel=0
except Exception as e:
self.closeSession()
import traceback
print(traceback.format_exc())
raise e
def rawq_getchar(self):
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
buffer =buffer.encode(encoding='utf-8')
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %r", buffer)
if self.sock:
self.sock.sendall(buffer)
try:
super(winTelnet, self).write()
except:
pass
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print ('Telnet(%s,%s):' % (self.host, self.port),)
if args:
print (msg % args)
else:
print (msg)
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.sock==0 or self.sock==None:
return
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %r", buf)
self.eof = (not buf)
self.rawq = self.rawq + buf
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = ['', '']
try:
while self.rawq:
c = self.rawq_getchar()
if not self.iacseq:
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf[self.sb] = buf[self.sb] + c
continue
else:
self.iacseq += c
elif len(self.iacseq) == 1:
# 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
self.iacseq = ''
if c == IAC:
buf[self.sb] = buf[self.sb] + c
else:
if c == SB: # SB ... SE start.
self.sb = 1
self.sbdataq = ''
elif c == SE:
self.sb = 0
self.sbdataq = self.sbdataq + buf[1]
buf[1] = ''
if self.option_callback:
# Callback is supposed to look into
# the sbdataq
self.option_callback(self.sock, c, NOOPT)
else:
# We can't offer automatic processing of
# suboptions. Alas, we should not get any
# unless we did a WILL/DO before.
self.msg('IAC %d not recognized' % ord(c))
elif len(self.iacseq) == 2:
cmd = self.iacseq[1]
self.iacseq = ''
opt = c
if cmd in (DO, DONT):
self.msg('IAC %s %d',
cmd == DO and 'DO' or 'DONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + WONT + opt)
elif cmd in (WILL, WONT):
self.msg('IAC %s %d',
cmd == WILL and 'WILL' or 'WONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + DONT + opt)
except EOFError: # raised by self.rawq_getchar()
self.iacseq = '' # Reset on EOF
self.sb = 0
pass
self.cookedq = self.cookedq + self.removeSpecChar(buf[0])
self.sbdataq = self.sbdataq + buf[1]
def removeSpecChar(self, inputString):
#^@ \x00 \000 0
#^A \x01 \001 1
#^B \x02 \002 2
#^C \x03 \003 3
#^D \x04 \004 4
#^E \x05 \005 5
#^F \x06 \006 6
#^G \x07 \007 7
#^H \x08 \010 8
#^I \x09 \011 9
#^J \x0a \012 10
#^K \x0b \013 11
#^L \x0c \014 12
#^M \x0d \015 13
#^N \x0e \016 14
#^O \x0f \017 15
#^P \x10 \020 16
#^Q \x11 \021 17
#^R \x12 \022 18
#^S \x13 \023 19
#^T \x14 \024 20
#^U \x15 \025 21
#^V \x16 \026 22
#^W \x17 \027 23
#^X \x18 \030 24
#^Y \x19 \031 25
#^Z \x1a \032 26
#^[ \x1b \033 27
#^\ \x1c \034 28
#^] \x1d \035 29
#^^ \x1e \036 30
inputString = inputString.replace(chr(0x08), '')
inputString = inputString.replace(chr(0x03), '^C')
inputString = inputString.replace(chr(0x04), '^D')
inputString = inputString.replace(chr(0x18), '^X')
return inputString
def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
self.timeout = timeout
if self.is_simulation():
return
else:
self.sock = socket.create_connection((host, port), timeout)
def ReadOutput(self):
maxInterval = 60
if self.timestampCmd ==None:
self.timestampCmd= time.time()
fail_counter = 0
while self.SessionAlive:
try:
#if not self.sock:
# self.relogin()
if self.is_simulation():
if self.get_search_buffer()=='':
self.cookedq = self.fake_in.pop()
else:
if self.sock:
#self.info('time in ReadOutput',time.time(), 'timestampCmd', self.timestampCmd, 'max interval', maxInterval, 'delta', time.time()-self.timestampCmd)
if (time.time()-self.timestampCmd)>maxInterval:
self.write('\r\n')
self.timestampCmd = time.time()
#self.info('anti-idle', fail_counter )
else:
raise Exception('[Errno 10053] An established connection was aborted by the software in your host machine')
self.fill_rawq()
self.cookedq=''
self.process_rawq()
self.checkLine(self.cookedq)
self.lockStreamOut.acquire()
self.streamOut+=self.cookedq
self.lockStreamOut.release()
if self.logfile and self.cookedq.__len__()!=0:
self.logfile.write(self.cookedq)
self.logfile.flush()
#if fail_counter:
# self.info(fail_counter, 'time out error cleared')
fail_counter = 0
except KeyboardInterrupt:
break
except Exception as e:
if self.loginDone:
fail_counter+=1
if self.debuglevel and fail_counter%10==0:
print('\n%s Exception %d:'%(self.name, fail_counter)+e.__str__()+'\n')
if str(e).find('timed out')==-1:
self.error('fail_counter', fail_counter, 'max_output_time_out',self.max_output_time_out, e)
try:
if self.sock:
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
self.open(self.host,self.port,self.timeout)
if self.autoReloginFlag:
fail_counter = 0
th =threading.Thread(target=self.relogin)
th.start()
except Exception as e:
self.error('\n%s Exception: %d:'%(self.name, fail_counter)+e.__str__()+'\n')
if str(e) =='[Errno 10053] An established connection was aborted by the software in your host machine' or '[Errno 9] Bad file descriptor'==str(e) or str(e) =='[Errno 10054] An existing connection was forcibly closed by the remote host':
break
time.sleep(0.2)
self.closeSession()
def closeSession(self):
print('\nquit %s'%self.name)
self.SessionAlive = False
try:
for i in xrange(1,3,1):
self.send('exit')
self.sleep(0.5)
self.send(']',Ctrl=True)
self.send('quit')
self.logfile.flush()
except:
pass
def show(self):
'''return the delta of streamOut since the last call of this function,
and move idxUpdate to the end of streamOut'''
newIndex = self.streamOut.__len__()
result = self.streamOut[self.idxUpdate : newIndex+1]
self.idxUpdate= newIndex
#print('print::%d'%result.__len__())
if result!='':
result= self.colorString(result)
print('\t%s'%(result.replace('\n', '\n\t')))
return result
def relogin(self, retry=1):
#time.sleep(3)
tmp_retry = 0
while tmp_retry< retry:
tmp_retry+=1
self.lockRelogin.acquire()
try:
if self.counterRelogin>0:
self.lockRelogin.release()
return
self.counterRelogin+=1
self.loginDone=False
if self.sock:
self.write('quit\n\r\n')
for i in range(0,3):
self.write('exit')
self.send(']',Ctrl=True)
self.send('quit')
self.send(']',Ctrl=True)
self.send('e')
self.sock.close()
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
self.info('retry login: %d/%d'%(tmp_retry,retry))
self.open(self.host,self.port,self.timeout)
import time
time.sleep(1)
self.login()
self.counterRelogin-=1
self.loginDone=True
break
except Exception as e:
self.counterRelogin-=1
self.lockRelogin.release()
if tmp_retry>retry:
raise e
else:
self.sleep(5)
self.lockRelogin.release()
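# Hedged sketch: removeSpecChar is a pure string transform, so it can be
# exercised without opening a connection by pulling the plain function off
# the class dict (works on both Python 2 and 3):
def _remove_spec_char_demo():
    f = winTelnet.__dict__['removeSpecChar']
    return f(None, 'ab' + chr(0x08) + 'c' + chr(0x03))   # -> 'abc^C'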
|
mit
| 7,032,868,279,089,488,000
| 33.414201
| 256
| 0.504814
| false
| 3.746618
| false
| false
| false
|
ValvePython/steam
|
steam/core/crypto.py
|
1
|
3356
|
"""
All function in this module take and return :class:`bytes`
"""
import sys
from os import urandom as random_bytes
from struct import pack
from base64 import b64decode
from Cryptodome.Hash import MD5, SHA1, HMAC
from Cryptodome.PublicKey.RSA import import_key as rsa_import_key, construct as rsa_construct
from Cryptodome.Cipher import PKCS1_OAEP, PKCS1_v1_5
from Cryptodome.Cipher import AES as AES
class UniverseKey(object):
"""Public keys for Universes"""
Public = rsa_import_key(b64decode("""
MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDf7BrWLBBmLBc1OhSwfFkRf53T
2Ct64+AVzRkeRuh7h3SiGEYxqQMUeYKO6UWiSRKpI2hzic9pobFhRr3Bvr/WARvY
gdTckPv+T1JzZsuVcNfFjrocejN1oWI0Rrtgt4Bo+hOneoo3S57G9F1fOpn5nsQ6
6WOiu4gZKODnFMBCiQIBEQ==
"""))
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * pack('B', BS - len(s) % BS)
if sys.version_info < (3,):
unpad = lambda s: s[0:-ord(s[-1])]
else:
unpad = lambda s: s[0:-s[-1]]
def generate_session_key(hmac_secret=b''):
"""
:param hmac_secret: optional HMAC
:type hmac_secret: :class:`bytes`
:return: (session_key, encrypted_session_key) tuple
:rtype: :class:`tuple`
"""
session_key = random_bytes(32)
encrypted_session_key = PKCS1_OAEP.new(UniverseKey.Public, SHA1)\
.encrypt(session_key + hmac_secret)
return (session_key, encrypted_session_key)
def symmetric_encrypt(message, key):
iv = random_bytes(BS)
return symmetric_encrypt_with_iv(message, key, iv)
def symmetric_encrypt_ecb(message, key):
return AES.new(key, AES.MODE_ECB).encrypt(pad(message))
def symmetric_encrypt_HMAC(message, key, hmac_secret):
prefix = random_bytes(3)
hmac = hmac_sha1(hmac_secret, prefix + message)
iv = hmac[:13] + prefix
return symmetric_encrypt_with_iv(message, key, iv)
def symmetric_encrypt_iv(iv, key):
return AES.new(key, AES.MODE_ECB).encrypt(iv)
def symmetric_encrypt_with_iv(message, key, iv):
encrypted_iv = symmetric_encrypt_iv(iv, key)
cyphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(message))
return encrypted_iv + cyphertext
def symmetric_decrypt(cyphertext, key):
iv = symmetric_decrypt_iv(cyphertext, key)
return symmetric_decrypt_with_iv(cyphertext, key, iv)
def symmetric_decrypt_ecb(cyphertext, key):
return unpad(AES.new(key, AES.MODE_ECB).decrypt(cyphertext))
def symmetric_decrypt_HMAC(cyphertext, key, hmac_secret):
""":raises: :class:`RuntimeError` when HMAC verification fails"""
iv = symmetric_decrypt_iv(cyphertext, key)
message = symmetric_decrypt_with_iv(cyphertext, key, iv)
hmac = hmac_sha1(hmac_secret, iv[-3:] + message)
if iv[:13] != hmac[:13]:
raise RuntimeError("Unable to decrypt message. HMAC does not match.")
return message
def symmetric_decrypt_iv(cyphertext, key):
return AES.new(key, AES.MODE_ECB).decrypt(cyphertext[:BS])
def symmetric_decrypt_with_iv(cyphertext, key, iv):
return unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(cyphertext[BS:]))
def hmac_sha1(secret, data):
return HMAC.new(secret, data, SHA1).digest()
def sha1_hash(data):
return SHA1.new(data).digest()
def md5_hash(data):
return MD5.new(data).digest()
def rsa_publickey(mod, exp):
return rsa_construct((mod, exp))
def pkcs1v15_encrypt(key, message):
return PKCS1_v1_5.new(key).encrypt(message)
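# A minimal round-trip sketch (hedged: assumes pycryptodomex is installed, per
# the imports above; the keys here are random, not ones negotiated with Steam):
def _symmetric_roundtrip_demo():
    key = random_bytes(32)
    assert symmetric_decrypt(symmetric_encrypt(b'hello', key), key) == b'hello'
    secret = random_bytes(16)
    cyphertext = symmetric_encrypt_HMAC(b'hello', key, secret)
    assert symmetric_decrypt_HMAC(cyphertext, key, secret) == b'hello'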
|
mit
| -1,118,242,270,544,146,800
| 30.660377
| 93
| 0.703516
| false
| 2.851317
| false
| false
| false
|
ddu7/PyLC
|
063Unique Paths II.py
|
1
|
1314
|
# -*- coding: utf-8 -*-
# Follow up for "Unique Paths":
#
# Now consider if some obstacles are added to the grids. How many unique paths would there be?
#
# An obstacle and empty space is marked as 1 and 0 respectively in the grid.
#
# For example,
# There is one obstacle in the middle of a 3x3 grid as illustrated below.
#
# [
#   [0,0,0],
#   [0,1,0],
#   [0,0,0]
# ]
# The total number of unique paths is 2.
#
# Worked DP example for a 4x4 grid with an obstacle (*) at row 2, col 2:
#   1 1 1 1
#   1 2 3 4
#   1 3 * 4
#   1 4 4 8
#
# Note: m and n will be at most 100.
# Transform the input grid in place: 0 becomes 1 (free cell, one path) and 1 becomes 0
# (obstacle, no path); then apply the same DP update and return the last cell.
class Solution():
    def uniquePaths(self, grid):
        m = len(grid)
        n = len(grid[0])
        # flip: free cell (0) holds 1 path, obstacle (1) holds 0 paths
        for i in range(0, m):
            for j in range(0, n):
                if grid[i][j] == 0:
                    grid[i][j] = 1
                else:
                    grid[i][j] = 0
        # propagate along the first row and column: cells past an obstacle
        # are unreachable, which the plain in-place update below would miss
        for j in range(1, n):
            grid[0][j] *= grid[0][j - 1]
        for i in range(1, m):
            grid[i][0] *= grid[i - 1][0]
        for i in range(1, m):
            for j in range(1, n):
                if grid[i][j] != 0:
                    grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
                else:
                    grid[i][j] = 0
        return grid[m - 1][n - 1]
print Solution().uniquePaths([
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]
]
)
|
mit
| -4,832,800,005,191,433,000
| 26.711111
| 94
| 0.463082
| false
| 2.645435
| false
| false
| false
|
mbourqui/django-echoices
|
echoices/enums/enums.py
|
1
|
9631
|
import warnings
from enum import Enum, EnumMeta
from types import DynamicClassAttribute
class EChoiceMeta(EnumMeta):
"""
Used to override some methods.
See Also
--------
https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/#restrictions-with-multiple-metaclasses
"""
def __getitem__(cls, value):
try:
# Should always be there (at least in Python 3.5)
return cls._value2member_map_[value]
except AttributeError:
value2member_map_ = {}
for echoice in list(cls):
value2member_map_[echoice.value] = echoice
cls._value2member_map_ = value2member_map_
return cls._value2member_map_[value]
class EChoice(Enum, metaclass=EChoiceMeta):
"""
Custom Enum to ease the usage of choices outside the model.
Works by overriding the default _value_ field. This is done to offer a harmonized interface
when using auto-generated numeric values.
By the way, `value` is now the actual value to be stored in the DB.
Notes
-----
Interface differs slightly from the Enum:
`EChoice.value` returns the actual value to be stored in the DB, while the legacy `Enum.value`
would return the whole tuple used when defining the enumeration item.
Raises
------
AttributeError
in case of duplicated values
See Also
--------
http://stackoverflow.com/a/24105344
"""
def __new__(cls, value, label, *args, **kwargs):
if len(cls) == 0:
cls.__value_type_ = type(value)
# SEE: https://stackoverflow.com/a/35953630/
# SEE: https://docs.djangoproject.com/en/stable/ref/templates/api/#variables-and-lookups
cls.do_not_call_in_templates = True
else:
if type(value) is not cls.__value_type_:
raise TypeError("Incompatible type: {}. All values must be {}.".format(type(value), cls.__value_type_))
if value in [c.value for c in list(cls)]:
raise AttributeError(
"Duplicate value: '{}'. Only unique values are supported in {}.".format(value, EChoice))
obj = object.__new__(cls)
obj._value_ = value # Overrides default _value_
obj._label_ = label
return obj
@DynamicClassAttribute
def label(self):
"""The label of the Enum member."""
return self._label_
@property
def choice(self):
return self.value, self.label
def __call__(self, attr='value'):
"""
        Hack to get the "selected" tag. It does nothing more than return the attribute `attr`; if `attr` is
        a callable, it will be called.
Gets called in `django.forms.boundfield#BoundField.initial`.
Parameters
----------
attr : str
            Not strictly needed (it is somewhat redundant), but since __call__ is implemented anyway, let's add
            a selector for the field to return.
Returns
-------
`attr`, or `attr()` if `attr` is a callable
"""
attr = self.__getattribute__(attr)
if callable(attr):
return attr()
return attr
def __len__(self):
"""
If `len(value)` is supported, returns that length. Otherwise, returns 1.
This is mainly a hack to pass the validations. Since the validation ensures that the value will fit in the DB
field, it applies (solely?) on textual values. So it does no harm to return a non-null constant for a numeric
`value`.
Returns
-------
int : `len(value)` if supported, else 1.
"""
# FIXME: find a way to set it *only* to EChoice with values supporting len()
try:
return len(self.value)
except TypeError:
return 1
@classmethod
def values(cls):
"""
Returns
-------
tuple
of all the values of this Enum
"""
if not hasattr(cls, '__values_'):
cls.__values_ = tuple([c.value for c in list(cls)])
return cls.__values_
@classmethod
def max_value_length(cls):
"""
Not to be used when using numeric values.
Returns
-------
int
the maximal length required by this Enum to be stored in the database
"""
if not hasattr(cls, '__max_value_length_'):
cls.__max_value_length_ = max([len(c.value) for c in list(cls)])
return cls.__max_value_length_
@classmethod
def choices(cls):
"""
Generate the choices as required by Django models.
Returns
-------
tuple
"""
# "natural" order, aka as given when instantiating
if not hasattr(cls, '__choices_'):
cls.__choices_ = tuple([c.choice for c in list(cls)])
return cls.__choices_
@classmethod
def from_value(cls, value):
"""
Return the EChoice object associated with this value, if any.
Parameters
----------
value
In the type of the `value` field, as set when instantiating this EChoice.
Returns
-------
EChoice
Raises
------
KeyError
if `value` does not exist in any element
"""
warnings.warn("{0}.{1} will be deprecated in a future release. "
"Please use {0}.{2} instead".format(cls.__name__, cls.from_value.__name__, cls.get.__name__),
PendingDeprecationWarning)
return cls[value]
@classmethod
def get(cls, value, default=None):
"""
Return the EChoice object associated with this value, else `default`. If default is not given, it defaults to
None, so that this method never raises a KeyError.
Parameters
----------
value
In the type of the `value` field, as set when instantiating this EChoice.
default
Returned if the value is not found.
Returns
-------
EChoice
"""
try:
return cls[value]
except KeyError:
return default
@classmethod
def __getvaluetype__(cls):
return cls.__value_type_
@classmethod
def coerce(cls, other):
"""
        Coerce `other` to the type of the values of this EChoice. Typically, `other` is a string. The intended
        use case is to convert `other` coming from an HTML form, typically a select choice.
Parameters
----------
other : str
Returns
-------
the `other` value in the type of the value of this EChoice.
"""
return cls.__value_type_(other)
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
try:
return self.value < other
except TypeError:
return self.value < self.coerce(other)
def __le__(self, other):
return self < other or self == other
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.value == other.value
try:
return self.value == self.coerce(other)
except (TypeError, ValueError):
return False
def __ge__(self, other):
return self == other or self > other
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
try:
return self.value > other
except TypeError:
return self.value > self.coerce(other)
def __hash__(self):
# Somewhat required since comparison operators are defined
return super().__hash__()
class EOrderedChoice(EChoice):
"""Provide ordering of the elements"""
@classmethod
def choices(cls, order='natural'):
"""
Generate the choices as required by Django models.
Parameters
----------
order : str
in which the elements should be returned. Possible values are:
* 'sorted', the elements will be sorted by `value`
* 'reverse', the elements will be sorted by `value` as if each comparison were
reversed
* 'natural' (default), the elements are ordered as when instantiated in the enumeration
Returns
-------
iterable of tuple
"""
INC, DEC, NAT = 'sorted', 'reverse', 'natural'
options = [INC, DEC, NAT]
assert order in options, "Sorting order not recognized: {}. Available options are: {}".format(order, options)
if order in [INC, DEC]:
reverse = order == DEC
if reverse:
attr = '__choices_reverse_'
else:
attr = '__choices_sorted_'
if not hasattr(cls, attr):
setattr(cls, attr, tuple([(c.value, c.label) for c in sorted(list(cls), reverse=reverse)]))
return getattr(cls, attr)
else:
return super(EOrderedChoice, cls).choices()
class EAutoChoice(EOrderedChoice):
"""
Auto-generated numeric `value`s. Thus support sorting by `value`.
See Also
--------
https://docs.python.org/3.5/library/enum.html#autonumber
"""
def __new__(cls, label, *args, **kwargs):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
obj._label_ = label
return obj
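if __name__ == '__main__':
    # Illustrative sketch (added; not part of the upstream module): a minimal
    # EChoice subclass exercising the API documented above. The Fruit enum and
    # its values are hypothetical.
    class Fruit(EChoice):
        APPLE = ('a', 'Apple')
        BANANA = ('b', 'Banana')
    assert Fruit.choices() == (('a', 'Apple'), ('b', 'Banana'))
    assert Fruit.get('a') is Fruit.APPLE
    assert Fruit.max_value_length() == 1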
|
gpl-3.0
| -4,122,943,399,944,168,000
| 29.003115
| 119
| 0.554979
| false
| 4.545068
| false
| false
| false
|
lagopus/lagopus
|
test/integration_test/tools/lib/ofp/ofp_group_mod.py
|
1
|
1137
|
import os
import sys
import copy
import logging
from checker import *
from .ofp import register_ofp_creators
from .ofp import OfpBase
from .ofp_bucket import SCE_BUCKETS
from .ofp_bucket import OfpBucketCreator
# YAML:
# group_mod:
# command: 0
# type: 0
# group_id: 0
# buckets:
# - bucket:
# weight: 0
# watch_port: 0
# watch_group: 0
# actions
# - output:
# port: 0
SCE_GROUP_MOD = "group_mod"
@register_ofp_creators(SCE_GROUP_MOD)
class OfpGroupModCreator(OfpBase):
@classmethod
def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
# GroupMod.
kws = copy.deepcopy(params)
# buckets.
buckets = []
if SCE_BUCKETS in params:
buckets = OfpBucketCreator.create(test_case_obj,
dp, ofproto,
ofp_parser,
params[SCE_BUCKETS])
kws[SCE_BUCKETS] = buckets
# create GroupMod.
msg = ofp_parser.OFPGroupMod(dp, **kws)
return msg
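# Illustrative sketch (added): the `params` dict handed to create() mirrors the
# YAML documented above, e.g.
# {"command": 0, "type": 0, "group_id": 0,
#  "buckets": [{"bucket": {"weight": 0, "watch_port": 0, "watch_group": 0,
#                          "actions": [{"output": {"port": 0}}]}}]}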
|
apache-2.0
| 6,999,811,155,225,049,000
| 22.204082
| 68
| 0.5365
| false
| 3.679612
| false
| false
| false
|
MatrixGamesHub/mtxPython
|
src/mtxNet/RendererClient.py
|
1
|
4645
|
"""
mtxPython - A framework to create matrix games.
Copyright (C) 2016 Tobias Stampfl <info@matrixgames.rocks>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation in version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import mtx
from .rendererService import RendererService
from .rendererService.ttypes import LevelInfo, Value
from thrift.Thrift import TException
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
class RendererClient(mtx.Renderer):
def __init__(self, host, port):
self._transport = TSocket.TSocket(host, port)
protocol = TBinaryProtocol.TBinaryProtocol(self._transport)
self._client = RendererService.Client(protocol)
self._connected = False
self._host = host
self._port = port
def GetHost(self):
return self._host
def GetPort(self):
return self._port
def Connect(self):
try:
self._transport.open()
self._connected = True
except TTransport.TTransportException:
self._connected = False
return self._connected
def Disconnect(self):
self._transport.close()
self._connected = False
def IsConnected(self):
return self._connected
def _CallClientCommand(self, cmd, *args, **kwargs):
if not self._connected:
return False
try:
cmd(*args, **kwargs)
except TException:
logging.error("Connection to renderer client lost...", exc_info=1)
self.Disconnect()
def ProcessActGroup(self, actGrp):
self._CallClientCommand(self._client.Freeze)
try:
for act in actGrp:
if act.id == mtx.Act.CLEAR:
self._CallClientCommand(self._client.Clear)
elif act.id in mtx.Act.LEVEL:
level = act.level
field = level._field
netField = []
for y in range(field._height):
row = []
for x in range(field._width):
cell = []
for obj in reversed(field._cells[y][x]):
cell.append([obj._id, ord(obj._symbol)])
row.append(cell)
netField.append(row)
if act.id == mtx.Act.LOAD_LEVEL:
self._CallClientCommand(self._client.LoadLevel, netField, LevelInfo(level._name, level._groundTexture, level._wallTexture))
else:
self._CallClientCommand(self._client.ResetLevel, netField)
elif act.id == mtx.Act.UPDATE:
if type(act.value) == str:
value = Value(strValue=act.value)
elif type(act.value) == bool:
value = Value(boolValue=act.value)
elif type(act.value) == int:
value = Value(intValue=act.value)
else:
value = Value(doubleValue=act.value)
self._CallClientCommand(self._client.UpdateObject, act.objId, act.key, value)
elif act.id == mtx.Act.SPAWN:
self._CallClientCommand(self._client.Spawn, act.objId, ord(act.symbol), act.x, act.y)
elif act.id == mtx.Act.REMOVE:
self._CallClientCommand(self._client.Remove, act.objId, act.sourceId)
elif act.id == mtx.Act.COLLECT:
self._CallClientCommand(self._client.Collect, act.objId, act.sourceId)
elif act.id == mtx.Act.MOVE:
self._CallClientCommand(self._client.Move, act.objId, act.direction, act.fromX, act.fromY, act.toX, act.toY)
elif act.id == mtx.Act.JUMP:
self._CallClientCommand(self._client.Jump, act.objId, act.direction, act.fromX, act.fromY, act.toX, act.toY)
finally:
self._CallClientCommand(self._client.Thaw)
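# Illustrative usage sketch (added; not part of the upstream module; host and
# port are hypothetical):
# renderer = RendererClient('localhost', 9090)
# if renderer.Connect():
#     renderer.ProcessActGroup(act_group)  # act_group: an iterable of mtx acts
#     renderer.Disconnect()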
|
gpl-3.0
| -2,398,894,517,825,313,300
| 38.033613
| 147
| 0.575027
| false
| 4.345182
| false
| false
| false
|
WatanukiRasadar/kylin
|
kylin/_injector.py
|
1
|
1117
|
from functools import wraps
from typing import Callable
from ._scope import Scope
class Injector(Callable):
"""
    Callable wrapper that injects the configured dependencies into the decorated function when it is called
"""
def __init__(self, dependencies: dict, fun: Callable):
self.dependencies = dependencies
self.fun = fun
@property
def scope(self) -> Scope:
return Scope()
def __call__(self, *args, **kwargs):
injections = {}
for dependency_name, service_name in self.dependencies.items():
injections[dependency_name] = kwargs.get(dependency_name) or self.scope[service_name]
kwargs.update(injections)
return self.fun(*args, **kwargs)
class Inject(Callable):
"""
    Decorator class that receives the dependency mapping for a callable
"""
__injector__ = Injector
def __init__(self, **dependencies):
self.dependencies = dependencies
def __call__(self, fun: Callable):
def call(*args, **kwargs):
return self.__injector__(self.dependencies, fun).__call__(*args, **kwargs)
return wraps(fun).__call__(call)
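if __name__ == '__main__':
    # Illustrative sketch (added; 'database_service' is a hypothetical service
    # name): passing the dependency explicitly as a keyword bypasses the Scope
    # lookup entirely, which makes the wiring testable without a configured scope.
    @Inject(db='database_service')
    def handler(request, db=None):
        return request, db
    assert handler('req', db='fake-db') == ('req', 'fake-db')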
|
mit
| -8,151,589,430,398,917,000
| 26.925
| 97
| 0.622202
| false
| 4.296154
| false
| false
| false
|
tymofij/adofex
|
transifex/projects/templatetags/project_tags.py
|
1
|
1755
|
from django import template
from django.db.models import Sum
from transifex.languages.models import Language
from transifex.resources.models import RLStats, Resource
from transifex.txcommon.utils import StatBarsPositions
register = template.Library()
@register.inclusion_tag('resources/stats_bar_simple.html')
def progress_for_project(project, language_code=None, width=100):
"""Render a progressbar for the specified project."""
stats = RLStats.objects.by_project(project).filter(
language__code=language_code
).values('language__code').annotate(
trans=Sum('translated'),
untrans=Sum('untranslated')
).order_by()
total = Resource.objects.by_project(project).aggregate(
total_entities=Sum('total_entities')
)['total_entities']
if not stats:
# Project has no resources
bar_data = [
('trans', 0),
('untrans', 100)
]
return {
'untrans_percent': 100,
'trans_percent': 0,
'untrans': 0,
'trans': 0,
'pos': StatBarsPositions(bar_data, width),
'width': width
}
stats = stats[0]
translated = stats['trans']
untranslated = stats['untrans']
try:
translated_perc = translated * 100 / total
except ZeroDivisionError:
translated_perc = 100
untranslated_perc = 100 - translated_perc
bar_data = [
('trans', translated_perc),
('untrans', untranslated_perc)
]
return {
'untrans_percent': untranslated_perc,
'trans_percent': translated_perc,
'untrans': untranslated,
'trans': translated,
'pos': StatBarsPositions(bar_data, width),
'width': width
}
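# Illustrative usage sketch (added): once this tag library is loaded in a
# template, the tag renders the stats bar, e.g.
# {% load project_tags %}
# {% progress_for_project project 'fr' 200 %}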
|
gpl-3.0
| 2,672,739,128,950,525,400
| 27.306452
| 65
| 0.611966
| false
| 4.016018
| false
| false
| false
|
google/google-ctf
|
third_party/edk2/BaseTools/Source/Python/GenFds/FdfParser.py
|
1
|
193525
|
## @file
# parse FDF file
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2015, Hewlett Packard Enterprise Development, L.P.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from re import compile, DOTALL
from string import hexdigits
from uuid import UUID
from Common.BuildToolError import *
from Common import EdkLogger
from Common.Misc import PathClass, tdict, ProcessDuplicatedInf
from Common.StringUtils import NormPath, ReplaceMacro
from Common import GlobalData
from Common.Expression import *
from Common.DataType import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.LongFilePathOs as os
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.RangeExpression import RangeExpression
from collections import OrderedDict
from .Fd import FD
from .Region import Region
from .Fv import FV
from .AprioriSection import AprioriSection
from .FfsInfStatement import FfsInfStatement
from .FfsFileStatement import FileStatement
from .VerSection import VerSection
from .UiSection import UiSection
from .FvImageSection import FvImageSection
from .DataSection import DataSection
from .DepexSection import DepexSection
from .CompressSection import CompressSection
from .GuidSection import GuidSection
from .Capsule import EFI_CERT_TYPE_PKCS7_GUID, EFI_CERT_TYPE_RSA2048_SHA256_GUID, Capsule
from .CapsuleData import CapsuleFfs, CapsulePayload, CapsuleFv, CapsuleFd, CapsuleAnyFile, CapsuleAfile
from .RuleComplexFile import RuleComplexFile
from .RuleSimpleFile import RuleSimpleFile
from .EfiSection import EfiSection
from .OptionRom import OPTIONROM
from .OptRomInfStatement import OptRomInfStatement, OverrideAttribs
from .OptRomFileStatement import OptRomFileStatement
from .GenFdsGlobalVariable import GenFdsGlobalVariable
T_CHAR_CR = '\r'
T_CHAR_TAB = '\t'
T_CHAR_DOUBLE_QUOTE = '\"'
T_CHAR_SINGLE_QUOTE = '\''
T_CHAR_BRACE_R = '}'
SEPARATORS = {TAB_EQUAL_SPLIT, TAB_VALUE_SPLIT, TAB_COMMA_SPLIT, '{', T_CHAR_BRACE_R}
ALIGNMENTS = {"Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K", "64K", "128K",
"256K", "512K", "1M", "2M", "4M", "8M", "16M"}
ALIGNMENT_NOAUTO = ALIGNMENTS - {"Auto"}
CR_LB_SET = {T_CHAR_CR, TAB_LINE_BREAK}
RegionSizePattern = compile("\s*(?P<base>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<size>(?:0x|0X)?[a-fA-F0-9]+)\s*")
RegionSizeGuidPattern = compile("\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*\|\s*(?P<size>\w+\.\w+[\.\w\[\]]*)\s*")
RegionOffsetPcdPattern = compile("\s*(?P<base>\w+\.\w+[\.\w\[\]]*)\s*$")
ShortcutPcdPattern = compile("\s*\w+\s*=\s*(?P<value>(?:0x|0X)?[a-fA-F0-9]+)\s*\|\s*(?P<name>\w+\.\w+)\s*")
BaseAddrValuePattern = compile('^0[xX][0-9a-fA-F]+')
FileExtensionPattern = compile(r'([a-zA-Z][a-zA-Z0-9]*)')
TokenFindPattern = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
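# Illustrative check (added; not part of upstream EDK2): RegionSizePattern
# splits an FDF region layout line such as "0x0|0x10000" into named groups.
_RegionDemo = RegionSizePattern.match("0x0|0x10000")
assert _RegionDemo.group('base') == '0x0' and _RegionDemo.group('size') == '0x10000'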
AllIncludeFileList = []
# Get the closest parent
def GetParentAtLine (Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile
return None
# Check include loop
def IsValidInclude (File, Line):
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line) and Profile.FileName == File:
return False
return True
def GetRealFileLine (File, Line):
InsertedLines = 0
for Profile in AllIncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber and Profile.Level == 1:
InsertedLines += Profile.GetTotalLines()
return (File, Line - InsertedLines)
## The exception class that is used to report error messages when parsing FDF
#
# Currently the "ToolName" is set to be "FdfParser".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
FileLineTuple = GetRealFileLine(File, Line)
self.FileName = FileLineTuple[0]
self.LineNumber = FileLineTuple[1]
self.OriginalLineNumber = Line
self.Message = Str
self.ToolName = 'FdfParser'
def __str__(self):
return self.Message
# helper functions to facilitate consistency in warnings
# each function is for a different common warning
@staticmethod
def Expected(Str, File, Line):
return Warning("expected {}".format(Str), File, Line)
@staticmethod
def ExpectedEquals(File, Line):
return Warning.Expected("'='", File, Line)
@staticmethod
def ExpectedCurlyOpen(File, Line):
return Warning.Expected("'{'", File, Line)
@staticmethod
def ExpectedCurlyClose(File, Line):
return Warning.Expected("'}'", File, Line)
@staticmethod
def ExpectedBracketClose(File, Line):
return Warning.Expected("']'", File, Line)
## The include file content class that is used to record file data when parsing an include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileName = FileName
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
for index, line in enumerate(self.FileLinesList):
if not line.endswith(TAB_LINE_BREAK):
self.FileLinesList[index] += TAB_LINE_BREAK
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.InsertStartLineNumber = None
self.InsertAdjust = 0
self.IncludeFileList = []
self.Level = 1 # first level include file
def GetTotalLines(self):
TotalLines = self.InsertAdjust + len(self.FileLinesList)
for Profile in self.IncludeFileList:
TotalLines += Profile.GetTotalLines()
return TotalLines
def IsLineInFile(self, Line):
if Line >= self.InsertStartLineNumber and Line < self.InsertStartLineNumber + self.GetTotalLines():
return True
return False
def GetLineInFile(self, Line):
if not self.IsLineInFile (Line):
return (self.FileName, -1)
InsertedLines = self.InsertStartLineNumber
for Profile in self.IncludeFileList:
if Profile.IsLineInFile(Line):
return Profile.GetLineInFile(Line)
elif Line >= Profile.InsertStartLineNumber:
InsertedLines += Profile.GetTotalLines()
return (self.FileName, Line - InsertedLines + 1)
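    # Illustrative note (added): if this include was inserted starting at
    # expanded line 10 and spans 5 lines with no nested includes, then
    # IsLineInFile(12) is True and GetLineInFile(12) returns (self.FileName, 3).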
## The FDF content class that is used to record file data when parsing FDF
#
# May raise Exception when opening file.
#
class FileProfile:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
try:
with open(FileName, "r") as fsock:
self.FileLinesList = fsock.readlines()
except:
EdkLogger.error("FdfParser", FILE_OPEN_FAILURE, ExtraData=FileName)
self.FileName = FileName
self.PcdDict = OrderedDict()
self.PcdLocalDict = OrderedDict()
self.InfList = []
self.InfDict = {'ArchTBD':[]}
# ECC will use this Dict and List information
self.PcdFileLineDict = {}
self.InfFileLineList = []
self.FdDict = {}
self.FdNameNotSet = False
self.FvDict = {}
self.CapsuleDict = {}
self.RuleDict = {}
self.OptRomDict = {}
self.FmpPayloadDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures mean these procedures will get the next token first, then make a judgement.
# Get*** procedures mean these procedures will make a judgement on the current token only.
#
class FdfParser:
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.Profile = FileProfile(FileName)
self.FileName = FileName
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
self.CurrentFdName = None
self.CurrentFvName = None
self._Token = ""
self._SkippedChars = ""
GlobalData.gFdfParser = self
# Used to section info
self._CurSection = []
# Key: [section name, UI name, arch]
# Value: {MACRO_NAME: MACRO_VALUE}
self._MacroDict = tdict(True, 3)
self._PcdDict = OrderedDict()
self._WipeOffArea = []
if GenFdsGlobalVariable.WorkSpaceDir == '':
GenFdsGlobalVariable.WorkSpaceDir = os.getenv("WORKSPACE")
## _SkipWhiteSpace() method
#
# Skip white spaces from current char.
#
# @param self The object pointer
#
def _SkipWhiteSpace(self):
while not self._EndOfFile():
if self._CurrentChar() in {TAB_PRINTCHAR_NUL, T_CHAR_CR, TAB_LINE_BREAK, TAB_SPACE_SPLIT, T_CHAR_TAB}:
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
else:
return
return
## _EndOfFile() method
#
    # Judge whether the current buffer position is at the end of the file
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def _EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
if self.CurrentLineNumber > NumberOfLines:
return True
return False
## _EndOfLine() method
#
    # Judge whether the current buffer position is at the end of the line
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def _EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
# @param DestLine Optional new destination line number.
# @param DestOffset Optional new destination offset.
#
def Rewind(self, DestLine = 1, DestOffset = 0):
self.CurrentLineNumber = DestLine
self.CurrentOffsetWithinLine = DestOffset
## _UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def _UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self._CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## _GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def _GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## _CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def _CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## _NextChar() method
#
# Get the one char pass the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def _NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## _SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def _SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## _CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def _CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def _StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
if not self.Profile.FileLinesList:
EdkLogger.error('FdfParser', FILE_READ_FAILURE, 'The file is empty!', File=self.FileName)
self.Profile.FileLinesList[-1].append(' ')
def _ReplaceFragment(self, StartPos, EndPos, Value = ' '):
if StartPos[0] == EndPos[0]:
Offset = StartPos[1]
while Offset <= EndPos[1]:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
return
Offset = StartPos[1]
while self.Profile.FileLinesList[StartPos[0]][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
Line = StartPos[0]
while Line < EndPos[0]:
Offset = 0
while self.Profile.FileLinesList[Line][Offset] not in CR_LB_SET:
self.Profile.FileLinesList[Line][Offset] = Value
Offset += 1
Line += 1
Offset = 0
while Offset <= EndPos[1]:
self.Profile.FileLinesList[EndPos[0]][Offset] = Value
Offset += 1
def _SetMacroValue(self, Macro, Value):
if not self._CurSection:
return
MacroDict = {}
if not self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]:
self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]] = MacroDict
else:
MacroDict = self._MacroDict[self._CurSection[0], self._CurSection[1], self._CurSection[2]]
MacroDict[Macro] = Value
def _GetMacroValue(self, Macro):
# Highest priority
if Macro in GlobalData.gCommandLineDefines:
return GlobalData.gCommandLineDefines[Macro]
if Macro in GlobalData.gGlobalDefines:
return GlobalData.gGlobalDefines[Macro]
if self._CurSection:
MacroDict = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if MacroDict and Macro in MacroDict:
return MacroDict[Macro]
# Lowest priority
if Macro in GlobalData.gPlatformDefines:
return GlobalData.gPlatformDefines[Macro]
return None
def _SectionHeaderParser(self, Section):
# [Defines]
# [FD.UiName]: use dummy instead if UI name is optional
# [FV.UiName]
# [Capsule.UiName]
# [Rule]: don't take rule section into account, macro is not allowed in this section
# [OptionRom.DriverName]
self._CurSection = []
Section = Section.strip()[1:-1].upper().replace(' ', '').strip(TAB_SPLIT)
ItemList = Section.split(TAB_SPLIT)
Item = ItemList[0]
if Item == '' or Item == 'RULE':
return
if Item == TAB_COMMON_DEFINES.upper():
self._CurSection = [TAB_COMMON, TAB_COMMON, TAB_COMMON]
elif len(ItemList) > 1:
self._CurSection = [ItemList[0], ItemList[1], TAB_COMMON]
elif len(ItemList) > 0:
self._CurSection = [ItemList[0], 'DUMMY', TAB_COMMON]
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
# In the end, rewind the file buffer pointer to the beginning
# BUGBUG: No !include statement processing contained in this procedure
# !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
def PreprocessFile(self):
self.Rewind()
InComment = False
DoubleSlashComment = False
HashComment = False
# HashComment in quoted string " " is ignored.
InString = False
while not self._EndOfFile():
if self._CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
InString = not InString
# meet new line, then no longer in a comment for // and '#'
if self._CurrentChar() == TAB_LINE_BREAK:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
if InComment and HashComment:
InComment = False
HashComment = False
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self._CurrentChar() == TAB_STAR and self._NextChar() == TAB_BACK_SLASH:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
# check for // comment
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_BACK_SLASH and not self._EndOfLine():
InComment = True
DoubleSlashComment = True
# check for '#' comment
elif self._CurrentChar() == TAB_COMMENT_SPLIT and not self._EndOfLine() and not InString:
InComment = True
HashComment = True
# check for /* comment start
elif self._CurrentChar() == TAB_BACK_SLASH and self._NextChar() == TAB_STAR:
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
self._SetCurrentCharValue(TAB_SPACE_SPLIT)
self._GetOneChar()
InComment = True
else:
self._GetOneChar()
# restore from ListOfList to ListOfString
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
self.Rewind()
## PreprocessIncludeFile() method
#
# Preprocess file contents, replace !include statements with file contents.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessIncludeFile(self):
# nested include support
Processed = False
MacroDict = {}
while self._GetNextToken():
if self._Token == TAB_DEFINE:
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
MacroDict[Macro] = Value
elif self._Token == TAB_INCLUDE:
Processed = True
IncludeLine = self.CurrentLineNumber
IncludeOffset = self.CurrentOffsetWithinLine - len(TAB_INCLUDE)
if not self._GetNextToken():
raise Warning.Expected("include file name", self.FileName, self.CurrentLineNumber)
IncFileName = self._Token
PreIndex = 0
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1:
Macro = IncFileName[StartPos+2: EndPos]
MacroVal = self._GetMacroValue(Macro)
if not MacroVal:
if Macro in MacroDict:
MacroVal = MacroDict[Macro]
if MacroVal is not None:
IncFileName = IncFileName.replace('$(' + Macro + ')', MacroVal, 1)
if MacroVal.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroVal)
else:
raise Warning("The Macro %s is not defined" %Macro, self.FileName, self.CurrentLineNumber)
StartPos = IncFileName.find('$(', PreIndex)
EndPos = IncFileName.find(')', StartPos+2)
IncludedFile = NormPath(IncFileName)
#
# First search the include file under the same directory as FDF file
#
IncludedFile1 = PathClass(IncludedFile, os.path.dirname(self.FileName))
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Then search the include file under the same directory as DSC file
#
PlatformDir = ''
if GenFdsGlobalVariable.ActivePlatform:
PlatformDir = GenFdsGlobalVariable.ActivePlatform.Dir
elif GlobalData.gActivePlatform:
PlatformDir = GlobalData.gActivePlatform.MetaFile.Dir
IncludedFile1 = PathClass(IncludedFile, PlatformDir)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
#
# Also search file under the WORKSPACE directory
#
IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
ErrorCode = IncludedFile1.Validate()[0]
if ErrorCode != 0:
raise Warning("The include file does not exist under below directories: \n%s\n%s\n%s\n"%(os.path.dirname(self.FileName), PlatformDir, GlobalData.gWorkspace),
self.FileName, self.CurrentLineNumber)
if not IsValidInclude (IncludedFile1.Path, self.CurrentLineNumber):
raise Warning("The include file {0} is causing a include loop.\n".format (IncludedFile1.Path), self.FileName, self.CurrentLineNumber)
IncFileProfile = IncludeFileProfile(IncludedFile1.Path)
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
# list index of the insertion, note that line number is 'CurrentLine + 1'
InsertAtLine = CurrentLine
ParentProfile = GetParentAtLine (CurrentLine)
if ParentProfile is not None:
ParentProfile.IncludeFileList.insert(0, IncFileProfile)
IncFileProfile.Level = ParentProfile.Level + 1
IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
            # deal with remaining portions after "!include filename", if any exist.
if self._GetNextToken():
if self.CurrentLineNumber == CurrentLine:
RemainingLine = self._CurrentLine()[CurrentOffset:]
self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
IncFileProfile.InsertAdjust += 1
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
for Line in IncFileProfile.FileLinesList:
self.Profile.FileLinesList.insert(InsertAtLine, Line)
self.CurrentLineNumber += 1
InsertAtLine += 1
# reversely sorted to better determine error in file
AllIncludeFileList.insert(0, IncFileProfile)
# comment out the processed include file statement
TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
TempList.insert(IncludeOffset, TAB_COMMENT_SPLIT)
self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
if Processed: # Nested and back-to-back support
self.Rewind(DestLine = IncFileProfile.InsertStartLineNumber - 1)
Processed = False
# Preprocess done.
self.Rewind()
@staticmethod
def _GetIfListCurrentItemStat(IfList):
if len(IfList) == 0:
return True
for Item in IfList:
if Item[1] == False:
return False
return True
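    # Illustrative note (added): each IfList entry is [Pos, CondSatisfied,
    # BranchDetermined]; a stack like [[(10, 0), True, True]] keeps the current
    # text, while any entry whose CondSatisfied is False makes this return
    # False so the guarded text is later wiped off.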
## PreprocessConditionalStatement() method
#
# Preprocess conditional statement.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessConditionalStatement(self):
# IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
IfList = []
RegionLayoutLine = 0
ReplacedLine = -1
while self._GetNextToken():
# Determine section name and the location dependent macro
if self._GetIfListCurrentItemStat(IfList):
if self._Token.startswith(TAB_SECTION_START):
Header = self._Token
if not self._Token.endswith(TAB_SECTION_END):
self._SkipToToken(TAB_SECTION_END)
Header += self._SkippedChars
if Header.find('$(') != -1:
raise Warning("macro cannot be used in section header", self.FileName, self.CurrentLineNumber)
self._SectionHeaderParser(Header)
continue
# Replace macros except in RULE section or out of section
elif self._CurSection and ReplacedLine != self.CurrentLineNumber:
ReplacedLine = self.CurrentLineNumber
self._UndoToken()
CurLine = self.Profile.FileLinesList[ReplacedLine - 1]
PreIndex = 0
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
while StartPos != -1 and EndPos != -1 and self._Token not in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF, TAB_ELSE_IF}:
MacroName = CurLine[StartPos+2: EndPos]
MacroValue = self._GetMacroValue(MacroName)
if MacroValue is not None:
CurLine = CurLine.replace('$(' + MacroName + ')', MacroValue, 1)
if MacroValue.find('$(') != -1:
PreIndex = StartPos
else:
PreIndex = StartPos + len(MacroValue)
else:
PreIndex = EndPos + 1
StartPos = CurLine.find('$(', PreIndex)
EndPos = CurLine.find(')', StartPos+2)
self.Profile.FileLinesList[ReplacedLine - 1] = CurLine
continue
if self._Token == TAB_DEFINE:
if self._GetIfListCurrentItemStat(IfList):
if not self._CurSection:
raise Warning("macro cannot be defined in Rule section or out of section", self.FileName, self.CurrentLineNumber)
DefineLine = self.CurrentLineNumber - 1
DefineOffset = self.CurrentOffsetWithinLine - len(TAB_DEFINE)
if not self._GetNextToken():
raise Warning.Expected("Macro name", self.FileName, self.CurrentLineNumber)
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
self._SetMacroValue(Macro, Value)
self._WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == 'SET':
if not self._GetIfListCurrentItemStat(IfList):
continue
SetLine = self.CurrentLineNumber - 1
SetOffset = self.CurrentOffsetWithinLine - len('SET')
PcdPair = self._GetNextPcdSettings()
PcdName = "%s.%s" % (PcdPair[1], PcdPair[0])
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
self._PcdDict[PcdName] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
self._WipeOffArea.append(((SetLine, SetOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_IF_DEF, TAB_IF_N_DEF, TAB_IF}:
IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
IfList.append([IfStartPos, None, None])
CondLabel = self._Token
Expression = self._GetExpression()
if CondLabel == TAB_IF:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
else:
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'in')
if CondLabel == TAB_IF_N_DEF:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token in {TAB_ELSE_IF, TAB_ELSE}:
ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self._Token))
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
IfList[-1] = [ElseStartPos, False, True]
self._WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], ElseStartPos))
IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
if self._Token == TAB_ELSE_IF:
Expression = self._GetExpression()
ConditionSatisfied = self._EvaluateConditional(Expression, IfList[-1][0][0] + 1, 'eval')
IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
if IfList[-1][1]:
if IfList[-1][2]:
IfList[-1][1] = False
else:
IfList[-1][2] = True
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self._Token == '!endif':
if len(IfList) <= 0:
raise Warning("Missing !if statement", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
self._WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self._WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
IfList.pop()
elif not IfList: # Don't use PCDs inside conditional directive
if self.CurrentLineNumber <= RegionLayoutLine:
# Don't try the same line twice
continue
SetPcd = ShortcutPcdPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if SetPcd:
self._PcdDict[SetPcd.group('name')] = SetPcd.group('value')
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSize = RegionSizePattern.match(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if not RegionSize:
RegionLayoutLine = self.CurrentLineNumber
continue
RegionSizeGuid = RegionSizeGuidPattern.match(self.Profile.FileLinesList[self.CurrentLineNumber])
if not RegionSizeGuid:
RegionLayoutLine = self.CurrentLineNumber + 1
continue
self._PcdDict[RegionSizeGuid.group('base')] = RegionSize.group('base')
self._PcdDict[RegionSizeGuid.group('size')] = RegionSize.group('size')
RegionLayoutLine = self.CurrentLineNumber + 1
if IfList:
raise Warning("Missing !endif", self.FileName, self.CurrentLineNumber)
self.Rewind()
def _CollectMacroPcd(self):
MacroDict = {}
# PCD macro
MacroDict.update(GlobalData.gPlatformPcds)
MacroDict.update(self._PcdDict)
# Lowest priority
MacroDict.update(GlobalData.gPlatformDefines)
if self._CurSection:
# Defines macro
ScopeMacro = self._MacroDict[TAB_COMMON, TAB_COMMON, TAB_COMMON]
if ScopeMacro:
MacroDict.update(ScopeMacro)
# Section macro
ScopeMacro = self._MacroDict[
self._CurSection[0],
self._CurSection[1],
self._CurSection[2]
]
if ScopeMacro:
MacroDict.update(ScopeMacro)
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
for Item in GlobalData.BuildOptionPcd:
if isinstance(Item, tuple):
continue
PcdName, TmpValue = Item.split(TAB_EQUAL_SPLIT)
TmpValue = BuildOptionValue(TmpValue, {})
MacroDict[PcdName.strip()] = TmpValue
# Highest priority
return MacroDict
def _EvaluateConditional(self, Expression, Line, Op = None, Value = None):
MacroPcdDict = self._CollectMacroPcd()
if Op == 'eval':
try:
if Value:
return ValueExpression(Expression, MacroPcdDict)(True)
else:
return ValueExpression(Expression, MacroPcdDict)()
except WrnExpression as Excpt:
#
# Catch expression evaluation warning here. We need to report
# the precise number of line and return the evaluation result
#
EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
File=self.FileName, ExtraData=self._CurrentLine(),
Line=Line)
return Excpt.result
except Exception as Excpt:
if hasattr(Excpt, 'Pcd'):
if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
Info = GlobalData.gPlatformOtherPcds[Excpt.Pcd]
raise Warning("Cannot use this PCD (%s) in an expression as"
" it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
" of the DSC file (%s), and it is currently defined in this section:"
" %s, line #: %d." % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE'], Info[0], Info[1]),
self.FileName, Line)
else:
raise Warning("PCD (%s) is not defined in DSC file (%s)" % (Excpt.Pcd, GlobalData.gPlatformOtherPcds['DSCFILE']),
self.FileName, Line)
else:
raise Warning(str(Excpt), self.FileName, Line)
else:
if Expression.startswith('$(') and Expression[-1] == ')':
Expression = Expression[2:-1]
return Expression in MacroPcdDict
## _IsToken() method
#
    # Check whether the input string is found from the current char position onward
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsToken(self, String, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _IsKeyword() method
#
    # Check whether the input keyword is found from the current char position onward, whole word only!
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def _IsKeyword(self, KeyWord, IgnoreCase = False):
self._SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(KeyWord.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(KeyWord)
if index == 0:
followingChar = self._CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPARATORS:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
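    # Illustrative contrast (added): with the buffer positioned at "SETTING = 1",
    # _IsToken("SET") matches the prefix and advances, while _IsKeyword("SET")
    # returns False because the following char 'T' is neither whitespace nor a
    # separator.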
def _GetExpression(self):
Line = self.Profile.FileLinesList[self.CurrentLineNumber - 1]
Index = len(Line) - 1
while Line[Index] in CR_LB_SET:
Index -= 1
ExpressionString = self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:Index+1]
self.CurrentOffsetWithinLine += len(ExpressionString)
ExpressionString = ExpressionString.strip()
return ExpressionString
## _GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def _GetNextWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
def _GetNextPcdWord(self):
self._SkipWhiteSpace()
if self._EndOfFile():
return False
TempChar = self._CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
while not self._EndOfLine():
TempChar = self._CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-' or TempChar == TAB_SECTION_START or TempChar == TAB_SECTION_END:
self._GetOneChar()
else:
break
self._Token = self._CurrentLine()[StartPos: self.CurrentOffsetWithinLine]
return True
return False
## _GetNextToken() method
#
# Get next token unit before a separator
# If found, the string value is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a token unit, file buffer pointer moved forward
# @retval False Not able to find a token unit, file buffer pointer not changed
#
def _GetNextToken(self):
# Skip leading spaces, if exist.
self._SkipWhiteSpace()
if self._EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
StartLine = self.CurrentLineNumber
while StartLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
# Try to find the end char that is not a space and not in separator tuple.
# That is, when we got a space or any char in the tuple, we got the end of token.
if not str(TempChar).isspace() and TempChar not in SEPARATORS:
self._GetOneChar()
# if we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char. Normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
self._GetOneChar()
break
else:
break
# else:
# return False
EndPos = self.CurrentOffsetWithinLine
if self.CurrentLineNumber != StartLine:
EndPos = len(self.Profile.FileLinesList[StartLine-1])
self._Token = self.Profile.FileLinesList[StartLine-1][StartPos: EndPos]
if self._Token.lower() in {TAB_IF, TAB_END_IF, TAB_ELSE_IF, TAB_ELSE, TAB_IF_DEF, TAB_IF_N_DEF, TAB_ERROR, TAB_INCLUDE}:
self._Token = self._Token.lower()
if StartPos != self.CurrentOffsetWithinLine:
return True
else:
return False
## _GetNextGuid() method
#
    # Get the next registry format GUID before a separator
# If found, the GUID string is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def _GetNextGuid(self):
if not self._GetNextToken():
return False
if GlobalData.gGuidPattern.match(self._Token) is not None:
return True
else:
self._UndoToken()
return False
@staticmethod
def _Verify(Name, Value, Scope):
# value verification only applies to numeric values.
if Scope not in TAB_PCD_NUMERIC_TYPES:
return
ValueNumber = 0
try:
ValueNumber = int(Value, 0)
except:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value is not valid dec or hex number for %s." % Name)
if ValueNumber < 0:
EdkLogger.error("FdfParser", FORMAT_INVALID, "The value can't be set to negative value for %s." % Name)
if ValueNumber > MAX_VAL_TYPE[Scope]:
EdkLogger.error("FdfParser", FORMAT_INVALID, "Too large value for %s." % Name)
return True
## _UndoToken() method
#
# Go back one token unit in file buffer
#
# @param self The object pointer
#
def _UndoToken(self):
self._UndoOneChar()
while self._CurrentChar().isspace():
if not self._UndoOneChar():
self._GetOneChar()
return
StartPos = self.CurrentOffsetWithinLine
CurrentLine = self.CurrentLineNumber
while CurrentLine == self.CurrentLineNumber:
TempChar = self._CurrentChar()
# Try to find the end char that is not a space and not in separator tuple.
# That is, when we got a space or any char in the tuple, we got the end of token.
if not str(TempChar).isspace() and not TempChar in SEPARATORS:
if not self._UndoOneChar():
return
# if we happen to meet a separator as the first char, we must proceed to get it.
            # That is, we get a token that is a separator char. Normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPARATORS:
return
else:
break
self._GetOneChar()
## _GetNextHexNumber() method
#
# Get next HEX data before a separator
# If found, the HEX data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def _GetNextHexNumber(self):
if not self._GetNextToken():
return False
if GlobalData.gHexPatternAll.match(self._Token):
return True
else:
self._UndoToken()
return False
## _GetNextDecimalNumber() method
#
# Get next decimal data before a separator
# If found, the decimal data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def _GetNextDecimalNumber(self):
if not self._GetNextToken():
return False
if self._Token.isdigit():
return True
else:
self._UndoToken()
return False
def _GetNextPcdSettings(self):
if not self._GetNextWord():
raise Warning.Expected("<PcdTokenSpaceCName>", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self._Token
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected(".", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("<PcdCName>", self.FileName, self.CurrentLineNumber)
pcdCName = self._Token
Fields = []
while self._IsToken(TAB_SPLIT):
if not self._GetNextPcdWord():
raise Warning.Expected("Pcd Fields", self.FileName, self.CurrentLineNumber)
Fields.append(self._Token)
return (pcdCName, pcdTokenSpaceCName,TAB_SPLIT.join(Fields))
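    # Illustrative note (added): for the input "gTokenSpaceGuid.PcdFoo.Field1.Field2"
    # this returns ("PcdFoo", "gTokenSpaceGuid", "Field1.Field2").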
## _GetStringData() method
#
# Get string contents quoted in ""
    # If found, the string data is put into self._Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def _GetStringData(self):
QuoteToUse = None
if self._Token.startswith(T_CHAR_DOUBLE_QUOTE) or self._Token.startswith("L\""):
QuoteToUse = T_CHAR_DOUBLE_QUOTE
elif self._Token.startswith(T_CHAR_SINGLE_QUOTE) or self._Token.startswith("L\'"):
QuoteToUse = T_CHAR_SINGLE_QUOTE
else:
return False
self._UndoToken()
self._SkipToToken(QuoteToUse)
currentLineNumber = self.CurrentLineNumber
if not self._SkipToToken(QuoteToUse):
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning(QuoteToUse, self.FileName, self.CurrentLineNumber)
self._Token = self._SkippedChars.rstrip(QuoteToUse)
return True
## _SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self._SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def _SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self._SkippedChars = ""
while not self._EndOfFile():
index = -1
if IgnoreCase:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].upper().find(String.upper())
else:
index = self._CurrentLine()[self.CurrentOffsetWithinLine: ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self._SkippedChars += String
return True
self._SkippedChars += str(self._CurrentChar())
self._GetOneChar()
self.SetFileBufferPos(StartPos)
self._SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
## Preprocess() method
#
# Preprocess comment, conditional directive, include directive, replace macro.
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def Preprocess(self):
self._StringToList()
self.PreprocessFile()
self.PreprocessIncludeFile()
self._StringToList()
self.PreprocessFile()
self.PreprocessConditionalStatement()
self._StringToList()
for Pos in self._WipeOffArea:
self._ReplaceFragment(Pos[0], Pos[1])
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
while self._GetDefines():
pass
## ParseFile() method
#
# Parse the file profile buffer to extract fd, fv ... information
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def ParseFile(self):
try:
self.Preprocess()
self._GetError()
#
# Keep processing sections of the FDF until no new sections or a syntax error is found
#
while self._GetFd() or self._GetFv() or self._GetFmp() or self._GetCapsule() or self._GetRule() or self._GetOptionRom():
pass
except Warning as X:
self._UndoToken()
# At this point, the closest parent would be the included file itself
Profile = GetParentAtLine(X.OriginalLineNumber)
if Profile is not None:
X.Message += ' near line %d, column %d: %s' \
% (X.LineNumber, 0, Profile.FileLinesList[X.LineNumber-1])
else:
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
X.Message += ' near line %d, column %d: %s' \
% (FileLineTuple[1], self.CurrentOffsetWithinLine + 1, self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:].rstrip(TAB_LINE_BREAK).rstrip(T_CHAR_CR))
raise
## SectionParser() method
#
# Parse the file section info
# Exception will be raised if syntax error found
#
# @param self The object pointer
# @param section The section string
def SectionParser(self, section):
S = section.upper()
if not S.startswith("[DEFINES") and not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM.") and not S.startswith('[FMPPAYLOAD.'):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [Rule.], [OptionRom.], [FMPPAYLOAD.])", self.FileName, self.CurrentLineNumber)
## _GetDefines() method
#
# Get Defines section contents and store its data into AllMacrosList
#
# @param self The object pointer
# @retval True Successfully find a Defines
# @retval False Not able to find a Defines
#
def _GetDefines(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[DEFINES"):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[DEFINES", True):
raise Warning.Expected("[DEFINES", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
while self._GetNextWord():
# handle the SET statement
if self._Token == 'SET':
self._UndoToken()
self._GetSetStatement(None)
continue
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.startswith(TAB_SECTION_START):
raise Warning.Expected("MACRO value", self.FileName, self.CurrentLineNumber)
Value = self._Token
return False
    ## _GetError() method
    #
    #   Scan the file for !error (TAB_ERROR) statements and report them through EdkLogger
    #
    #   @param  self        The object pointer
    #
    def _GetError(self):
        # Save the current position so it can be restored after the scan
        CurrentLine = self.CurrentLineNumber
        CurrentOffset = self.CurrentOffsetWithinLine
while self._GetNextToken():
if self._Token == TAB_ERROR:
EdkLogger.error('FdfParser', ERROR_STATEMENT, self._CurrentLine().replace(TAB_ERROR, '', 1), File=self.FileName, Line=self.CurrentLineNumber)
self.CurrentLineNumber = CurrentLine
self.CurrentOffsetWithinLine = CurrentOffset
## _GetFd() method
#
# Get FD section contents and store its data into FD dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FD
# @retval False Not able to find a FD
#
def _GetFd(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FD."):
if not S.startswith("[FV.") and not S.startswith('[FMPPAYLOAD.') and not S.startswith("[CAPSULE.") \
and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section", self.FileName, self.CurrentLineNumber)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FD.", True):
raise Warning.Expected("[FD.]", self.FileName, self.CurrentLineNumber)
FdName = self._GetUiName()
if FdName == "":
if len (self.Profile.FdDict) == 0:
FdName = GenFdsGlobalVariable.PlatformName
if FdName == "" and GlobalData.gActivePlatform:
FdName = GlobalData.gActivePlatform.PlatformName
self.Profile.FdNameNotSet = True
else:
raise Warning.Expected("FdName in [FD.] section", self.FileName, self.CurrentLineNumber)
self.CurrentFdName = FdName.upper()
if self.CurrentFdName in self.Profile.FdDict:
raise Warning("Unexpected the same FD name", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FdObj = FD()
FdObj.FdUiName = self.CurrentFdName
self.Profile.FdDict[self.CurrentFdName] = FdObj
if len (self.Profile.FdDict) > 1 and self.Profile.FdNameNotSet:
raise Warning.Expected("all FDs have their name", self.FileName, self.CurrentLineNumber)
Status = self._GetCreateFile(FdObj)
if not Status:
raise Warning("FD name error", self.FileName, self.CurrentLineNumber)
while self._GetTokenStatements(FdObj):
pass
for Attr in ("BaseAddress", "Size", "ErasePolarity"):
if getattr(FdObj, Attr) is None:
self._GetNextToken()
raise Warning("Keyword %s missing" % Attr, self.FileName, self.CurrentLineNumber)
if not FdObj.BlockSizeList:
FdObj.BlockSizeList.append((1, FdObj.Size, None))
self._GetDefineStatements(FdObj)
self._GetSetStatements(FdObj)
if not self._GetRegionLayout(FdObj):
raise Warning.Expected("region layout", self.FileName, self.CurrentLineNumber)
while self._GetRegionLayout(FdObj):
pass
return True
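    # Illustrative only -- a minimal [FD.] section accepted by _GetFd() (all
    # names and values are hypothetical, not taken from any real platform):
    #
    #   [FD.MyFd]
    #   BaseAddress   = 0xFF800000
    #   Size          = 0x00800000
    #   ErasePolarity = 1
    #   BlockSize     = 0x1000
    #   NumBlocks     = 0x800
    #
    # BaseAddress, Size and ErasePolarity are mandatory (checked above); when
    # no BlockSize is given, a single (1, Size, None) entry is assumed.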
## _GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def _GetUiName(self):
Name = ""
if self._GetNextWord():
Name = self._Token
return Name
## _GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
# @retval FdName UI name
#
def _GetCreateFile(self, Obj):
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
FileName = self._Token
Obj.CreateFileName = FileName
return True
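    ## SetPcdLocalation() method
    #
    #   Record the file name and line number at which a PCD value is set
    #
    #   @param  self        The object pointer
    #   @param  pcdpair     (PcdCName, PcdTokenSpaceCName) tuple being recorded
    #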
    def SetPcdLocalation(self, pcdpair):
        self.Profile.PcdLocalDict[pcdpair] = (self.Profile.FileName, self.CurrentLineNumber)
## _GetTokenStatements() method
#
# Get token statements
#
# @param self The object pointer
# @param Obj for whom token statement is got
#
def _GetTokenStatements(self, Obj):
if self._IsKeyword("BaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex base address", self.FileName, self.CurrentLineNumber)
Obj.BaseAddress = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.BaseAddressPcd = pcdPair
self.Profile.PcdDict[pcdPair] = Obj.BaseAddress
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
return True
if self._IsKeyword("Size"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex size", self.FileName, self.CurrentLineNumber)
Size = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
pcdPair = self._GetNextPcdSettings()
Obj.SizePcd = pcdPair
self.Profile.PcdDict[pcdPair] = Size
self.SetPcdLocalation(pcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
Obj.Size = int(Size, 0)
return True
if self._IsKeyword("ErasePolarity"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Erase Polarity", self.FileName, self.CurrentLineNumber)
if not self._Token in {"1", "0"}:
raise Warning.Expected("1 or 0 Erase Polarity", self.FileName, self.CurrentLineNumber)
Obj.ErasePolarity = self._Token
return True
return self._GetBlockStatements(Obj)
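    # Illustrative only -- a token statement may bind its value to a PCD after
    # a '|' separator (the PCD names here are hypothetical):
    #
    #   BaseAddress = 0xFF800000 |gMyTokenSpaceGuid.PcdFdBaseAddress
    #   Size        = 0x00800000 |gMyTokenSpaceGuid.PcdFdSize
    #
    # The parsed value is then recorded under that PCD in self.Profile.PcdDict.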
## _GetAddressStatements() method
#
# Get address statements
#
# @param self The object pointer
# @param Obj for whom address statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetAddressStatements(self, Obj):
if self._IsKeyword("BsBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
BsAddress = int(self._Token, 0)
Obj.BsBaseAddress = BsAddress
if self._IsKeyword("RtBaseAddress"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("address", self.FileName, self.CurrentLineNumber)
RtAddress = int(self._Token, 0)
Obj.RtBaseAddress = RtAddress
## _GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
#
def _GetBlockStatements(self, Obj):
IsBlock = False
while self._GetBlockStatement(Obj):
IsBlock = True
Item = Obj.BlockSizeList[-1]
if Item[0] is None or Item[1] is None:
raise Warning.Expected("block statement", self.FileName, self.CurrentLineNumber)
return IsBlock
## _GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetBlockStatement(self, Obj):
if not self._IsKeyword("BlockSize"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex or Integer block size", self.FileName, self.CurrentLineNumber)
BlockSize = self._Token
BlockSizePcd = None
if self._IsToken(TAB_VALUE_SPLIT):
PcdPair = self._GetNextPcdSettings()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockSize = int(BlockSize, 0)
BlockNumber = None
if self._IsKeyword("NumBlocks"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextDecimalNumber() and not self._GetNextHexNumber():
raise Warning.Expected("block numbers", self.FileName, self.CurrentLineNumber)
BlockNumber = int(self._Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
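    # Illustrative only -- a block statement pair as accepted above (values
    # hypothetical):
    #
    #   BlockSize = 0x10000 |gMyTokenSpaceGuid.PcdFdBlockSize
    #   NumBlocks = 0x80
    #
    # Each pair is appended to Obj.BlockSizeList as (BlockSize, BlockNumber,
    # BlockSizePcd).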
## _GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatements(self, Obj):
while self._GetDefineStatement(Obj):
pass
## _GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetDefineStatement(self, Obj):
if self._IsKeyword(TAB_DEFINE):
self._GetNextToken()
Macro = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
Value = self._Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
## _GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatements(self, Obj):
while self._GetSetStatement(Obj):
pass
## _GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetSetStatement(self, Obj):
if self._IsKeyword("SET"):
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
Value = self._GetExpression()
Value = self._EvaluateConditional(Value, self.CurrentLineNumber, 'eval', True)
if Obj:
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
self.SetPcdLocalation(PcdPair)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
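    # Illustrative only -- a SET statement as parsed above (the PCD name is
    # hypothetical):
    #
    #   SET gMyTokenSpaceGuid.PcdMyFeatureFlag = TRUE
    #
    # The right-hand side is an expression and is evaluated through
    # _EvaluateConditional() before being stored.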
## _CalcRegionExpr(self)
#
# Calculate expression for offset or size of a region
#
# @return: None if invalid expression
# Calculated number if successfully
#
def _CalcRegionExpr(self):
StartPos = self.GetFileBufferPos()
Expr = ''
PairCount = 0
while not self._EndOfFile():
CurCh = self._CurrentChar()
if CurCh == '(':
PairCount += 1
elif CurCh == ')':
PairCount -= 1
if CurCh in '|\r\n' and PairCount == 0:
break
Expr += CurCh
self._GetOneChar()
try:
return int(
ValueExpression(Expr,
self._CollectMacroPcd()
)(True), 0)
except Exception:
self.SetFileBufferPos(StartPos)
return None
## _GetRegionLayout() method
#
# Get region layout for FD
#
# @param self The object pointer
# @param theFd for whom region is got
# @retval True Successfully find
# @retval False Not able to find
#
def _GetRegionLayout(self, theFd):
Offset = self._CalcRegionExpr()
if Offset is None:
return False
RegionObj = Region()
RegionObj.Offset = Offset
theFd.RegionList.append(RegionObj)
if not self._IsToken(TAB_VALUE_SPLIT):
raise Warning.Expected("'|'", self.FileName, self.CurrentLineNumber)
Size = self._CalcRegionExpr()
if Size is None:
raise Warning.Expected("Region Size", self.FileName, self.CurrentLineNumber)
RegionObj.Size = Size
if not self._GetNextWord():
return True
if not self._Token in {"SET", BINARY_FILE_TYPE_FV, "FILE", "DATA", "CAPSULE", "INF"}:
#
# If next token is a word which is not a valid FV type, it might be part of [PcdOffset[|PcdSize]]
# Or it might be next region's offset described by an expression which starts with a PCD.
# PcdOffset[|PcdSize] or OffsetPcdExpression|Size
#
self._UndoToken()
IsRegionPcd = (RegionSizeGuidPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]) or
RegionOffsetPcdPattern.match(self._CurrentLine()[self.CurrentOffsetWithinLine:]))
if IsRegionPcd:
RegionObj.PcdOffset = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdOffset] = "0x%08X" % (RegionObj.Offset + int(theFd.BaseAddress, 0))
self.SetPcdLocalation(RegionObj.PcdOffset)
self._PcdDict['%s.%s' % (RegionObj.PcdOffset[1], RegionObj.PcdOffset[0])] = "0x%x" % RegionObj.Offset
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
if self._IsToken(TAB_VALUE_SPLIT):
RegionObj.PcdSize = self._GetNextPcdSettings()
self.Profile.PcdDict[RegionObj.PcdSize] = "0x%08X" % RegionObj.Size
self.SetPcdLocalation(RegionObj.PcdSize)
self._PcdDict['%s.%s' % (RegionObj.PcdSize[1], RegionObj.PcdSize[0])] = "0x%x" % RegionObj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
if not self._GetNextWord():
return True
if self._Token == "SET":
self._UndoToken()
self._GetSetStatements(RegionObj)
if not self._GetNextWord():
return True
elif self._Token == BINARY_FILE_TYPE_FV:
self._UndoToken()
self._GetRegionFvType(RegionObj)
elif self._Token == "CAPSULE":
self._UndoToken()
self._GetRegionCapType(RegionObj)
elif self._Token == "FILE":
self._UndoToken()
self._GetRegionFileType(RegionObj)
elif self._Token == "INF":
self._UndoToken()
RegionObj.RegionType = "INF"
while self._IsKeyword("INF"):
self._UndoToken()
ffsInf = self._ParseInfStatement()
if not ffsInf:
break
RegionObj.RegionDataList.append(ffsInf)
elif self._Token == "DATA":
self._UndoToken()
self._GetRegionDataType(RegionObj)
else:
self._UndoToken()
if self._GetRegionLayout(theFd):
return True
raise Warning("A valid region type was not found. "
"Valid types are [SET, FV, CAPSULE, FILE, DATA, INF]. This error occurred",
self.FileName, self.CurrentLineNumber)
return True
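    # Illustrative only -- region layouts as accepted by _GetRegionLayout()
    # (all values hypothetical). Offset and size are '|'-separated expressions,
    # optionally followed by a PCD pair and/or a region type:
    #
    #   0x000000|0x040000
    #   gMyTokenSpaceGuid.PcdRegionOffset|gMyTokenSpaceGuid.PcdRegionSize
    #   FV = FVMAIN
    #
    #   0x040000|0x010000
    #   DATA = { 0x5A, 0xA5 }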
## _GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFvType(self, RegionObj):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = BINARY_FILE_TYPE_FV
RegionObj.RegionDataList.append((self._Token).upper())
while self._IsKeyword(BINARY_FILE_TYPE_FV):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append((self._Token).upper())
## _GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionCapType(self, RegionObj):
if not self._IsKeyword("CAPSULE"):
raise Warning.Expected("'CAPSULE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("CAPSULE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("CAPSULE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionFileType(self, RegionObj):
if not self._IsKeyword("FILE"):
raise Warning.Expected("'FILE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append(self._Token)
while self._IsKeyword("FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FILE name", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self._Token)
## _GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def _GetRegionDataType(self, RegionObj):
if not self._IsKeyword("DATA"):
raise Warning.Expected("Region Data type", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append(DataString)
while self._IsKeyword("DATA"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
# convert hex string value to byte hex string array
AllString = self._Token
AllStrLen = len (AllString)
DataString = ""
while AllStrLen > 4:
DataString = DataString + "0x" + AllString[AllStrLen - 2: AllStrLen] + TAB_COMMA_SPLIT
AllStrLen = AllStrLen - 2
DataString = DataString + AllString[:AllStrLen] + TAB_COMMA_SPLIT
# byte value array
if len (self._Token) <= 4:
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
RegionObj.RegionDataList.append(DataString)
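    # Note on the hex conversion above: a leading value wider than one byte is
    # split into comma-separated bytes in little-endian order. Illustrative
    # only:
    #
    #   DATA = { 0xAA55 }        is stored as the string "0x55,0xAA"
    #   DATA = { 0x5A, 0xA5 }    is stored as the string "0x5A,0xA5"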
## _GetFv() method
#
# Get FV section contents and store its data into FV dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FV
# @retval False Not able to find a FV
#
def _GetFv(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FV."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[FV.", True):
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvName = self._GetUiName()
self.CurrentFvName = FvName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
FvObj = FV(Name=self.CurrentFvName)
self.Profile.FvDict[self.CurrentFvName] = FvObj
Status = self._GetCreateFile(FvObj)
if not Status:
raise Warning("FV name error", self.FileName, self.CurrentLineNumber)
self._GetDefineStatements(FvObj)
self._GetAddressStatements(FvObj)
while True:
self._GetSetStatements(FvObj)
if not (self._GetBlockStatement(FvObj) or self._GetFvBaseAddress(FvObj) or
self._GetFvForceRebase(FvObj) or self._GetFvAlignment(FvObj) or
self._GetFvAttributes(FvObj) or self._GetFvNameGuid(FvObj) or
self._GetFvExtEntryStatement(FvObj) or self._GetFvNameString(FvObj)):
break
if FvObj.FvNameString == 'TRUE' and not FvObj.FvNameGuid:
raise Warning("FvNameString found but FvNameGuid was not found", self.FileName, self.CurrentLineNumber)
        # An FV may contain up to two apriori sections (one PEI, one DXE),
        # so attempt to parse an apriori section twice
        self._GetAprioriSection(FvObj)
        self._GetAprioriSection(FvObj)
while True:
isInf = self._GetInfStatement(FvObj)
isFile = self._GetFileStatement(FvObj)
if not isInf and not isFile:
break
return True
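    # Illustrative only -- a minimal [FV.] section accepted by _GetFv() (names
    # hypothetical):
    #
    #   [FV.FVMAIN]
    #   FvAlignment    = 16
    #   ERASE_POLARITY = 1
    #   MEMORY_MAPPED  = TRUE
    #   INF MyPkg/MyDriver/MyDriver.inf
    #
    # The attribute loop above runs until no block statement, attribute, name
    # GUID, ext entry or name string matches.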
## _GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
# @retval True Successfully find a alignment statement
# @retval False Not able to find a alignment statement
#
def _GetFvAlignment(self, Obj):
if not self._IsKeyword("FvAlignment"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"}:
raise Warning("Unknown alignment value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self._Token
return True
## _GetFvBaseAddress() method
#
# Get BaseAddress for FV
#
# @param self The object pointer
# @param Obj for whom FvBaseAddress is got
# @retval True Successfully find a FvBaseAddress statement
# @retval False Not able to find a FvBaseAddress statement
#
def _GetFvBaseAddress(self, Obj):
if not self._IsKeyword("FvBaseAddress"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV base address value", self.FileName, self.CurrentLineNumber)
if not BaseAddrValuePattern.match(self._Token.upper()):
raise Warning("Unknown FV base address value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
Obj.FvBaseAddress = self._Token
return True
## _GetFvForceRebase() method
#
# Get FvForceRebase for FV
#
# @param self The object pointer
# @param Obj for whom FvForceRebase is got
# @retval True Successfully find a FvForceRebase statement
# @retval False Not able to find a FvForceRebase statement
#
def _GetFvForceRebase(self, Obj):
if not self._IsKeyword("FvForceRebase"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FvForceRebase value", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {"TRUE", "FALSE", "0", "0X0", "0X00", "1", "0X1", "0X01"}:
raise Warning("Unknown FvForceRebase value '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token.upper() in {"TRUE", "1", "0X1", "0X01"}:
Obj.FvForceRebase = True
elif self._Token.upper() in {"FALSE", "0", "0X0", "0X00"}:
Obj.FvForceRebase = False
else:
Obj.FvForceRebase = None
return True
## _GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param Obj for whom attribute is got
# @retval None
#
def _GetFvAttributes(self, FvObj):
IsWordToken = False
while self._GetNextWord():
IsWordToken = True
name = self._Token
if name not in {"ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE", "WEAK_ALIGNMENT", "FvUsedSizeEnable"}:
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self._Token
return IsWordToken
## _GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param Obj for whom GUID is got
# @retval None
#
def _GetFvNameGuid(self, FvObj):
if not self._IsKeyword("FvNameGuid"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
FvObj.FvNameGuid = self._Token
return True
def _GetFvNameString(self, FvObj):
if not self._IsKeyword("FvNameString"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken() or self._Token.upper() not in {'TRUE', 'FALSE'}:
raise Warning.Expected("TRUE or FALSE for FvNameString", self.FileName, self.CurrentLineNumber)
FvObj.FvNameString = self._Token
return True
def _GetFvExtEntryStatement(self, FvObj):
if not (self._IsKeyword("FV_EXT_ENTRY") or self._IsKeyword("FV_EXT_ENTRY_TYPE")):
return False
if not self._IsKeyword ("TYPE"):
raise Warning.Expected("'TYPE'", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber() and not self._GetNextDecimalNumber():
raise Warning.Expected("Hex FV extension entry type value At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryTypeValue.append(self._Token)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("FILE") and not self._IsKeyword("DATA"):
raise Warning.Expected("'FILE' or 'DATA'", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryType.append(self._Token)
if self._Token == 'DATA':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex byte", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString = self._Token
DataString += TAB_COMMA_SPLIT
while self._IsToken(TAB_COMMA_SPLIT):
if not self._GetNextHexNumber():
raise Warning("Invalid Hex number", self.FileName, self.CurrentLineNumber)
if len(self._Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long", self.FileName, self.CurrentLineNumber)
DataString += self._Token
DataString += TAB_COMMA_SPLIT
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(TAB_COMMA_SPLIT)
FvObj.FvExtEntryData.append(DataString)
if self._Token == 'FILE':
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV Extension Entry file path At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvExtEntryData.append(self._Token)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return True
## _GetAprioriSection() method
#
# Get token statements
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
def _GetAprioriSection(self, FvObj):
if not self._IsKeyword("APRIORI"):
return False
if not self._IsKeyword("PEI") and not self._IsKeyword("DXE"):
raise Warning.Expected("Apriori file type", self.FileName, self.CurrentLineNumber)
AprType = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
AprSectionObj = AprioriSection()
AprSectionObj.AprioriType = AprType
self._GetDefineStatements(AprSectionObj)
while True:
IsInf = self._GetInfStatement(AprSectionObj)
IsFile = self._GetFileStatement(AprSectionObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
def _ParseInfStatement(self):
if not self._IsKeyword("INF"):
return None
ffsInf = FfsInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if not ffsInf.InfFileName.endswith('.inf'):
raise Warning.Expected(".inf file path", self.FileName, self.CurrentLineNumber)
ffsInf.CurrentLineNum = self.CurrentLineNumber
ffsInf.CurrentLineContent = self._CurrentLine()
        # Replace $(SPACE) with a real space
ffsInf.InfFileName = ffsInf.InfFileName.replace('$(SPACE)', ' ')
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
            # Do a case-sensitive check of the file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if not NewFileName in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
if self._IsToken(TAB_VALUE_SPLIT):
if self._IsKeyword('RELOCS_STRIPPED'):
ffsInf.KeepReloc = False
elif self._IsKeyword('RELOCS_RETAINED'):
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return ffsInf
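    # Illustrative only -- INF statements as parsed above (paths and the rule
    # name are hypothetical). Options precede the path; a reloc strip flag may
    # follow it after '|':
    #
    #   INF MyPkg/MyDriver/MyDriver.inf
    #   INF RuleOverride = MyRule MyPkg/MyDriver/MyDriver.inf
    #   INF USE = X64 MyPkg/MyDriver/MyDriver.inf | RELOCS_STRIPPED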
## _GetInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetInfStatement(self, Obj, ForCapsule=False):
ffsInf = self._ParseInfStatement()
if not ffsInf:
return False
if ForCapsule:
myCapsuleFfs = CapsuleFfs()
myCapsuleFfs.Ffs = ffsInf
Obj.CapsuleDataList.append(myCapsuleFfs)
else:
Obj.FfsList.append(ffsInf)
return True
## _GetInfOptions() method
#
# Get options for INF
#
# @param self The object pointer
# @param FfsInfObj for whom option is got
#
def _GetInfOptions(self, FfsInfObj):
if self._IsKeyword("FILE_GUID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
raise Warning.Expected("GUID value", self.FileName, self.CurrentLineNumber)
FfsInfObj.OverrideGuid = self._Token
if self._IsKeyword("RuleOverride"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Rule name", self.FileName, self.CurrentLineNumber)
FfsInfObj.Rule = self._Token
if self._IsKeyword("VERSION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Version", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Version = self._Token
if self._IsKeyword(BINARY_FILE_TYPE_UI):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI name", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
FfsInfObj.Ui = self._Token
if self._IsKeyword("USE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("ARCH name", self.FileName, self.CurrentLineNumber)
FfsInfObj.UseArch = self._Token
if self._GetNextToken():
p = compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\))')
if p.match(self._Token) and p.match(self._Token).span()[1] == len(self._Token):
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
return
else:
self._UndoToken()
return
while self._GetNextToken():
if not p.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsInfObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
## _GetFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetFileStatement(self, Obj, ForCapsule = False):
if not self._IsKeyword("FILE"):
return False
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
if ForCapsule and self._Token == 'DATA':
self._UndoToken()
self._UndoToken()
return False
FfsFileObj = FileStatement()
FfsFileObj.FvFileType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextGuid():
if not self._GetNextWord():
raise Warning.Expected("File GUID", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
FfsFileObj.NameGuid = self._Token
self._GetFilePart(FfsFileObj)
if ForCapsule:
capsuleFfs = CapsuleFfs()
capsuleFfs.Ffs = FfsFileObj
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(FfsFileObj)
return True
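    # Illustrative only -- FILE statements as parsed above (GUIDs and paths
    # hypothetical). The name GUID may also be taken from a PCD:
    #
    #   FILE DRIVER = 11111111-2222-3333-4444-555555555555 {
    #     SECTION PE32 = MyPkg/Binaries/MyDriver.efi
    #   }
    #
    #   FILE FREEFORM = PCD(gMyTokenSpaceGuid.PcdMyFileGuid) {
    #     SECTION RAW = MyPkg/Binaries/Logo.bmp
    #   }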
## _FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _FileCouldHaveRelocFlag (FileType):
if FileType in {SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_MM_CORE_STANDALONE, 'PEI_DXE_COMBO'}:
return True
else:
return False
## _SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
@staticmethod
def _SectionCouldHaveRelocFlag (SectionType):
if SectionType in {BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32}:
return True
else:
return False
## _GetFilePart() method
#
# Get components for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom component is got
#
def _GetFilePart(self, FfsFileObj):
self._GetFileOpts(FfsFileObj)
if not self._IsToken("{"):
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
if self._Token == 'RELOCS_STRIPPED':
FfsFileObj.KeepReloc = False
else:
FfsFileObj.KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag%d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name or section data", self.FileName, self.CurrentLineNumber)
if self._Token == BINARY_FILE_TYPE_FV:
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvName = self._Token
elif self._Token == "FD":
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
FfsFileObj.FdName = self._Token
elif self._Token in {TAB_DEFINE, "APRIORI", "SECTION"}:
self._UndoToken()
self._GetSectionData(FfsFileObj)
elif hasattr(FfsFileObj, 'FvFileType') and FfsFileObj.FvFileType == 'RAW':
self._UndoToken()
self._GetRAWData(FfsFileObj)
else:
FfsFileObj.CurrentLineNum = self.CurrentLineNumber
FfsFileObj.CurrentLineContent = self._CurrentLine()
FfsFileObj.FileName = self._Token.replace('$(SPACE)', ' ')
self._VerifyFile(FfsFileObj.FileName)
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
## _GetRAWData() method
#
# Get RAW data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetRAWData(self, FfsFileObj):
FfsFileObj.FileName = []
FfsFileObj.SubAlignment = []
while True:
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
#For FFS, Auto is default option same to ""
if not self._Token == "Auto":
AlignValue = self._Token
if not self._GetNextToken():
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
FileName = self._Token.replace('$(SPACE)', ' ')
if FileName == T_CHAR_BRACE_R:
self._UndoToken()
raise Warning.Expected("Filename value", self.FileName, self.CurrentLineNumber)
self._VerifyFile(FileName)
File = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir)
FfsFileObj.FileName.append(File.Path)
FfsFileObj.SubAlignment.append(AlignValue)
if self._IsToken(T_CHAR_BRACE_R):
self._UndoToken()
break
if len(FfsFileObj.SubAlignment) == 1:
FfsFileObj.SubAlignment = FfsFileObj.SubAlignment[0]
if len(FfsFileObj.FileName) == 1:
FfsFileObj.FileName = FfsFileObj.FileName[0]
## _GetFileOpts() method
#
# Get options for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom options is got
#
def _GetFileOpts(self, FfsFileObj):
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
FfsFileObj.KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
FfsFileObj.KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
if self._IsKeyword("FIXED", True):
FfsFileObj.Fixed = True
if self._IsKeyword("CHECKSUM", True):
FfsFileObj.CheckSum = True
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
#For FFS, Auto is default option same to ""
if not self._Token == "Auto":
FfsFileObj.Alignment = self._Token
## _GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def _GetAlignment(self):
if self._IsKeyword("Align", True):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("alignment value", self.FileName, self.CurrentLineNumber)
return True
return False
## _GetSectionData() method
#
# Get section data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
#
def _GetSectionData(self, FfsFileObj):
self._GetDefineStatements(FfsFileObj)
while True:
IsLeafSection = self._GetLeafSection(FfsFileObj)
IsEncapSection = self._GetEncapsulationSec(FfsFileObj)
if not IsLeafSection and not IsEncapSection:
break
## _GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetLeafSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(Obj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
BuildNum = None
if self._IsKeyword("BUILD_NUM"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number value", self.FileName, self.CurrentLineNumber)
BuildNum = self._Token
if self._IsKeyword("VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("version", self.FileName, self.CurrentLineNumber)
VerSectionObj = VerSection()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self._GetStringData():
VerSectionObj.StringData = self._Token
else:
VerSectionObj.FileName = self._Token
Obj.SectionList.append(VerSectionObj)
elif self._IsKeyword(BINARY_FILE_TYPE_UI):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("UI", self.FileName, self.CurrentLineNumber)
UiSectionObj = UiSection()
UiSectionObj.Alignment = AlignValue
if self._GetStringData():
UiSectionObj.StringData = self._Token
else:
UiSectionObj.FileName = self._Token
Obj.SectionList.append(UiSectionObj)
elif self._IsKeyword("FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name or FV file path", self.FileName, self.CurrentLineNumber)
FvName = self._Token
FvObj = None
if self._IsToken("{"):
FvObj = FV()
FvObj.UiFvName = FvName.upper()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj = FvImageSection()
FvImageSectionObj.Alignment = AlignValue
if FvObj is not None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName.upper()
FvImageSectionObj.FvFileName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self._IsKeyword("PEI_DEPEX_EXP") or self._IsKeyword("DXE_DEPEX_EXP") or self._IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = DepexSection()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(T_CHAR_BRACE_R):
raise Warning.Expected("Depex expression ending '}'", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self._SkippedChars.rstrip(T_CHAR_BRACE_R)
Obj.SectionList.append(DepexSectionObj)
else:
if not self._GetNextWord():
raise Warning.Expected("section type", self.FileName, self.CurrentLineNumber)
            # An encapsulation section appears here; restore the position and return
if self._Token == "COMPRESS" or self._Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self._Token not in {"COMPAT16", BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,\
BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "SUBTYPE_GUID", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown section type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
            if AlignValue == 'Auto' and self._Token not in {BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_TE}:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = DataSection()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Obj.FvFileType) and self._SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self._Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
raise Warning("File type %s, section type %s, could not have reloc strip flag%d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_EQUAL_SPLIT):
if not self._GetNextToken():
raise Warning.Expected("section file path", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self._Token
self._VerifyFile(DataSectionObj.SectFileName)
else:
if not self._GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
## _VerifyFile
#
# Check if file exists or not:
    #   If the current phase is GenFds, the file must exist;
    #   If the current phase is AutoGen and the file is not in $(OUTPUT_DIRECTORY), the file must exist
# @param FileName: File path to be verified.
#
def _VerifyFile(self, FileName):
if FileName.replace(TAB_WORKSPACE, '').find('$') != -1:
return
if not GlobalData.gAutoGenPhase or not self._GetMacroValue(TAB_DSC_DEFINES_OUTPUT_DIRECTORY) in FileName:
ErrorCode, ErrorInfo = PathClass(NormPath(FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
## _GetCglSection() method
#
# Get compressed or GUIDed section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param AlignValue alignment value for complex section
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetCglSection(self, Obj, AlignValue = None):
if self._IsKeyword("COMPRESS"):
type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.Alignment = AlignValue
CompressSectionObj.CompType = type
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(CompressSectionObj)
IsEncapSection = self._GetEncapsulationSec(CompressSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.Alignment = AlignValue
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Recursive sections...
while True:
IsLeafSection = self._GetLeafSection(GuidSectionObj)
IsEncapSection = self._GetEncapsulationSec(GuidSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(GuidSectionObj)
return True
return False
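    # Illustrative only -- encapsulation sections as parsed above (the GUID is
    # a hypothetical placeholder for a real GUIDED-section tool GUID):
    #
    #   SECTION COMPRESS PI_STD {
    #     SECTION PE32 = MyPkg/Binaries/MyDriver.efi
    #   }
    #
    #   SECTION GUIDED 11111111-2222-3333-4444-555555555555 PROCESSING_REQUIRED = TRUE {
    #     SECTION PE32 = MyPkg/Binaries/MyDriver.efi
    #   }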
    ## _GetGuidAttrib() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def _GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = "NONE"
AttribDict["AUTH_STATUS_VALID"] = "NONE"
AttribDict["EXTRA_HEADER_SIZE"] = -1
while self._IsKeyword("PROCESSING_REQUIRED") or self._IsKeyword("AUTH_STATUS_VALID") \
or self._IsKeyword("EXTRA_HEADER_SIZE"):
AttribKey = self._Token
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE(1)/FALSE(0)/Number", self.FileName, self.CurrentLineNumber)
elif AttribKey == "EXTRA_HEADER_SIZE":
Base = 10
if self._Token[0:2].upper() == "0X":
Base = 16
try:
AttribDict[AttribKey] = int(self._Token, Base)
continue
except ValueError:
raise Warning.Expected("Number", self.FileName, self.CurrentLineNumber)
elif self._Token.upper() not in {"TRUE", "FALSE", "1", "0"}:
raise Warning.Expected("TRUE/FALSE (1/0)", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self._Token
return AttribDict
## _GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self._IsKeyword("SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning.Expected("SECTION", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
AlignValue = self._Token
if not self._GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
def _GetFmp(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[FMPPAYLOAD."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
self._SkipToToken("[FMPPAYLOAD.", True)
FmpUiName = self._GetUiName().upper()
if FmpUiName in self.Profile.FmpPayloadDict:
raise Warning("Duplicated FMP UI name found: %s" % FmpUiName, self.FileName, self.CurrentLineNumber)
FmpData = CapsulePayload()
FmpData.UiName = FmpUiName
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning("The FMP payload section is empty!", self.FileName, self.CurrentLineNumber)
FmpKeyList = ['IMAGE_HEADER_INIT_VERSION', 'IMAGE_TYPE_ID', 'IMAGE_INDEX', 'HARDWARE_INSTANCE', 'CERTIFICATE_GUID', 'MONOTONIC_COUNT']
while self._Token in FmpKeyList:
Name = self._Token
FmpKeyList.remove(Name)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if Name == 'IMAGE_TYPE_ID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for IMAGE_TYPE_ID.", self.FileName, self.CurrentLineNumber)
FmpData.ImageTypeId = self._Token
elif Name == 'CERTIFICATE_GUID':
if not self._GetNextGuid():
raise Warning.Expected("GUID value for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
FmpData.Certificate_Guid = self._Token
if UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_RSA2048_SHA256_GUID and UUID(FmpData.Certificate_Guid) != EFI_CERT_TYPE_PKCS7_GUID:
raise Warning("Only support EFI_CERT_TYPE_RSA2048_SHA256_GUID or EFI_CERT_TYPE_PKCS7_GUID for CERTIFICATE_GUID.", self.FileName, self.CurrentLineNumber)
else:
if not self._GetNextToken():
raise Warning.Expected("value of %s" % Name, self.FileName, self.CurrentLineNumber)
Value = self._Token
if Name == 'IMAGE_HEADER_INIT_VERSION':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.Version = Value
elif Name == 'IMAGE_INDEX':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.ImageIndex = Value
elif Name == 'HARDWARE_INSTANCE':
if FdfParser._Verify(Name, Value, 'UINT8'):
FmpData.HardwareInstance = Value
elif Name == 'MONOTONIC_COUNT':
if FdfParser._Verify(Name, Value, 'UINT64'):
FmpData.MonotonicCount = Value
if FmpData.MonotonicCount.upper().startswith('0X'):
FmpData.MonotonicCount = int(FmpData.MonotonicCount, 16)
else:
FmpData.MonotonicCount = int(FmpData.MonotonicCount)
if not self._GetNextToken():
break
else:
self._UndoToken()
if (FmpData.MonotonicCount and not FmpData.Certificate_Guid) or (not FmpData.MonotonicCount and FmpData.Certificate_Guid):
EdkLogger.error("FdfParser", FORMAT_INVALID, "CERTIFICATE_GUID and MONOTONIC_COUNT must be work as a pair.")
# Only the IMAGE_TYPE_ID is required item
if FmpKeyList and 'IMAGE_TYPE_ID' in FmpKeyList:
raise Warning("'IMAGE_TYPE_ID' in FMP payload section.", self.FileName, self.CurrentLineNumber)
# get the Image file and Vendor code file
self._GetFMPCapsuleData(FmpData)
if not FmpData.ImageFile:
raise Warning("Missing image file in FMP payload section.", self.FileName, self.CurrentLineNumber)
# check whether more than one Vendor code file
if len(FmpData.VendorCodeFile) > 1:
raise Warning("Vendor code file max of 1 per FMP payload section.", self.FileName, self.CurrentLineNumber)
self.Profile.FmpPayloadDict[FmpUiName] = FmpData
return True
## _GetCapsule() method
#
# Get capsule section contents and store its data into capsule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a capsule
# @retval False Not able to find a capsule
#
def _GetCapsule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[CAPSULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[CAPSULE.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Capsule.]", self.FileName, self.CurrentLineNumber)
CapsuleObj = Capsule()
CapsuleName = self._GetUiName()
if not CapsuleName:
raise Warning.Expected("capsule name", self.FileName, self.CurrentLineNumber)
CapsuleObj.UiCapsuleName = CapsuleName.upper()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
if self._IsKeyword("CREATE_FILE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber)
CapsuleObj.CreateFile = self._Token
self._GetCapsuleStatements(CapsuleObj)
self.Profile.CapsuleDict[CapsuleObj.UiCapsuleName] = CapsuleObj
return True
## _GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def _GetCapsuleStatements(self, Obj):
self._GetCapsuleTokens(Obj)
self._GetDefineStatements(Obj)
self._GetSetStatements(Obj)
self._GetCapsuleData(Obj)
## _GetCapsuleTokens() method
#
# Get token statements for capsule
#
# @param self The object pointer
# @param Obj for whom token statements are got
#
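# Illustrative token statements parsed here (values are hypothetical):
#   CAPSULE_GUID      = 6DCBD5ED-E82D-4C44-BDA1-7194199AD92A
#   CAPSULE_FLAGS     = PersistAcrossReset,InitiateReset
#   OEM_CAPSULE_FLAGS = 0x8000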
def _GetCapsuleTokens(self, Obj):
if not self._GetNextToken():
return False
while self._Token in {"CAPSULE_GUID", "CAPSULE_HEADER_SIZE", "CAPSULE_FLAGS", "OEM_CAPSULE_FLAGS", "CAPSULE_HEADER_INIT_VERSION"}:
Name = self._Token.strip()
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if Name == 'CAPSULE_FLAGS':
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
while self._IsToken(TAB_COMMA_SPLIT):
Value += TAB_COMMA_SPLIT
if not self._GetNextToken():
raise Warning.Expected("value", self.FileName, self.CurrentLineNumber)
if not self._Token in {"PersistAcrossReset", "PopulateSystemTable", "InitiateReset"}:
raise Warning.Expected("PersistAcrossReset, PopulateSystemTable, or InitiateReset", self.FileName, self.CurrentLineNumber)
Value += self._Token.strip()
elif Name == 'OEM_CAPSULE_FLAGS':
Value = self._Token.strip()
if not Value.upper().startswith('0X'):
raise Warning.Expected("hex value starting with 0x", self.FileName, self.CurrentLineNumber)
try:
Value = int(Value, 0)
except ValueError:
raise Warning.Expected("hex string failed to convert to value", self.FileName, self.CurrentLineNumber)
if not 0x0000 <= Value <= 0xFFFF:
raise Warning.Expected("hex value between 0x0000 and 0xFFFF", self.FileName, self.CurrentLineNumber)
Value = self._Token.strip()
else:
Value = self._Token.strip()
Obj.TokensDict[Name] = Value
if not self._GetNextToken():
return False
self._UndoToken()
## _GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetCapsuleData(self, Obj):
while True:
IsInf = self._GetInfStatement(Obj, True)
IsFile = self._GetFileStatement(Obj, True)
IsFv = self._GetFvStatement(Obj)
IsFd = self._GetFdStatement(Obj)
IsAnyFile = self._GetAnyFileStatement(Obj)
IsAfile = self._GetAfileStatement(Obj)
IsFmp = self._GetFmpStatement(Obj)
if not (IsInf or IsFile or IsFv or IsFd or IsAnyFile or IsAfile or IsFmp):
break
## _GetFMPCapsuleData() method
#
# Get capsule data for FMP capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def _GetFMPCapsuleData(self, Obj):
while True:
IsFv = self._GetFvStatement(Obj, True)
IsFd = self._GetFdStatement(Obj, True)
IsAnyFile = self._GetAnyFileStatement(Obj, True)
if not (IsFv or IsFd or IsAnyFile):
break
## _GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find a FV statement
# @retval False Not able to find a FV statement
#
def _GetFvStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FV name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FvDict:
raise Warning("FV name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFv = CapsuleFv()
myCapsuleFv.FvName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFv)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFv)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFv)
return True
## _GetFdStatement() method
#
# Get FD for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FD is got
# @retval True Successfully find a FD statement
# @retval False Not able to find a FD statement
#
def _GetFdStatement(self, CapsuleObj, FMPCapsule = False):
if not self._IsKeyword("FD"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("FD name", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in self.Profile.FdDict:
raise Warning("FD name does not exist", self.FileName, self.CurrentLineNumber)
myCapsuleFd = CapsuleFd()
myCapsuleFd.FdName = self._Token
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleFd)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleFd)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleFd)
return True
def _GetFmpStatement(self, CapsuleObj):
if not self._IsKeyword("FMP_PAYLOAD"):
if not self._IsKeyword("FMP"):
return False
if not self._IsKeyword("PAYLOAD"):
self._UndoToken()
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("payload name after FMP_PAYLOAD =", self.FileName, self.CurrentLineNumber)
Payload = self._Token.upper()
if Payload not in self.Profile.FmpPayloadDict:
raise Warning("This FMP Payload does not exist: %s" % self._Token, self.FileName, self.CurrentLineNumber)
CapsuleObj.FmpPayloadList.append(self.Profile.FmpPayloadDict[Payload])
return True
def _ParseRawFileStatement(self):
if not self._IsKeyword("FILE"):
return None
if not self._IsKeyword("DATA"):
self._UndoToken()
return None
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
AnyFileName = self._Token
self._VerifyFile(AnyFileName)
if not os.path.isabs(AnyFileName):
AnyFileName = mws.join(GenFdsGlobalVariable.WorkSpaceDir, AnyFileName)
return AnyFileName
## _GetAnyFileStatement() method
#
# Get AnyFile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom AnyFile is got
# @retval True Successfully find a Anyfile statement
# @retval False Not able to find a AnyFile statement
#
def _GetAnyFileStatement(self, CapsuleObj, FMPCapsule = False):
AnyFileName = self._ParseRawFileStatement()
if not AnyFileName:
return False
myCapsuleAnyFile = CapsuleAnyFile()
myCapsuleAnyFile.FileName = AnyFileName
if FMPCapsule:
if not CapsuleObj.ImageFile:
CapsuleObj.ImageFile.append(myCapsuleAnyFile)
else:
CapsuleObj.VendorCodeFile.append(myCapsuleAnyFile)
else:
CapsuleObj.CapsuleDataList.append(myCapsuleAnyFile)
return True
## _GetAfileStatement() method
#
# Get Afile for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom Afile is got
# @retval True Successfully find a Afile statement
# @retval False Not able to find a Afile statement
#
def _GetAfileStatement(self, CapsuleObj):
if not self._IsKeyword("APPEND"):
return False
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Afile name", self.FileName, self.CurrentLineNumber)
AfileName = self._Token
AfileBaseName = os.path.basename(AfileName)
if os.path.splitext(AfileBaseName)[1] not in {".bin", ".BIN", ".Bin", ".dat", ".DAT", ".Dat", ".data", ".DATA", ".Data"}:
raise Warning('invalid binary file type, should be one of "bin", "BIN", "Bin", "dat", "DAT", "Dat", "data", "DATA", "Data"', \
self.FileName, self.CurrentLineNumber)
if not os.path.isabs(AfileName):
AfileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(AfileName)
self._VerifyFile(AfileName)
elif not os.path.exists(AfileName):
raise Warning('%s does not exist' % AfileName, self.FileName, self.CurrentLineNumber)
myCapsuleAfile = CapsuleAfile()
myCapsuleAfile.FileName = AfileName
CapsuleObj.CapsuleDataList.append(myCapsuleAfile)
return True
## _GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
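# Illustrative section headers accepted here (the trailing template name is optional):
#   [Rule.Common.DXE_DRIVER]
#   [Rule.Common.DXE_DRIVER.MYTEMPLATE]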
def _GetRule(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[RULE."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[Rule.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine:], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning.Expected("[Rule.]", self.FileName, self.CurrentLineNumber)
if not self._SkipToToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Arch = self._SkippedChars.rstrip(TAB_SPLIT)
if Arch.upper() not in ARCH_SET_FULL:
raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
ModuleType = self._GetModuleType()
TemplateName = ""
if self._IsToken(TAB_SPLIT):
if not self._GetNextWord():
raise Warning.Expected("template name", self.FileName, self.CurrentLineNumber)
TemplateName = self._Token
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
RuleObj = self._GetRuleFileStatements()
RuleObj.Arch = Arch.upper()
RuleObj.ModuleType = ModuleType
RuleObj.TemplateName = TemplateName
if TemplateName == '':
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() ] = RuleObj
else:
self.Profile.RuleDict['RULE' + \
TAB_SPLIT + \
Arch.upper() + \
TAB_SPLIT + \
ModuleType.upper() + \
TAB_SPLIT + \
TemplateName.upper() ] = RuleObj
return True
## _GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def _GetModuleType(self):
if not self._GetNextWord():
raise Warning.Expected("Module type", self.FileName, self.CurrentLineNumber)
if self._Token.upper() not in {
SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER,
SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_DXE_SMM_DRIVER,
SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_UEFI_DRIVER,
SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED,
TAB_DEFAULT, SUP_MODULE_BASE,
EDK_COMPONENT_TYPE_SECURITY_CORE,
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER,
EDK_COMPONENT_TYPE_PIC_PEIM,
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, "PE32_PEIM",
EDK_COMPONENT_TYPE_BS_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER,
EDK_COMPONENT_TYPE_SAL_RT_DRIVER,
EDK_COMPONENT_TYPE_APPLICATION, "ACPITABLE",
SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown Module type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
return self._Token
## _GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def _GetFileExtension(self):
if not self._IsToken(TAB_SPLIT):
raise Warning.Expected("'.'", self.FileName, self.CurrentLineNumber)
Ext = ""
if self._GetNextToken():
if FileExtensionPattern.match(self._Token):
Ext = self._Token
return TAB_SPLIT + Ext
else:
raise Warning("Unknown file extension '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
else:
raise Warning.Expected("file extension", self.FileName, self.CurrentLineNumber)
## _GetRuleFileStatement() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
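# Illustrative complex rule body (paths and macros are hypothetical):
#   FILE DRIVER = $(NAMED_GUID) {
#     DXE_DEPEX DXE_DEPEX Optional $(INF_OUTPUT)/$(MODULE_NAME).depex
#     PE32      PE32               $(INF_OUTPUT)/$(MODULE_NAME).efi
#   }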
def _GetRuleFileStatements(self):
if not self._IsKeyword("FILE"):
raise Warning.Expected("FILE", self.FileName, self.CurrentLineNumber)
if not self._GetNextWord():
raise Warning.Expected("FFS type", self.FileName, self.CurrentLineNumber)
Type = self._Token.strip().upper()
if Type not in {"RAW", "FREEFORM", SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM,
"PEI_DXE_COMBO", "DRIVER", SUP_MODULE_DXE_CORE, EDK_COMPONENT_TYPE_APPLICATION,
"FV_IMAGE", "SMM", SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE,
SUP_MODULE_MM_CORE_STANDALONE}:
raise Warning("Unknown FV type '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._IsKeyword("$(NAMED_GUID)"):
if not self._GetNextWord():
raise Warning.Expected("$(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
NameGuid = self._Token
KeepReloc = None
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._FileCouldHaveRelocFlag(Type):
if self._Token == 'RELOCS_STRIPPED':
KeepReloc = False
else:
KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag%d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
KeyStringList = []
if self._GetNextToken():
if TokenFindPattern.match(self._Token):
KeyStringList.append(self._Token)
if self._IsToken(TAB_COMMA_SPLIT):
while self._GetNextToken():
if not TokenFindPattern.match(self._Token):
raise Warning.Expected("KeyString \"Target_Tag_Arch\"", self.FileName, self.CurrentLineNumber)
KeyStringList.append(self._Token)
if not self._IsToken(TAB_COMMA_SPLIT):
break
else:
self._UndoToken()
Fixed = False
if self._IsKeyword("Fixed", True):
Fixed = True
CheckSum = False
if self._IsKeyword("CheckSum", True):
CheckSum = True
AlignValue = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
# For FFS, "Auto" is the default option, equivalent to ""
if self._Token != "Auto":
AlignValue = self._Token
if self._IsToken("{"):
# Complex file rule expected
NewRule = RuleComplexFile()
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(NewRule)
IsLeaf = self._GetEfiSection(NewRule)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
return NewRule
else:
# Simple file rule expected
if not self._GetNextWord():
raise Warning.Expected("leaf section type", self.FileName, self.CurrentLineNumber)
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
if self._IsKeyword("Fixed", True):
Fixed = True
if self._IsKeyword("CheckSum", True):
CheckSum = True
SectAlignment = ""
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and SectionName not in {BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_TE}:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
SectAlignment = self._Token
Ext = None
if self._IsToken(TAB_VALUE_SPLIT):
Ext = self._GetFileExtension()
elif not self._GetNextToken():
raise Warning.Expected("File name", self.FileName, self.CurrentLineNumber)
NewRule = RuleSimpleFile()
NewRule.SectionType = SectionName
NewRule.FvFileType = Type
NewRule.NameGuid = NameGuid
NewRule.Alignment = AlignValue
NewRule.SectAlignment = SectAlignment
NewRule.CheckSum = CheckSum
NewRule.Fixed = Fixed
NewRule.KeyStringList = KeyStringList
if KeepReloc is not None:
NewRule.KeepReloc = KeepReloc
NewRule.FileExtension = Ext
NewRule.FileName = self._Token
return NewRule
## _GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def _GetEfiSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self._GetNextWord():
return False
SectionName = self._Token
if SectionName not in {
"COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE, "FV_IMAGE",
"RAW",BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI,
BINARY_FILE_TYPE_PEI_DEPEX, "VERSION", "SUBTYPE_GUID",
BINARY_FILE_TYPE_SMM_DEPEX, BINARY_FILE_TYPE_GUID}:
self._UndoToken()
return False
if SectionName == "FV_IMAGE":
FvImageSectionObj = FvImageSection()
if self._IsKeyword("FV_IMAGE"):
pass
if self._IsToken("{"):
FvObj = FV()
self._GetDefineStatements(FvObj)
self._GetBlockStatement(FvObj)
self._GetSetStatements(FvObj)
self._GetFvAlignment(FvObj)
self._GetFvAttributes(FvObj)
self._GetAprioriSection(FvObj)
self._GetAprioriSection(FvObj)
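# The call appears twice above since an FV may declare both a PEI and a DXE
# APRIORI section (assumption based on common FDF usage)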
while True:
IsInf = self._GetInfStatement(FvObj)
IsFile = self._GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
if not self._IsKeyword(BINARY_FILE_TYPE_FV):
raise Warning.Expected("'FV'", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.FvFileType = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENT_NOAUTO:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Alignment = self._Token
if self._IsToken(TAB_VALUE_SPLIT):
FvImageSectionObj.FvFileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
FvImageSectionObj.FvFileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("FV file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(FvImageSectionObj)
return True
EfiSectionObj = EfiSection()
EfiSectionObj.SectionType = SectionName
if not self._GetNextToken():
raise Warning.Expected("file type", self.FileName, self.CurrentLineNumber)
if self._Token == "STRING":
if not self._RuleSectionCouldHaveString(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have string data%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Quoted String", self.FileName, self.CurrentLineNumber)
if self._GetStringData():
EfiSectionObj.StringData = self._Token
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
else:
EfiSectionObj.FileType = self._Token
self._CheckRuleSectionFileType(EfiSectionObj.SectionType, EfiSectionObj.FileType)
if self._IsKeyword("Optional"):
if not self._RuleSectionCouldBeOptional(EfiSectionObj.SectionType):
raise Warning("%s section could NOT be optional%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
EfiSectionObj.Optional = True
if self._IsKeyword("BUILD_NUM"):
if not self._RuleSectionCouldHaveBuildNum(EfiSectionObj.SectionType):
raise Warning("%s section could NOT have BUILD_NUM%d" % (EfiSectionObj.SectionType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("Build number", self.FileName, self.CurrentLineNumber)
EfiSectionObj.BuildNum = self._Token
if self._GetAlignment():
if self._Token not in ALIGNMENTS:
raise Warning("Incorrect alignment '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
if self._Token == 'Auto' and SectionName not in {BINARY_FILE_TYPE_PE32, BINARY_FILE_TYPE_TE}:
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
EfiSectionObj.Alignment = self._Token
if self._IsKeyword('RELOCS_STRIPPED') or self._IsKeyword('RELOCS_RETAINED'):
if self._SectionCouldHaveRelocFlag(EfiSectionObj.SectionType):
if self._Token == 'RELOCS_STRIPPED':
EfiSectionObj.KeepReloc = False
else:
EfiSectionObj.KeepReloc = True
if Obj.KeepReloc is not None and Obj.KeepReloc != EfiSectionObj.KeepReloc:
raise Warning("Section type %s has reloc strip flag conflict with Rule" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
else:
raise Warning("Section type %s could not have reloc strip flag" % EfiSectionObj.SectionType, self.FileName, self.CurrentLineNumber)
if self._IsToken(TAB_VALUE_SPLIT):
EfiSectionObj.FileExtension = self._GetFileExtension()
elif self._GetNextToken():
if self._Token not in {
T_CHAR_BRACE_R, "COMPAT16", BINARY_FILE_TYPE_PE32,
BINARY_FILE_TYPE_PIC, BINARY_FILE_TYPE_TE,
"FV_IMAGE", "RAW", BINARY_FILE_TYPE_DXE_DEPEX,
BINARY_FILE_TYPE_UI, "VERSION",
BINARY_FILE_TYPE_PEI_DEPEX, BINARY_FILE_TYPE_GUID,
BINARY_FILE_TYPE_SMM_DEPEX}:
if self._Token.startswith('PCD'):
self._UndoToken()
self._GetNextWord()
if self._Token == 'PCD':
if not self._IsToken("("):
raise Warning.Expected("'('", self.FileName, self.CurrentLineNumber)
PcdPair = self._GetNextPcdSettings()
if not self._IsToken(")"):
raise Warning.Expected("')'", self.FileName, self.CurrentLineNumber)
self._Token = 'PCD('+PcdPair[1]+TAB_SPLIT+PcdPair[0]+')'
EfiSectionObj.FileName = self._Token
else:
self._UndoToken()
else:
raise Warning.Expected("section file name", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(EfiSectionObj)
return True
## _RuleSectionCouldBeOptional() method
#
# Get whether a section could be optional
#
# @param SectionType The section type to check
# @retval True section could be optional
# @retval False section never optional
#
@staticmethod
def _RuleSectionCouldBeOptional(SectionType):
if SectionType in {BINARY_FILE_TYPE_DXE_DEPEX, BINARY_FILE_TYPE_UI, "VERSION", BINARY_FILE_TYPE_PEI_DEPEX, "RAW", BINARY_FILE_TYPE_SMM_DEPEX}:
return True
else:
return False
## _RuleSectionCouldHaveBuildNum() method
#
# Get whether a section could have build number information
#
# @param SectionType The section type to check
# @retval True section could have build number information
# @retval False section never have build number information
#
@staticmethod
def _RuleSectionCouldHaveBuildNum(SectionType):
if SectionType == "VERSION":
return True
else:
return False
## _RuleSectionCouldHaveString() method
#
# Get whether a section could have string
#
# @param SectionType The section type to check
# @retval True section could have string
# @retval False section never have string
#
@staticmethod
def _RuleSectionCouldHaveString(SectionType):
if SectionType in {BINARY_FILE_TYPE_UI, "VERSION"}:
return True
else:
return False
## _CheckRuleSectionFileType() method
#
# Get whether a section matches a file type
#
# @param self The object pointer
# @param SectionType The section type to check
# @param FileType The file type to check
#
def _CheckRuleSectionFileType(self, SectionType, FileType):
WarningString = "Incorrect section file type '%s'"
if SectionType == "COMPAT16":
if FileType not in {"COMPAT16", "SEC_COMPAT16"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PE32:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_PE32"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PIC:
if FileType not in {BINARY_FILE_TYPE_PIC, "SEC_PIC"}:  # assumption: "SEC_PIC" follows the SEC_* naming used by every other section type here
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_TE:
if FileType not in {BINARY_FILE_TYPE_TE, "SEC_TE"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "RAW":
if FileType not in {BINARY_FILE_TYPE_BIN, "SEC_BIN", "RAW", "ASL", "ACPI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_DXE_DEPEX or SectionType == BINARY_FILE_TYPE_SMM_DEPEX:
if FileType not in {BINARY_FILE_TYPE_DXE_DEPEX, "SEC_DXE_DEPEX", BINARY_FILE_TYPE_SMM_DEPEX}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_UI:
if FileType not in {BINARY_FILE_TYPE_UI, "SEC_UI"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == "VERSION":
if FileType not in {"VERSION", "SEC_VERSION"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_PEI_DEPEX:
if FileType not in {BINARY_FILE_TYPE_PEI_DEPEX, "SEC_PEI_DEPEX"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
elif SectionType == BINARY_FILE_TYPE_GUID:
if FileType not in {BINARY_FILE_TYPE_PE32, "SEC_GUID"}:
raise Warning(WarningString % FileType, self.FileName, self.CurrentLineNumber)
## _GetRuleEncapsulationSection() method
#
# Get encapsulation section for Rule
#
# @param self The object pointer
# @param theRule for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
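# Illustrative encapsulation statements (GUID and contents hypothetical):
#   COMPRESS PI_STD { ... }
#   GUIDED A31280AD-481E-41B6-95E8-127F4C984779 PROCESSING_REQUIRED = TRUE { ... }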
def _GetRuleEncapsulationSection(self, theRule):
if self._IsKeyword("COMPRESS"):
Type = "PI_STD"
if self._IsKeyword("PI_STD") or self._IsKeyword("PI_NONE"):
Type = self._Token
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
CompressSectionObj = CompressSection()
CompressSectionObj.CompType = Type
# Recursive sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(CompressSectionObj)
IsLeaf = self._GetEfiSection(CompressSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(CompressSectionObj)
return True
elif self._IsKeyword("GUIDED"):
GuidValue = None
if self._GetNextGuid():
GuidValue = self._Token
if self._IsKeyword("$(NAMED_GUID)"):
GuidValue = self._Token
AttribDict = self._GetGuidAttrib()
if not self._IsToken("{"):
raise Warning.ExpectedCurlyOpen(self.FileName, self.CurrentLineNumber)
GuidSectionObj = GuidSection()
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
GuidSectionObj.ExtraHeaderSize = AttribDict["EXTRA_HEADER_SIZE"]
# Efi sections...
while True:
IsEncapsulate = self._GetRuleEncapsulationSection(GuidSectionObj)
IsLeaf = self._GetEfiSection(GuidSectionObj)
if not IsEncapsulate and not IsLeaf:
break
if not self._IsToken(T_CHAR_BRACE_R):
raise Warning.ExpectedCurlyClose(self.FileName, self.CurrentLineNumber)
theRule.SectionList.append(GuidSectionObj)
return True
return False
## _GetOptionRom() method
#
# Get OptionROM section contents and store its data into OptionROM list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a OptionROM
# @retval False Not able to find a OptionROM
#
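# Illustrative OptionROM section (name, path and IDs hypothetical):
#   [OptionRom.MyOptionRom]
#     INF MyPkg/Drivers/NetDxe/NetDxe.inf {
#       PCI_VENDOR_ID = 0x8086
#       PCI_DEVICE_ID = 0x100E
#     }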
def _GetOptionRom(self):
if not self._GetNextToken():
return False
S = self._Token.upper()
if S.startswith(TAB_SECTION_START) and not S.startswith("[OPTIONROM."):
self.SectionParser(S)
self._UndoToken()
return False
self._UndoToken()
if not self._IsToken("[OptionRom.", True):
raise Warning("Unknown Keyword '%s'" % self._Token, self.FileName, self.CurrentLineNumber)
OptRomName = self._GetUiName()
if not self._IsToken(TAB_SECTION_END):
raise Warning.ExpectedBracketClose(self.FileName, self.CurrentLineNumber)
OptRomObj = OPTIONROM(OptRomName)
self.Profile.OptRomDict[OptRomName] = OptRomObj
while True:
isInf = self._GetOptRomInfStatement(OptRomObj)
isFile = self._GetOptRomFileStatement(OptRomObj)
if not isInf and not isFile:
break
return True
## _GetOptRomInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
def _GetOptRomInfStatement(self, Obj):
if not self._IsKeyword("INF"):
return False
ffsInf = OptRomInfStatement()
self._GetInfOptions(ffsInf)
if not self._GetNextToken():
raise Warning.Expected("INF file path", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self._Token
if ffsInf.InfFileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(ffsInf.InfFileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
NewFileName = ffsInf.InfFileName
if ffsInf.OverrideGuid:
NewFileName = ProcessDuplicatedInf(PathClass(ffsInf.InfFileName,GenFdsGlobalVariable.WorkSpaceDir), ffsInf.OverrideGuid, GenFdsGlobalVariable.WorkSpaceDir).Path
if not NewFileName in self.Profile.InfList:
self.Profile.InfList.append(NewFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if ffsInf.UseArch:
if ffsInf.UseArch not in self.Profile.InfDict:
self.Profile.InfDict[ffsInf.UseArch] = [ffsInf.InfFileName]
else:
self.Profile.InfDict[ffsInf.UseArch].append(ffsInf.InfFileName)
else:
self.Profile.InfDict['ArchTBD'].append(ffsInf.InfFileName)
self._GetOptRomOverrides (ffsInf)
Obj.FfsList.append(ffsInf)
return True
## _GetOptRomOverrides() method
#
# Get overrides for OptROM INF & FILE
#
# @param self The object pointer
# @param FfsInfObj for whom overrides is got
#
def _GetOptRomOverrides(self, Obj):
if self._IsToken('{'):
Overrides = OverrideAttribs()
while True:
if self._IsKeyword("PCI_VENDOR_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex vendor id", self.FileName, self.CurrentLineNumber)
Overrides.PciVendorId = self._Token
continue
if self._IsKeyword("PCI_CLASS_CODE"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex class code", self.FileName, self.CurrentLineNumber)
Overrides.PciClassCode = self._Token
continue
if self._IsKeyword("PCI_DEVICE_ID"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
# Get a list of PCI IDs
Overrides.PciDeviceId = ""
while (self._GetNextHexNumber()):
Overrides.PciDeviceId = "{} {}".format(Overrides.PciDeviceId, self._Token)
if not Overrides.PciDeviceId:
raise Warning.Expected("one or more Hex device ids", self.FileName, self.CurrentLineNumber)
continue
if self._IsKeyword("PCI_REVISION"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextHexNumber():
raise Warning.Expected("Hex revision", self.FileName, self.CurrentLineNumber)
Overrides.PciRevision = self._Token
continue
if self._IsKeyword("PCI_COMPRESS"):
if not self._IsToken(TAB_EQUAL_SPLIT):
raise Warning.ExpectedEquals(self.FileName, self.CurrentLineNumber)
if not self._GetNextToken():
raise Warning.Expected("TRUE/FALSE for compress", self.FileName, self.CurrentLineNumber)
Overrides.NeedCompress = self._Token.upper() == 'TRUE'
continue
if self._IsToken(T_CHAR_BRACE_R):
break
else:
EdkLogger.error("FdfParser", FORMAT_INVALID, File=self.FileName, Line=self.CurrentLineNumber)
Obj.OverrideAttribs = Overrides
## _GetOptRomFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
def _GetOptRomFileStatement(self, Obj):
if not self._IsKeyword("FILE"):
return False
FfsFileObj = OptRomFileStatement()
if not self._IsKeyword("EFI") and not self._IsKeyword(BINARY_FILE_TYPE_BIN):
raise Warning.Expected("Binary type (EFI/BIN)", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileType = self._Token
if not self._GetNextToken():
raise Warning.Expected("File path", self.FileName, self.CurrentLineNumber)
FfsFileObj.FileName = self._Token
if FfsFileObj.FileName.replace(TAB_WORKSPACE, '').find('$') == -1:
#check for file path
ErrorCode, ErrorInfo = PathClass(NormPath(FfsFileObj.FileName), GenFdsGlobalVariable.WorkSpaceDir).Validate()
if ErrorCode != 0:
EdkLogger.error("GenFds", ErrorCode, ExtraData=ErrorInfo)
if FfsFileObj.FileType == 'EFI':
self._GetOptRomOverrides(FfsFileObj)
Obj.FfsList.append(FfsFileObj)
return True
## _GetCapInFd() method
#
# Get Cap list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval CapList List of Capsule in FD
#
def _GetCapInFd (self, FdName):
CapList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == 'CAPSULE':
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData.endswith(".cap"):
continue
if elementRegionData is not None and elementRegionData.upper() not in CapList:
CapList.append(elementRegionData.upper())
return CapList
## _GetReferencedFdCapTuple() method
#
# Get FV and FD list referenced by a capsule image
#
# @param self The object pointer
# @param CapObj Capsule section to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdCapTuple(self, CapObj, RefFdList=None, RefFvList=None):
# None defaults avoid the shared mutable default list pitfall
RefFdList = [] if RefFdList is None else RefFdList
RefFvList = [] if RefFvList is None else RefFvList
for CapsuleDataObj in CapObj.CapsuleDataList:
if hasattr(CapsuleDataObj, 'FvName') and CapsuleDataObj.FvName is not None and CapsuleDataObj.FvName.upper() not in RefFvList:
RefFvList.append (CapsuleDataObj.FvName.upper())
elif hasattr(CapsuleDataObj, 'FdName') and CapsuleDataObj.FdName is not None and CapsuleDataObj.FdName.upper() not in RefFdList:
RefFdList.append (CapsuleDataObj.FdName.upper())
elif CapsuleDataObj.Ffs is not None:
if isinstance(CapsuleDataObj.Ffs, FileStatement):
if CapsuleDataObj.Ffs.FvName is not None and CapsuleDataObj.Ffs.FvName.upper() not in RefFvList:
RefFvList.append(CapsuleDataObj.Ffs.FvName.upper())
elif CapsuleDataObj.Ffs.FdName is not None and CapsuleDataObj.Ffs.FdName.upper() not in RefFdList:
RefFdList.append(CapsuleDataObj.Ffs.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(CapsuleDataObj.Ffs, RefFdList, RefFvList)
## _GetFvInFd() method
#
# Get FV list contained in FD
#
# @param self The object pointer
# @param FdName FD name
# @retval FvList list of FV in FD
#
def _GetFvInFd (self, FdName):
FvList = []
if FdName.upper() in self.Profile.FdDict:
FdObj = self.Profile.FdDict[FdName.upper()]
for elementRegion in FdObj.RegionList:
if elementRegion.RegionType == BINARY_FILE_TYPE_FV:
for elementRegionData in elementRegion.RegionDataList:
if elementRegionData.endswith(".fv"):
continue
if elementRegionData is not None and elementRegionData.upper() not in FvList:
FvList.append(elementRegionData.upper())
return FvList
## _GetReferencedFdFvTuple() method
#
# Get FD and FV list referenced by a FFS file
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param RefFdList referenced FD by section
# @param RefFvList referenced FV by section
#
def _GetReferencedFdFvTuple(self, FvObj, RefFdList=None, RefFvList=None):
# None defaults avoid the shared mutable default list pitfall
RefFdList = [] if RefFdList is None else RefFdList
RefFvList = [] if RefFvList is None else RefFvList
for FfsObj in FvObj.FfsList:
if isinstance(FfsObj, FileStatement):
if FfsObj.FvName is not None and FfsObj.FvName.upper() not in RefFvList:
RefFvList.append(FfsObj.FvName.upper())
elif FfsObj.FdName is not None and FfsObj.FdName.upper() not in RefFdList:
RefFdList.append(FfsObj.FdName.upper())
else:
self._GetReferencedFdFvTupleFromSection(FfsObj, RefFdList, RefFvList)
## _GetReferencedFdFvTupleFromSection() method
#
# Get FD and FV list referenced by a FFS section
#
# @param self The object pointer
# @param FfsFile contains sections to be searched
# @param FdList referenced FD by section
# @param FvList referenced FV by section
#
def _GetReferencedFdFvTupleFromSection(self, FfsFile, FdList=None, FvList=None):
# None defaults avoid the shared mutable default list pitfall
FdList = [] if FdList is None else FdList
FvList = [] if FvList is None else FvList
SectionStack = list(FfsFile.SectionList)
while SectionStack != []:
SectionObj = SectionStack.pop()
if isinstance(SectionObj, FvImageSection):
if SectionObj.FvName is not None and SectionObj.FvName.upper() not in FvList:
FvList.append(SectionObj.FvName.upper())
if SectionObj.Fv is not None and SectionObj.Fv.UiFvName is not None and SectionObj.Fv.UiFvName.upper() not in FvList:
FvList.append(SectionObj.Fv.UiFvName.upper())
self._GetReferencedFdFvTuple(SectionObj.Fv, FdList, FvList)
if isinstance(SectionObj, CompressSection) or isinstance(SectionObj, GuidSection):
SectionStack.extend(SectionObj.SectionList)
## CycleReferenceCheck() method
#
# Check whether cycle reference exists in FDF
#
# @param self The object pointer
# @retval True cycle reference exists
# @retval False Not exists cycle reference
#
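# Example of a cycle this method reports (illustrative): FV "A" contains FD
# "X", while a region of FD "X" in turn contains FV "A".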
def CycleReferenceCheck(self):
#
# Check the cycle between FV and FD image
#
MaxLength = len (self.Profile.FvDict)
for FvName in self.Profile.FvDict:
LogStr = "\nCycle Reference Checking for FV: %s\n" % FvName
RefFvStack = {FvName}  # set(FvName) would split the name into individual characters
FdAnalyzedList = set()
Index = 0
while RefFvStack and Index < MaxLength:
Index = Index + 1
FvNameFromStack = RefFvStack.pop()
if FvNameFromStack.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[FvNameFromStack.upper()]
else:
continue
RefFdList = []
RefFvList = []
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "FV %s contains FD %s\n" % (FvNameFromStack, RefFdName)
FvInFdList = self._GetFvInFd(RefFdName)
if FvInFdList != []:
for FvNameInFd in FvInFdList:
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvStack:
RefFvStack.add(FvNameInFd)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
FdAnalyzedList.add(RefFdName)
for RefFvName in RefFvList:
LogStr += "FV %s contains FV %s\n" % (FvNameFromStack, RefFvName)
if RefFvName not in RefFvStack:
RefFvStack.add(RefFvName)
if FvName in RefFvStack or FvNameFromStack in RefFvStack:
EdkLogger.info(LogStr)
return True
#
# Check the cycle between Capsule and FD image
#
MaxLength = len (self.Profile.CapsuleDict)
for CapName in self.Profile.CapsuleDict:
#
# Capsule image to be checked.
#
LogStr = "\n\n\nCycle Reference Checking for Capsule: %s\n" % CapName
RefCapStack = {CapName}
FdAnalyzedList = set()
FvAnalyzedList = set()
Index = 0
while RefCapStack and Index < MaxLength:
Index = Index + 1
CapNameFromStack = RefCapStack.pop()
if CapNameFromStack.upper() in self.Profile.CapsuleDict:
CapObj = self.Profile.CapsuleDict[CapNameFromStack.upper()]
else:
continue
RefFvList = []
RefFdList = []
self._GetReferencedFdCapTuple(CapObj, RefFdList, RefFvList)
FvListLength = 0
FdListLength = 0
while FvListLength < len (RefFvList) or FdListLength < len (RefFdList):
for RefFdName in RefFdList:
if RefFdName in FdAnalyzedList:
continue
LogStr += "Capsule %s contains FD %s\n" % (CapNameFromStack, RefFdName)
for CapNameInFd in self._GetCapInFd(RefFdName):
LogStr += "FD %s contains Capsule %s\n" % (RefFdName, CapNameInFd)
if CapNameInFd not in RefCapStack:
RefCapStack.add(CapNameInFd)  # RefCapStack is a set, which has no append()
if CapName in RefCapStack or CapNameFromStack in RefCapStack:
EdkLogger.info(LogStr)
return True
for FvNameInFd in self._GetFvInFd(RefFdName):
LogStr += "FD %s contains FV %s\n" % (RefFdName, FvNameInFd)
if FvNameInFd not in RefFvList:
RefFvList.append(FvNameInFd)
FdAnalyzedList.add(RefFdName)
#
# the number of the parsed FV and FD image
#
FvListLength = len (RefFvList)
FdListLength = len (RefFdList)
for RefFvName in RefFvList:
if RefFvName in FvAnalyzedList:
continue
LogStr += "Capsule %s contains FV %s\n" % (CapNameFromStack, RefFvName)
if RefFvName.upper() in self.Profile.FvDict:
FvObj = self.Profile.FvDict[RefFvName.upper()]
else:
continue
self._GetReferencedFdFvTuple(FvObj, RefFdList, RefFvList)
FvAnalyzedList.add(RefFvName)
return False
def GetAllIncludedFile (self):
global AllIncludeFileList
return AllIncludeFileList
if __name__ == "__main__":
import sys
try:
test_file = sys.argv[1]
except IndexError as v:
print("Usage: %s filename" % sys.argv[0])
sys.exit(1)
parser = FdfParser(test_file)
try:
parser.ParseFile()
parser.CycleReferenceCheck()
except Warning as X:
print(str(X))
else:
print("Success!")
|
apache-2.0
| -8,537,984,819,681,910,000
| 40.938762
| 223
| 0.573885
| false
| 4.237279
| false
| false
| false
|
folti/subuser
|
logic/subuserlib/commands.py
|
1
|
2023
|
# -*- coding: utf-8 -*-
"""
This module helps us figure out which subuser subcommands can be called.
"""
#external imports
import os
#internal imports
import subuserlib.executablePath
import subuserlib.paths
def getBuiltIn():
"""
Get a list of the names of the built in subuser commands.
"""
try:
commands = set(os.listdir(subuserlib.paths.getSubuserCommandsDir()))
return [command[8:-3] for command in commands if command.endswith(".py") and command.startswith("subuser-")] # Filter out non-.py files and remove the .py suffixes and the "subuser-" prefixes.
except OSError:
return []
def getExternal():
"""
Return the list of "external" subuser commands. These are not built in commands but rather stand alone executables which appear in the user's $PATH and who's names start with "subuser-"
"""
def isPathToCommand(path):
directory, executableName = os.path.split(path)
return executableName.startswith("subuser-")
externalCommandPaths = subuserlib.executablePath.queryPATH(isPathToCommand)
externalCommands = []
subuserPrefixLength=len("subuser-")
for externalCommandPath in externalCommandPaths:
commandDir, executableName = os.path.split(externalCommandPath)
commandName = executableName[subuserPrefixLength:]
if commandName.endswith(".py"):
commandName=commandName[:-3]
externalCommands.append(commandName)
return list(set(externalCommands)) # remove duplicate entries
def getCommands():
"""
Returns a list of commands that may be called by the user.
"""
return list(set(getBuiltIn() + getExternal()))
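# Example (illustrative): with a built-in subuser-list.py and an external
# /usr/bin/subuser-foo on $PATH, getCommands() returns ['foo', 'list']
# (order not guaranteed, since the result passes through a set).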
def getPath(command):
builtInCommandPath = os.path.join(subuserlib.paths.getSubuserCommandsDir(),"subuser-" + command + ".py")
if os.path.exists(builtInCommandPath):
return builtInCommandPath
else:
externalCommandPath = subuserlib.executablePath.which("subuser-"+command)
if externalCommandPath:
return externalCommandPath
else:
return subuserlib.executablePath.which("subuser-"+command+".py")
|
lgpl-3.0
| 4,134,394,350,383,250,000
| 35.781818
| 196
| 0.739001
| false
| 3.890385
| false
| false
| false
|
abranches/backmonitor
|
backmonitor/protocol.py
|
1
|
1832
|
import logging
from twisted.internet.protocol import Factory, Protocol
from frame import decode_frame
from message import decode_message
log = logging.getLogger(__name__)
class ConnectionManager(object):
def __init__(self, backmonitor, addr):
self.backmonitor = backmonitor
self.addr = addr
self._buffer = bytes()
self._open = False
self.bytes_received = 0
self.frames_received = 0
@property
def open(self):
return self._open
def _on_data_received(self, data):
log.debug("_on_data_received(), data length=%d" % len(data))
self._buffer += data
while self._buffer:
consumed_bytes, frame = decode_frame(self._buffer)
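# A consumed byte count of zero means the buffer does not yet hold a
# complete frame, so wait for more data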
if consumed_bytes == 0:
return
self.bytes_received += consumed_bytes
self._buffer = self._buffer[consumed_bytes:]
self._process_frame(frame)
def _process_frame(self, frame):
log.debug("Processing new frame")
message = decode_message(frame)
self.backmonitor.on_new_message(message)
self.frames_received += 1
class BackmonitorTwistedProtocol(Protocol):
def __init__(self, factory, conn_manager):
self.factory = factory
self.conn_manager = conn_manager
def connectionMade(self):
log.debug("New connection estabilished")
self.conn_manager._open = True
self.factory.connected_peers += 1
def dataReceived(self, data):
self.conn_manager._on_data_received(data)
class BackmonitorTwistedFactory(Factory):
def __init__(self, backmonitor):
self.connected_peers = 0
self.backmonitor = backmonitor
def buildProtocol(self, addr):
return BackmonitorTwistedProtocol(self, ConnectionManager(self.backmonitor, addr))
|
apache-2.0
| 6,756,100,460,570,981,000
| 26.757576
| 90
| 0.637555
| false
| 4.173121
| false
| false
| false
|
vntarasov/openpilot
|
selfdrive/registration.py
|
1
|
2605
|
import os
import json
from datetime import datetime, timedelta
from selfdrive.swaglog import cloudlog
from selfdrive.version import version, terms_version, training_version, get_git_commit, get_git_branch, get_git_remote
from common.hardware import HARDWARE
from common.api import api_get
from common.params import Params
from common.file_helpers import mkdirs_exists_ok
from common.basedir import PERSIST
def register():
params = Params()
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
params.put("SubscriberInfo", HARDWARE.get_subscriber_info())
# create a key for auth
# your private key is kept on your device persist partition and never sent to our servers
# do not erase your persist partition
if not os.path.isfile(PERSIST+"/comma/id_rsa.pub"):
cloudlog.warning("generating your personal RSA key")
mkdirs_exists_ok(PERSIST+"/comma")
assert os.system("openssl genrsa -out "+PERSIST+"/comma/id_rsa.tmp 2048") == 0
assert os.system("openssl rsa -in "+PERSIST+"/comma/id_rsa.tmp -pubout -out "+PERSIST+"/comma/id_rsa.tmp.pub") == 0
os.rename(PERSIST+"/comma/id_rsa.tmp", PERSIST+"/comma/id_rsa")
os.rename(PERSIST+"/comma/id_rsa.tmp.pub", PERSIST+"/comma/id_rsa.pub")
# make key readable by app users (ai.comma.plus.offroad)
os.chmod(PERSIST+'/comma/', 0o755)
os.chmod(PERSIST+'/comma/id_rsa', 0o744)
dongle_id = params.get("DongleId", encoding='utf8')
public_key = open(PERSIST+"/comma/id_rsa.pub").read()
# create registration token
# in the future, this key will make JWTs directly
private_key = open(PERSIST+"/comma/id_rsa").read()
# late import
import jwt
register_token = jwt.encode({'register': True, 'exp': datetime.utcnow() + timedelta(hours=1)}, private_key, algorithm='RS256')
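# Short-lived (1 hour) JWT proving possession of the private key; the server
# presumably verifies it against the public key submitted below.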
try:
cloudlog.info("getting pilotauth")
resp = api_get("v2/pilotauth/", method='POST', timeout=15,
imei=HARDWARE.get_imei(0), imei2=HARDWARE.get_imei(1), serial=HARDWARE.get_serial(), public_key=public_key, register_token=register_token)
dongleauth = json.loads(resp.text)
dongle_id = dongleauth["dongle_id"]
params.put("DongleId", dongle_id)
return dongle_id
except Exception:
cloudlog.exception("failed to authenticate")
if dongle_id is not None:
return dongle_id
else:
return None
if __name__ == "__main__":
print(register())
|
mit
| 2,335,180,383,859,445,000
| 37.308824
| 157
| 0.70595
| false
| 3.280856
| false
| false
| false
|
s1na/darkoob
|
darkoob/book/views.py
|
1
|
2366
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.template import RequestContext
from django.db import transaction
from django.utils import simplejson
from darkoob.book.models import Book, Author
from darkoob.book.forms import NewReviewForm
from darkoob.social.views import common_context
from darkoob.migration.models import Migration
from darkoob.book.models import Review
def page(request, book_id, book_title):
try:
book = Book.objects.get(id=book_id)
except Book.DoesNotExist:
raise Http404
template = 'book/book_page.html'
reviews = Review.objects.filter(book=book).order_by("-rating_score")
count = range(1, len(reviews) + 1)
if request.is_ajax():
template = 'book/reviews.html'
context = {
'new_review_form': NewReviewForm(),
'book': book,
'rate': book.rating.get_rating(),
'reviews': reviews,
'count': count[::-1],
'migrations': Migration.objects.filter(book=book),
}
common_context(request, context)
return render(request, template, context)
from avatar.templatetags import avatar_tags
def book_lookup(request):
results = []
if request.method == "GET":
if u'query' in request.GET:
value = request.GET[u'query']
model_results = Book.objects.filter(title__icontains=value)
results = [{'book_title': x.title, 'book_id': x.id, 'photo': x.thumb.url, 'author_name': x.author_names()} for x in model_results]
jt = simplejson.dumps(results)
return HttpResponse(jt, mimetype='application/json')
def author_lookup(request):
results = []
if request.method == "GET":
if u'query' in request.GET:
value = request.GET[u'query']
model_results = Author.objects.filter(name__icontains=value)
results = [x.name for x in model_results]
jt = simplejson.dumps(results)
return HttpResponse(jt, mimetype='application/json')
|
mit
| 4,478,514,506,639,604,000
| 30.546667
| 145
| 0.677515
| false
| 3.834684
| false
| false
| false
|
GoogleCloudPlatform/PerfKitBenchmarker
|
perfkitbenchmarker/providers/openstack/swift.py
|
1
|
4141
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to Swift Storage Service."""
import os
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
flags.DEFINE_boolean('openstack_swift_insecure', False,
'Allow swiftclient to access Swift service without \n'
'having to verify the SSL certificate')
FLAGS = flags.FLAGS
SWIFTCLIENT_LIB_VERSION = 'python-swiftclient_lib_version'
class SwiftStorageService(object_storage_service.ObjectStorageService):
"""Interface to OpenStack Swift."""
STORAGE_NAME = providers.OPENSTACK
def __init__(self):
self.swift_command_prefix = ''
def PrepareService(self, location):
openstack_creds_set = ('OS_AUTH_URL' in os.environ,
'OS_TENANT_NAME' in os.environ,
'OS_USERNAME' in os.environ,
'OS_PASSWORD' in os.environ,)
if not all(openstack_creds_set):
raise errors.Benchmarks.MissingObjectCredentialException(
'OpenStack credentials not found in environment variables')
self.swift_command_parts = [
'--os-auth-url', os.environ['OS_AUTH_URL'],
'--os-tenant-name', os.environ['OS_TENANT_NAME'],
'--os-username', os.environ['OS_USERNAME'],
'--os-password', os.environ['OS_PASSWORD']]
if FLAGS.openstack_swift_insecure:
self.swift_command_parts.append('--insecure')
self.swift_command_prefix = ' '.join(self.swift_command_parts)
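# Illustrative resulting prefix (values hypothetical):
#   '--os-auth-url http://keystone:5000/v2.0 --os-tenant-name demo '
#   '--os-username perfkit --os-password secret'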
def MakeBucket(self, bucket, raise_on_failure=True):
_, stderr, ret_code = vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['post', bucket],
raise_on_failure=False)
if ret_code and raise_on_failure:
raise errors.Benchmarks.BucketCreationError(stderr)
def DeleteBucket(self, bucket):
self.EmptyBucket(bucket)
vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['delete', bucket],
raise_on_failure=False)
def Copy(self, src_url, dst_url):
"""See base class."""
raise NotImplementedError()
def CopyToBucket(self, src_path, bucket, object_path):
"""See base class."""
raise NotImplementedError()
def MakeRemoteCliDownloadUrl(self, bucket, object_path):
"""See base class."""
raise NotImplementedError()
def GenerateCliDownloadFileCommand(self, src_url, local_path):
"""See base class."""
raise NotImplementedError()
def List(self, buckets):
"""See base class."""
raise NotImplementedError()
def EmptyBucket(self, bucket):
vm_util.IssueCommand(
['swift'] + self.swift_command_parts + ['delete', bucket],
raise_on_failure=False)
def PrepareVM(self, vm):
vm.Install('swift_client')
def CleanupVM(self, vm):
vm.Uninstall('swift_client')
vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall absl-py')
def CLIUploadDirectory(self, vm, directory, file_names, bucket):
return vm.RemoteCommand(
'time swift %s upload %s %s'
% (self.swift_command_prefix, bucket, directory))
def CLIDownloadBucket(self, vm, bucket, objects, dest):
return vm.RemoteCommand(
'time swift %s download %s -D %s'
% (self.swift_command_prefix, bucket, dest))
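    # Illustrative sketch (added for clarity; bucket and paths are
    # hypothetical): given the format strings above, the two CLI helpers run
    # remote commands of roughly this shape:
    #
    #   time swift <credential flags> upload pkb-bucket /tmp/objects
    #   time swift <credential flags> download pkb-bucket -D /tmp/downloads
    #
    # where <credential flags> is self.swift_command_prefix as assembled in
    # PrepareService.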
def Metadata(self, vm):
return {SWIFTCLIENT_LIB_VERSION:
linux_packages.GetPipPackageVersion(vm, 'python-swiftclient')}
|
apache-2.0
| 5,507,729,754,466,135,000
| 34.09322
| 75
| 0.683651
| false
| 3.921402
| false
| false
| false
|
cpennington/edx-platform
|
lms/djangoapps/courseware/module_render.py
|
1
|
55817
|
"""
Module rendering
"""
import hashlib
import json
import logging
import textwrap
from collections import OrderedDict
from functools import partial
import six
from completion import waffle as completion_waffle
from completion.models import BlockCompletion
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.middleware.csrf import CsrfViewMiddleware
from django.template.context_processors import csrf
from django.urls import reverse
from django.utils.text import slugify
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from edx_django_utils.cache import RequestCache
from edx_django_utils.monitoring import set_custom_metrics_for_course_key, set_monitoring_transaction_name
from edx_proctoring.api import get_attempt_status_summary
from edx_proctoring.services import ProctoringService
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_when.field_data import DateLookupFieldData
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from requests.auth import HTTPBasicAuth
from rest_framework.decorators import api_view
from rest_framework.exceptions import APIException
from six import text_type
from web_fragments.fragment import Fragment
from xblock.core import XBlock
from xblock.django.request import django_to_webob_request, webob_to_django_response
from xblock.exceptions import NoSuchHandlerError, NoSuchViewError
from xblock.reference.plugins import FSService
from xblock.runtime import KvsFieldData
import static_replace
from capa.xqueue_interface import XQueueInterface
from lms.djangoapps.courseware.access import get_user_role, has_access
from lms.djangoapps.courseware.entrance_exams import user_can_skip_entrance_exam, user_has_passed_entrance_exam
from lms.djangoapps.courseware.masquerade import (
MasqueradingKeyValueStore,
filter_displayed_blocks,
is_masquerading_as_specific_student,
setup_masquerade
)
from lms.djangoapps.courseware.model_data import DjangoKeyValueStore, FieldDataCache
from edxmako.shortcuts import render_to_string
from lms.djangoapps.courseware.field_overrides import OverrideFieldData
from lms.djangoapps.courseware.services import UserStateService
from lms.djangoapps.grades.api import GradesUtilService
from lms.djangoapps.grades.api import signals as grades_signals
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from lms.djangoapps.lms_xblock.runtime import LmsModuleSystem
from lms.djangoapps.verify_student.services import XBlockVerificationService
from openedx.core.djangoapps.bookmarks.services import BookmarksService
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.credit.services import CreditService
from openedx.core.djangoapps.util.user_utils import SystemUser
from openedx.core.djangolib.markup import HTML
from openedx.core.lib.api.authentication import BearerAuthenticationAllowInactiveUser
from openedx.core.lib.api.view_utils import view_auth_classes
from openedx.core.lib.gating.services import GatingService
from openedx.core.lib.license import wrap_with_license
from openedx.core.lib.url_utils import quote_slashes, unquote_slashes
from openedx.core.lib.xblock_utils import (
add_staff_markup,
get_aside_from_xblock,
hash_resource,
is_xblock_aside,
replace_course_urls,
replace_jump_to_id_urls,
replace_static_urls
)
from openedx.core.lib.xblock_utils import request_token as xblock_request_token
from openedx.core.lib.xblock_utils import wrap_xblock
from openedx.features.course_duration_limits.access import course_expiration_wrapper
from openedx.features.discounts.utils import offer_banner_wrapper
from student.models import anonymous_id_for_user, user_by_anonymous_id
from student.roles import CourseBetaTesterRole
from track import contexts
from util import milestones_helpers
from util.json_request import JsonResponse
from xblock_django.user_service import DjangoXBlockUserService
from xmodule.contentstore.django import contentstore
from xmodule.error_module import ErrorDescriptor, NonStaffErrorDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.lti_module import LTIModule
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.util.sandboxing import can_execute_unsafe_code, get_python_lib_zip
from xmodule.x_module import XModuleDescriptor
log = logging.getLogger(__name__)
if settings.XQUEUE_INTERFACE.get('basic_auth') is not None:
REQUESTS_AUTH = HTTPBasicAuth(*settings.XQUEUE_INTERFACE['basic_auth'])
else:
REQUESTS_AUTH = None
XQUEUE_INTERFACE = XQueueInterface(
settings.XQUEUE_INTERFACE['url'],
settings.XQUEUE_INTERFACE['django_auth'],
REQUESTS_AUTH,
)
# TODO: course_id and course_key are used interchangeably in this file, which is wrong.
# Some brave person should make the variable names consistent someday, but the code's
# coupled enough that it's kind of tricky--you've been warned!
class LmsModuleRenderError(Exception):
"""
An exception class for exceptions thrown by module_render that don't fit well elsewhere
"""
pass
def make_track_function(request):
'''
Make a tracking function that logs what happened.
For use in ModuleSystem.
'''
import track.views
def function(event_type, event):
return track.views.server_track(request, event_type, event, page='x_module')
return function
def toc_for_course(user, request, course, active_chapter, active_section, field_data_cache):
'''
Create a table of contents from the module store
Return format:
{ 'chapters': [
{'display_name': name, 'url_name': url_name, 'sections': SECTIONS, 'active': bool},
],
'previous_of_active_section': {..},
'next_of_active_section': {..}
}
where SECTIONS is a list
[ {'display_name': name, 'url_name': url_name,
'format': format, 'due': due, 'active' : bool, 'graded': bool}, ...]
where previous_of_active_section and next_of_active_section have information on the
next/previous sections of the active section.
active is set for the section and chapter corresponding to the passed
parameters, which are expected to be url_names of the chapter+section.
Everything else comes from the xml, or defaults to "".
chapters with name 'hidden' are skipped.
NOTE: assumes that if we got this far, user has access to course. Returns
None if this is not the case.
field_data_cache must include data from the course module and 2 levels of its descendants
'''
with modulestore().bulk_operations(course.id):
course_module = get_module_for_descriptor(
user, request, course, field_data_cache, course.id, course=course
)
if course_module is None:
return None, None, None
toc_chapters = list()
chapters = course_module.get_display_items()
# Check for content which needs to be completed
# before the rest of the content is made available
required_content = milestones_helpers.get_required_content(course.id, user)
# The user may not actually have to complete the entrance exam, if one is required
if user_can_skip_entrance_exam(user, course):
required_content = [content for content in required_content if not content == course.entrance_exam_id]
previous_of_active_section, next_of_active_section = None, None
last_processed_section, last_processed_chapter = None, None
found_active_section = False
for chapter in chapters:
# Only show required content, if there is required content
# chapter.hide_from_toc is read-only (bool)
# xss-lint: disable=python-deprecated-display-name
display_id = slugify(chapter.display_name_with_default_escaped)
local_hide_from_toc = False
if required_content:
if six.text_type(chapter.location) not in required_content:
local_hide_from_toc = True
# Skip the current chapter if a hide flag is tripped
if chapter.hide_from_toc or local_hide_from_toc:
continue
sections = list()
for section in chapter.get_display_items():
# skip the section if it is hidden from the user
if section.hide_from_toc:
continue
is_section_active = (chapter.url_name == active_chapter and section.url_name == active_section)
if is_section_active:
found_active_section = True
section_context = {
# xss-lint: disable=python-deprecated-display-name
'display_name': section.display_name_with_default_escaped,
'url_name': section.url_name,
'format': section.format if section.format is not None else '',
'due': section.due,
'active': is_section_active,
'graded': section.graded,
}
_add_timed_exam_info(user, course, section, section_context)
# update next and previous of active section, if applicable
if is_section_active:
if last_processed_section:
previous_of_active_section = last_processed_section.copy()
previous_of_active_section['chapter_url_name'] = last_processed_chapter.url_name
elif found_active_section and not next_of_active_section:
next_of_active_section = section_context.copy()
next_of_active_section['chapter_url_name'] = chapter.url_name
sections.append(section_context)
last_processed_section = section_context
last_processed_chapter = chapter
toc_chapters.append({
# xss-lint: disable=python-deprecated-display-name
'display_name': chapter.display_name_with_default_escaped,
'display_id': display_id,
'url_name': chapter.url_name,
'sections': sections,
'active': chapter.url_name == active_chapter
})
return {
'chapters': toc_chapters,
'previous_of_active_section': previous_of_active_section,
'next_of_active_section': next_of_active_section,
}
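# Illustrative sketch (an editorial addition; names are hypothetical): a
# minimal structure as returned by toc_for_course, matching the docstring and
# the section_context keys built above.
#
#   {
#       'chapters': [
#           {'display_name': 'Week 1', 'display_id': 'week-1',
#            'url_name': 'week_1', 'active': True,
#            'sections': [
#                {'display_name': 'Intro', 'url_name': 'intro',
#                 'format': 'Lecture', 'due': None,
#                 'active': True, 'graded': False},
#            ]},
#       ],
#       'previous_of_active_section': None,
#       'next_of_active_section': None,
#   }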
def _add_timed_exam_info(user, course, section, section_context):
"""
Add in rendering context if exam is a timed exam (which includes proctored)
"""
section_is_time_limited = (
getattr(section, 'is_time_limited', False) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if section_is_time_limited:
# call into edx_proctoring subsystem
# to get relevant proctoring information regarding this
# level of the courseware
#
# This will return None, if (user, course_id, content_id)
# is not applicable
timed_exam_attempt_context = None
try:
timed_exam_attempt_context = get_attempt_status_summary(
user.id,
six.text_type(course.id),
six.text_type(section.location)
)
except Exception as ex: # pylint: disable=broad-except
# safety net in case something blows up in edx_proctoring
            # as these are just informational descriptions, it is better
# to log and continue (which is safe) than to have it be an
# unhandled exception
log.exception(ex)
if timed_exam_attempt_context:
# yes, user has proctoring context about
# this level of the courseware
# so add to the accordion data context
section_context.update({
'proctoring': timed_exam_attempt_context,
})
def get_module(user, request, usage_key, field_data_cache,
position=None, log_if_not_found=True, wrap_xmodule_display=True,
grade_bucket_type=None, depth=0,
static_asset_path='', course=None, will_recheck_access=False):
"""
Get an instance of the xmodule class identified by location,
setting the state based on an existing StudentModule, or creating one if none
exists.
Arguments:
- user : User for whom we're getting the module
- request : current django HTTPrequest. Note: request.user isn't used for anything--all auth
and such works based on user.
- usage_key : A UsageKey object identifying the module to load
- field_data_cache : a FieldDataCache
- position : extra information from URL for user-specified
position within module
- log_if_not_found : If this is True, we log a debug message if we cannot find the requested xmodule.
- wrap_xmodule_display : If this is True, wrap the output display in a single div to allow for the
XModule javascript to be bound correctly
- depth : number of levels of descendents to cache when loading this module.
None means cache all descendents
- static_asset_path : static asset path to use (overrides descriptor's value); needed
by get_course_info_section, because info section modules
do not have a course as the parent module, and thus do not
inherit this lms key value.
- will_recheck_access : If True, the caller commits to re-checking access on each child XBlock
before rendering the content in order to display access error messages
to the user.
Returns: xmodule instance, or None if the user does not have access to the
module. If there's an error, will try to return an instance of ErrorModule
if possible. If not possible, return None.
"""
try:
descriptor = modulestore().get_item(usage_key, depth=depth)
return get_module_for_descriptor(user, request, descriptor, field_data_cache, usage_key.course_key,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
course=course, will_recheck_access=will_recheck_access)
except ItemNotFoundError:
if log_if_not_found:
log.debug("Error in get_module: ItemNotFoundError")
return None
except: # pylint: disable=W0702
# Something has gone terribly wrong, but still not letting it turn into a 500.
log.exception("Error in get_module")
return None
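# Illustrative usage sketch (an editorial addition; identifiers are
# hypothetical): a typical caller builds a FieldDataCache first and then asks
# for the user-bound module, mirroring load_single_xblock further below.
#
#   descriptor = modulestore().get_item(usage_key)
#   field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
#       usage_key.course_key, request.user, descriptor, depth=2)
#   module = get_module(request.user, request, usage_key, field_data_cache,
#                       course=course)
#   if module is None:
#       raise Http404  # access denied or module missing, as described above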
def display_access_messages(user, block, view, frag, context): # pylint: disable=W0613
"""
An XBlock wrapper that replaces the content fragment with a fragment or message determined by
the has_access check.
"""
blocked_prior_sibling = RequestCache('display_access_messages_prior_sibling')
load_access = has_access(user, 'load', block, block.scope_ids.usage_id.course_key)
if load_access:
blocked_prior_sibling.delete(block.parent)
return frag
prior_sibling = blocked_prior_sibling.get_cached_response(block.parent)
if prior_sibling.is_found and prior_sibling.value.error_code == load_access.error_code:
return Fragment(u"")
else:
blocked_prior_sibling.set(block.parent, load_access)
if load_access.user_fragment:
msg_fragment = load_access.user_fragment
elif load_access.user_message:
msg_fragment = Fragment(textwrap.dedent(HTML(u"""\
<div>{}</div>
""").format(load_access.user_message)))
else:
msg_fragment = Fragment(u"")
if load_access.developer_message and has_access(user, 'staff', block, block.scope_ids.usage_id.course_key):
msg_fragment.content += textwrap.dedent(HTML(u"""\
<div>{}</div>
""").format(load_access.developer_message))
return msg_fragment
def get_xqueue_callback_url_prefix(request):
"""
Calculates default prefix based on request, but allows override via settings
This is separated from get_module_for_descriptor so that it can be called
by the LMS before submitting background tasks to run. The xqueue callbacks
should go back to the LMS, not to the worker.
"""
prefix = '{proto}://{host}'.format(
proto=request.META.get('HTTP_X_FORWARDED_PROTO', 'https' if request.is_secure() else 'http'),
host=request.get_host()
)
return settings.XQUEUE_INTERFACE.get('callback_url', prefix)
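# Illustrative example (hypothetical host, an editorial addition): behind a
# proxy that sets HTTP_X_FORWARDED_PROTO to 'https', the computed prefix would
# be:
#
#   'https://lms.example.com'
#
# unless settings.XQUEUE_INTERFACE supplies an explicit 'callback_url'.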
# pylint: disable=too-many-statements
def get_module_for_descriptor(user, request, descriptor, field_data_cache, course_key,
position=None, wrap_xmodule_display=True, grade_bucket_type=None,
static_asset_path='', disable_staff_debug_info=False,
course=None, will_recheck_access=False):
"""
Implements get_module, extracting out the request-specific functionality.
disable_staff_debug_info : If this is True, exclude staff debug information in the rendering of the module.
See get_module() docstring for further details.
"""
track_function = make_track_function(request)
xqueue_callback_url_prefix = get_xqueue_callback_url_prefix(request)
user_location = getattr(request, 'session', {}).get('country_code')
student_kvs = DjangoKeyValueStore(field_data_cache)
if is_masquerading_as_specific_student(user, course_key):
student_kvs = MasqueradingKeyValueStore(student_kvs, request.session)
student_data = KvsFieldData(student_kvs)
return get_module_for_descriptor_internal(
user=user,
descriptor=descriptor,
student_data=student_data,
course_id=course_key,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=xblock_request_token(request),
disable_staff_debug_info=disable_staff_debug_info,
course=course,
will_recheck_access=will_recheck_access,
)
def get_module_system_for_user(
user,
student_data, # TODO
# Arguments preceding this comment have user binding, those following don't
descriptor,
course_id,
track_function,
xqueue_callback_url_prefix,
request_token,
position=None,
wrap_xmodule_display=True,
grade_bucket_type=None,
static_asset_path='',
user_location=None,
disable_staff_debug_info=False,
course=None,
will_recheck_access=False,
):
"""
Helper function that returns a module system and student_data bound to a user and a descriptor.
The purpose of this function is to factor out everywhere a user is implicitly bound when creating a module,
to allow an existing module to be re-bound to a user. Most of the user bindings happen when creating the
closures that feed the instantiation of ModuleSystem.
    The arguments fall into two categories: those that have an explicit or implicit user binding, which are user
    and student_data, and those that don't and are just present so that ModuleSystem can be instantiated, which
    are all the other arguments. Ultimately, this isn't too different from how get_module_for_descriptor_internal
    worked before the refactoring.
Arguments:
see arguments for get_module()
request_token (str): A token unique to the request use by xblock initialization
Returns:
(LmsModuleSystem, KvsFieldData): (module system, student_data) bound to, primarily, the user and descriptor
"""
def make_xqueue_callback(dispatch='score_update'):
"""
Returns fully qualified callback URL for external queueing system
"""
relative_xqueue_callback_url = reverse(
'xqueue_callback',
kwargs=dict(
course_id=text_type(course_id),
userid=str(user.id),
mod_id=text_type(descriptor.location),
dispatch=dispatch
),
)
return xqueue_callback_url_prefix + relative_xqueue_callback_url
# Default queuename is course-specific and is derived from the course that
# contains the current module.
# TODO: Queuename should be derived from 'course_settings.json' of each course
xqueue_default_queuename = descriptor.location.org + '-' + descriptor.location.course
xqueue = {
'interface': XQUEUE_INTERFACE,
'construct_callback': make_xqueue_callback,
'default_queuename': xqueue_default_queuename.replace(' ', '_'),
'waittime': settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS
}
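    # Illustrative example (an editorial addition; ids and route shape are
    # hypothetical, the exact path comes from the project's URLconf): for
    # dispatch='score_update', make_xqueue_callback yields something like
    #
    #   'https://lms.example.com' + reverse('xqueue_callback', ...)
    #
    # i.e. the reversed callback route appended to the prefix computed by
    # get_xqueue_callback_url_prefix().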
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor_internal() with all values except `descriptor` set.
Because it does an access check, it may return None.
"""
# TODO: fix this so that make_xqueue_callback uses the descriptor passed into
# inner_get_module, not the parent's callback. Add it as an argument....
return get_module_for_descriptor_internal(
user=user,
descriptor=descriptor,
student_data=student_data,
course_id=course_id,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=request_token,
course=course,
will_recheck_access=will_recheck_access,
)
def get_event_handler(event_type):
"""
Return an appropriate function to handle the event.
Returns None if no special processing is required.
"""
handlers = {
'grade': handle_grade_event,
}
if completion_waffle.waffle().is_enabled(completion_waffle.ENABLE_COMPLETION_TRACKING):
handlers.update({
'completion': handle_completion_event,
'progress': handle_deprecated_progress_event,
})
return handlers.get(event_type)
def publish(block, event_type, event):
"""
A function that allows XModules to publish events.
"""
handle_event = get_event_handler(event_type)
if handle_event and not is_masquerading_as_specific_student(user, course_id):
handle_event(block, event)
else:
context = contexts.course_context_from_course_id(course_id)
if block.runtime.user_id:
context['user_id'] = block.runtime.user_id
context['asides'] = {}
for aside in block.runtime.get_asides(block):
if hasattr(aside, 'get_event_context'):
aside_event_info = aside.get_event_context(event_type, event)
if aside_event_info is not None:
context['asides'][aside.scope_ids.block_type] = aside_event_info
with tracker.get_tracker().context(event_type, context):
track_function(event_type, event)
def handle_completion_event(block, event):
"""
Submit a completion object for the block.
"""
if not completion_waffle.waffle().is_enabled(completion_waffle.ENABLE_COMPLETION_TRACKING):
raise Http404
else:
BlockCompletion.objects.submit_completion(
user=user,
block_key=block.scope_ids.usage_id,
completion=event['completion'],
)
def handle_grade_event(block, event):
"""
Submit a grade for the block.
"""
if not user.is_anonymous:
grades_signals.SCORE_PUBLISHED.send(
sender=None,
block=block,
user=user,
raw_earned=event['value'],
raw_possible=event['max_value'],
only_if_higher=event.get('only_if_higher'),
score_deleted=event.get('score_deleted'),
grader_response=event.get('grader_response')
)
def handle_deprecated_progress_event(block, event):
"""
DEPRECATED: Submit a completion for the block represented by the
progress event.
This exists to support the legacy progress extension used by
edx-solutions. New XBlocks should not emit these events, but instead
emit completion events directly.
"""
if not completion_waffle.waffle().is_enabled(completion_waffle.ENABLE_COMPLETION_TRACKING):
raise Http404
else:
requested_user_id = event.get('user_id', user.id)
if requested_user_id != user.id:
log.warning(u"{} tried to submit a completion on behalf of {}".format(user, requested_user_id))
return
# If blocks explicitly declare support for the new completion API,
# we expect them to emit 'completion' events,
# and we ignore the deprecated 'progress' events
# in order to avoid duplicate work and possibly conflicting semantics.
if not getattr(block, 'has_custom_completion', False):
BlockCompletion.objects.submit_completion(
user=user,
block_key=block.scope_ids.usage_id,
completion=1.0,
)
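    # Illustrative examples (an editorial addition; numbers are made up): the
    # handlers above expect event payloads of roughly this shape.
    #
    #   publish(block, 'grade', {'value': 7.0, 'max_value': 10.0,
    #                            'only_if_higher': True})
    #   publish(block, 'completion', {'completion': 1.0})
    #   publish(block, 'progress', {})  # deprecated path handled above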
def rebind_noauth_module_to_user(module, real_user):
"""
A function that allows a module to get re-bound to a real user if it was previously bound to an AnonymousUser.
Will only work within a module bound to an AnonymousUser, e.g. one that's instantiated by the noauth_handler.
Arguments:
module (any xblock type): the module to rebind
real_user (django.contrib.auth.models.User): the user to bind to
Returns:
nothing (but the side effect is that module is re-bound to real_user)
"""
if user.is_authenticated:
err_msg = ("rebind_noauth_module_to_user can only be called from a module bound to "
"an anonymous user")
log.error(err_msg)
raise LmsModuleRenderError(err_msg)
field_data_cache_real_user = FieldDataCache.cache_for_descriptor_descendents(
course_id,
real_user,
module.descriptor,
asides=XBlockAsidesConfig.possible_asides(),
)
student_data_real_user = KvsFieldData(DjangoKeyValueStore(field_data_cache_real_user))
(inner_system, inner_student_data) = get_module_system_for_user(
user=real_user,
student_data=student_data_real_user, # These have implicit user bindings, rest of args considered not to
descriptor=module.descriptor,
course_id=course_id,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=request_token,
course=course,
will_recheck_access=will_recheck_access,
)
module.descriptor.bind_for_student(
inner_system,
real_user.id,
[
partial(DateLookupFieldData, course_id=course_id, user=user),
partial(OverrideFieldData.wrap, real_user, course),
partial(LmsFieldData, student_data=inner_student_data),
],
)
module.descriptor.scope_ids = (
module.descriptor.scope_ids._replace(user_id=real_user.id)
)
module.scope_ids = module.descriptor.scope_ids # this is needed b/c NamedTuples are immutable
# now bind the module to the new ModuleSystem instance and vice-versa
module.runtime = inner_system
inner_system.xmodule_instance = module
# Build a list of wrapping functions that will be applied in order
# to the Fragment content coming out of the xblocks that are about to be rendered.
block_wrappers = []
if is_masquerading_as_specific_student(user, course_id):
block_wrappers.append(filter_displayed_blocks)
if settings.FEATURES.get("LICENSING", False):
block_wrappers.append(wrap_with_license)
# Wrap the output display in a single div to allow for the XModule
# javascript to be bound correctly
if wrap_xmodule_display is True:
block_wrappers.append(partial(
wrap_xblock,
'LmsRuntime',
extra_data={'course-id': text_type(course_id)},
usage_id_serializer=lambda usage_id: quote_slashes(text_type(usage_id)),
request_token=request_token,
))
# TODO (cpennington): When modules are shared between courses, the static
# prefix is going to have to be specific to the module, not the directory
# that the xml was loaded from
# Rewrite urls beginning in /static to point to course-specific content
block_wrappers.append(partial(
replace_static_urls,
getattr(descriptor, 'data_dir', None),
course_id=course_id,
static_asset_path=static_asset_path or descriptor.static_asset_path
))
# Allow URLs of the form '/course/' refer to the root of multicourse directory
# hierarchy of this course
block_wrappers.append(partial(replace_course_urls, course_id))
# this will rewrite intra-courseware links (/jump_to_id/<id>). This format
# is an improvement over the /course/... format for studio authored courses,
# because it is agnostic to course-hierarchy.
# NOTE: module_id is empty string here. The 'module_id' will get assigned in the replacement
# function, we just need to specify something to get the reverse() to work.
block_wrappers.append(partial(
replace_jump_to_id_urls,
course_id,
reverse('jump_to_id', kwargs={'course_id': text_type(course_id), 'module_id': ''}),
))
block_wrappers.append(partial(display_access_messages, user))
block_wrappers.append(partial(course_expiration_wrapper, user))
block_wrappers.append(partial(offer_banner_wrapper, user))
if settings.FEATURES.get('DISPLAY_DEBUG_INFO_TO_STAFF'):
if is_masquerading_as_specific_student(user, course_id):
# When masquerading as a specific student, we want to show the debug button
# unconditionally to enable resetting the state of the student we are masquerading as.
# We already know the user has staff access when masquerading is active.
            staff_access = True
            # To figure out whether the user has instructor access, we temporarily remove the
            # masquerade_settings from the real_user. With the masquerading settings in place,
            # the result would always be "False".
            masquerade_settings = user.real_user.masquerade_settings
            del user.real_user.masquerade_settings
            instructor_access = bool(has_access(user.real_user, 'instructor', descriptor, course_id))
            user.real_user.masquerade_settings = masquerade_settings
        else:
            staff_access = has_access(user, 'staff', descriptor, course_id)
            instructor_access = bool(has_access(user, 'instructor', descriptor, course_id))
        if staff_access:
            block_wrappers.append(partial(add_staff_markup, user, instructor_access, disable_staff_debug_info))
# These modules store data using the anonymous_student_id as a key.
# To prevent loss of data, we will continue to provide old modules with
# the per-student anonymized id (as we have in the past),
# while giving selected modules a per-course anonymized id.
# As we have the time to manually test more modules, we can add to the list
# of modules that get the per-course anonymized id.
is_pure_xblock = isinstance(descriptor, XBlock) and not isinstance(descriptor, XModuleDescriptor)
module_class = getattr(descriptor, 'module_class', None)
is_lti_module = not is_pure_xblock and issubclass(module_class, LTIModule)
if (is_pure_xblock and not getattr(descriptor, 'requires_per_student_anonymous_id', False)) or is_lti_module:
anonymous_student_id = anonymous_id_for_user(user, course_id)
else:
anonymous_student_id = anonymous_id_for_user(user, None)
field_data = DateLookupFieldData(descriptor._field_data, course_id, user) # pylint: disable=protected-access
field_data = LmsFieldData(field_data, student_data)
user_is_staff = bool(has_access(user, u'staff', descriptor.location, course_id))
system = LmsModuleSystem(
track_function=track_function,
render_template=render_to_string,
static_url=settings.STATIC_URL,
xqueue=xqueue,
# TODO (cpennington): Figure out how to share info between systems
filestore=descriptor.runtime.resources_fs,
get_module=inner_get_module,
user=user,
debug=settings.DEBUG,
hostname=settings.SITE_NAME,
# TODO (cpennington): This should be removed when all html from
# a module is coming through get_html and is therefore covered
# by the replace_static_urls code below
replace_urls=partial(
static_replace.replace_static_urls,
data_directory=getattr(descriptor, 'data_dir', None),
course_id=course_id,
static_asset_path=static_asset_path or descriptor.static_asset_path,
),
replace_course_urls=partial(
static_replace.replace_course_urls,
course_key=course_id
),
replace_jump_to_id_urls=partial(
static_replace.replace_jump_to_id_urls,
course_id=course_id,
jump_to_id_base_url=reverse('jump_to_id', kwargs={'course_id': text_type(course_id), 'module_id': ''})
),
node_path=settings.NODE_PATH,
publish=publish,
anonymous_student_id=anonymous_student_id,
course_id=course_id,
cache=cache,
can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
get_python_lib_zip=(lambda: get_python_lib_zip(contentstore, course_id)),
# TODO: When we merge the descriptor and module systems, we can stop reaching into the mixologist (cpennington)
mixins=descriptor.runtime.mixologist._mixins, # pylint: disable=protected-access
wrappers=block_wrappers,
get_real_user=user_by_anonymous_id,
services={
'fs': FSService(),
'field-data': field_data,
'user': DjangoXBlockUserService(user, user_is_staff=user_is_staff),
'verification': XBlockVerificationService(),
'proctoring': ProctoringService(),
'milestones': milestones_helpers.get_service(),
'credit': CreditService(),
'bookmarks': BookmarksService(user=user),
'gating': GatingService(),
'grade_utils': GradesUtilService(course_id=course_id),
'user_state': UserStateService(),
},
get_user_role=lambda: get_user_role(user, course_id),
descriptor_runtime=descriptor._runtime, # pylint: disable=protected-access
rebind_noauth_module_to_user=rebind_noauth_module_to_user,
user_location=user_location,
request_token=request_token,
)
# pass position specified in URL to module through ModuleSystem
if position is not None:
try:
position = int(position)
except (ValueError, TypeError):
log.exception(u'Non-integer %r passed as position.', position)
position = None
system.set('position', position)
system.set(u'user_is_staff', user_is_staff)
system.set(u'user_is_admin', bool(has_access(user, u'staff', 'global')))
system.set(u'user_is_beta_tester', CourseBetaTesterRole(course_id).has_user(user))
system.set(u'days_early_for_beta', descriptor.days_early_for_beta)
# make an ErrorDescriptor -- assuming that the descriptor's system is ok
if has_access(user, u'staff', descriptor.location, course_id):
system.error_descriptor_class = ErrorDescriptor
else:
system.error_descriptor_class = NonStaffErrorDescriptor
return system, field_data
# TODO: Find all the places that this method is called and figure out how to
# get a loaded course passed into it
def get_module_for_descriptor_internal(user, descriptor, student_data, course_id,
track_function, xqueue_callback_url_prefix, request_token,
position=None, wrap_xmodule_display=True, grade_bucket_type=None,
static_asset_path='', user_location=None, disable_staff_debug_info=False,
course=None, will_recheck_access=False):
"""
Actually implement get_module, without requiring a request.
See get_module() docstring for further details.
Arguments:
request_token (str): A unique token for this request, used to isolate xblock rendering
"""
(system, student_data) = get_module_system_for_user(
user=user,
student_data=student_data, # These have implicit user bindings, the rest of args are considered not to
descriptor=descriptor,
course_id=course_id,
track_function=track_function,
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
position=position,
wrap_xmodule_display=wrap_xmodule_display,
grade_bucket_type=grade_bucket_type,
static_asset_path=static_asset_path,
user_location=user_location,
request_token=request_token,
disable_staff_debug_info=disable_staff_debug_info,
course=course,
will_recheck_access=will_recheck_access,
)
descriptor.bind_for_student(
system,
user.id,
[
partial(DateLookupFieldData, course_id=course_id, user=user),
partial(OverrideFieldData.wrap, user, course),
partial(LmsFieldData, student_data=student_data),
],
)
descriptor.scope_ids = descriptor.scope_ids._replace(user_id=user.id)
# Do not check access when it's a noauth request.
    # Note that the access check needs to happen after the descriptor is bound
# for the student, since there may be field override data for the student
# that affects xblock visibility.
user_needs_access_check = getattr(user, 'known', True) and not isinstance(user, SystemUser)
if user_needs_access_check:
access = has_access(user, 'load', descriptor, course_id)
# A descriptor should only be returned if either the user has access, or the user doesn't have access, but
# the failed access has a message for the user and the caller of this function specifies it will check access
# again. This allows blocks to show specific error message or upsells when access is denied.
caller_will_handle_access_error = (
not access
and will_recheck_access
and (access.user_message or access.user_fragment)
)
if access or caller_will_handle_access_error:
return descriptor
return None
return descriptor
def load_single_xblock(request, user_id, course_id, usage_key_string, course=None, will_recheck_access=False):
"""
Load a single XBlock identified by usage_key_string.
"""
usage_key = UsageKey.from_string(usage_key_string)
course_key = CourseKey.from_string(course_id)
usage_key = usage_key.map_into_course(course_key)
user = User.objects.get(id=user_id)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key,
user,
modulestore().get_item(usage_key),
depth=0,
)
instance = get_module(
user,
request,
usage_key,
field_data_cache,
grade_bucket_type='xqueue',
course=course,
will_recheck_access=will_recheck_access
)
if instance is None:
msg = u"No module {0} for user {1}--access denied?".format(usage_key_string, user)
log.debug(msg)
raise Http404
return instance
@csrf_exempt
def xqueue_callback(request, course_id, userid, mod_id, dispatch):
'''
Entry point for graded results from the queueing system.
'''
data = request.POST.copy()
# Test xqueue package, which we expect to be:
# xpackage = {'xqueue_header': json.dumps({'lms_key':'secretkey',...}),
# 'xqueue_body' : 'Message from grader'}
for key in ['xqueue_header', 'xqueue_body']:
if key not in data:
raise Http404
header = json.loads(data['xqueue_header'])
if not isinstance(header, dict) or 'lms_key' not in header:
raise Http404
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=0)
instance = load_single_xblock(request, userid, course_id, mod_id, course=course)
# Transfer 'queuekey' from xqueue response header to the data.
# This is required to use the interface defined by 'handle_ajax'
data.update({'queuekey': header['lms_key']})
# We go through the "AJAX" path
# So far, the only dispatch from xqueue will be 'score_update'
try:
# Can ignore the return value--not used for xqueue_callback
instance.handle_ajax(dispatch, data)
# Save any state that has changed to the underlying KeyValueStore
instance.save()
except:
log.exception("error processing ajax call")
raise
return HttpResponse("")
@csrf_exempt
@xframe_options_exempt
def handle_xblock_callback_noauth(request, course_id, usage_id, handler, suffix=None):
"""
Entry point for unauthenticated XBlock handlers.
"""
request.user.known = False
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=0)
return _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=course)
@csrf_exempt
@xframe_options_exempt
def handle_xblock_callback(request, course_id, usage_id, handler, suffix=None):
"""
Generic view for extensions. This is where AJAX calls go.
Arguments:
request (Request): Django request.
course_id (str): Course containing the block
usage_id (str)
handler (str)
suffix (str)
Raises:
HttpResponseForbidden: If the request method is not `GET` and user is not authenticated.
Http404: If the course is not found in the modulestore.
"""
# In this case, we are using Session based authentication, so we need to check CSRF token.
if request.user.is_authenticated:
error = CsrfViewMiddleware().process_view(request, None, (), {})
if error:
return error
# We are reusing DRF logic to provide support for JWT and Oauth2. We abandoned the idea of using DRF view here
# to avoid introducing backwards-incompatible changes.
# You can see https://github.com/edx/XBlock/pull/383 for more details.
else:
authentication_classes = (JwtAuthentication, BearerAuthenticationAllowInactiveUser)
authenticators = [auth() for auth in authentication_classes]
for authenticator in authenticators:
try:
user_auth_tuple = authenticator.authenticate(request)
except APIException:
log.exception(
u"XBlock handler %r failed to authenticate with %s", handler, authenticator.__class__.__name__
)
else:
if user_auth_tuple is not None:
request.user, _ = user_auth_tuple
break
# NOTE (CCB): Allow anonymous GET calls (e.g. for transcripts). Modifying this view is simpler than updating
# the XBlocks to use `handle_xblock_callback_noauth`, which is practically identical to this view.
if request.method != 'GET' and not (request.user and request.user.is_authenticated):
return HttpResponseForbidden('Unauthenticated')
request.user.known = request.user.is_authenticated
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise Http404(u'{} is not a valid course key'.format(course_id))
with modulestore().bulk_operations(course_key):
try:
course = modulestore().get_course(course_key)
except ItemNotFoundError:
raise Http404(u'{} does not exist in the modulestore'.format(course_id))
return _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=course)
def get_module_by_usage_id(request, course_id, usage_id, disable_staff_debug_info=False, course=None):
"""
Gets a module instance based on its `usage_id` in a course, for a given request/user
Returns (instance, tracking_context)
"""
user = request.user
try:
course_id = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(unquote_slashes(usage_id)).map_into_course(course_id)
except InvalidKeyError:
raise Http404("Invalid location")
try:
descriptor = modulestore().get_item(usage_key)
descriptor_orig_usage_key, descriptor_orig_version = modulestore().get_block_original_usage(usage_key)
except ItemNotFoundError:
log.warn(
u"Invalid location for course id %s: %s",
usage_key.course_key,
usage_key
)
raise Http404
tracking_context = {
'module': {
# xss-lint: disable=python-deprecated-display-name
'display_name': descriptor.display_name_with_default_escaped,
'usage_key': six.text_type(descriptor.location),
}
}
# For blocks that are inherited from a content library, we add some additional metadata:
if descriptor_orig_usage_key is not None:
tracking_context['module']['original_usage_key'] = six.text_type(descriptor_orig_usage_key)
tracking_context['module']['original_usage_version'] = six.text_type(descriptor_orig_version)
unused_masquerade, user = setup_masquerade(request, course_id, has_access(user, 'staff', descriptor, course_id))
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id,
user,
descriptor,
read_only=CrawlersConfig.is_crawler(request),
)
instance = get_module_for_descriptor(
user,
request,
descriptor,
field_data_cache,
usage_key.course_key,
disable_staff_debug_info=disable_staff_debug_info,
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
log.debug(u"No module %s for user %s -- access denied?", usage_key, user)
raise Http404
return (instance, tracking_context)
def _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=None):
"""
Invoke an XBlock handler, either authenticated or not.
Arguments:
request (HttpRequest): the current request
course_id (str): A string of the form org/course/run
usage_id (str): A string of the form i4x://org/course/category/name@revision
handler (str): The name of the handler to invoke
suffix (str): The suffix to pass to the handler when invoked
"""
# Check submitted files
files = request.FILES or {}
error_msg = _check_files_limits(files)
if error_msg:
return JsonResponse({'success': error_msg}, status=413)
# Make a CourseKey from the course_id, raising a 404 upon parse error.
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise Http404
set_custom_metrics_for_course_key(course_key)
with modulestore().bulk_operations(course_key):
try:
usage_key = UsageKey.from_string(unquote_slashes(usage_id))
except InvalidKeyError:
raise Http404
if is_xblock_aside(usage_key):
# Get the usage key for the block being wrapped by the aside (not the aside itself)
block_usage_key = usage_key.usage_key
else:
block_usage_key = usage_key
instance, tracking_context = get_module_by_usage_id(
request, course_id, six.text_type(block_usage_key), course=course
)
# Name the transaction so that we can view XBlock handlers separately in
# New Relic. The suffix is necessary for XModule handlers because the
# "handler" in those cases is always just "xmodule_handler".
nr_tx_name = "{}.{}".format(instance.__class__.__name__, handler)
nr_tx_name += "/{}".format(suffix) if (suffix and handler == "xmodule_handler") else ""
set_monitoring_transaction_name(nr_tx_name, group="Python/XBlock/Handler")
tracking_context_name = 'module_callback_handler'
req = django_to_webob_request(request)
try:
with tracker.get_tracker().context(tracking_context_name, tracking_context):
if is_xblock_aside(usage_key):
# In this case, 'instance' is the XBlock being wrapped by the aside, so
# the actual aside instance needs to be retrieved in order to invoke its
# handler method.
handler_instance = get_aside_from_xblock(instance, usage_key.aside_type)
else:
handler_instance = instance
resp = handler_instance.handle(handler, req, suffix)
if suffix == 'problem_check' \
and course \
and getattr(course, 'entrance_exam_enabled', False) \
and getattr(instance, 'in_entrance_exam', False):
ee_data = {'entrance_exam_passed': user_has_passed_entrance_exam(request.user, course)}
resp = append_data_to_webob_response(resp, ee_data)
except NoSuchHandlerError:
log.exception(u"XBlock %s attempted to access missing handler %r", instance, handler)
raise Http404
# If we can't find the module, respond with a 404
except NotFoundError:
log.exception("Module indicating to user that request doesn't exist")
raise Http404
# For XModule-specific errors, we log the error and respond with an error message
except ProcessingError as err:
log.warning("Module encountered an error while processing AJAX call",
exc_info=True)
return JsonResponse({'success': err.args[0]}, status=200)
# If any other error occurred, re-raise it to trigger a 500 response
except Exception:
log.exception("error executing xblock handler")
raise
return webob_to_django_response(resp)
@api_view(['GET'])
@view_auth_classes(is_authenticated=True)
def xblock_view(request, course_id, usage_id, view_name):
"""
Returns the rendered view of a given XBlock, with related resources
Returns a json object containing two keys:
html: The rendered html of the view
resources: A list of tuples where the first element is the resource hash, and
the second is the resource description
"""
if not settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False):
log.warn("Attempt to use deactivated XBlock view endpoint -"
" see FEATURES['ENABLE_XBLOCK_VIEW_ENDPOINT']")
raise Http404
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise Http404("Invalid location")
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key)
instance, _ = get_module_by_usage_id(request, course_id, usage_id, course=course)
try:
fragment = instance.render(view_name, context=request.GET)
except NoSuchViewError:
log.exception(u"Attempt to render missing view on %s: %s", instance, view_name)
raise Http404
hashed_resources = OrderedDict()
for resource in fragment.resources:
hashed_resources[hash_resource(resource)] = resource
return JsonResponse({
'html': fragment.content,
'resources': list(hashed_resources.items()),
'csrf_token': six.text_type(csrf(request)['csrf_token']),
})
def _check_files_limits(files):
"""
Check if the files in a request are under the limits defined by
`settings.MAX_FILEUPLOADS_PER_INPUT` and
`settings.STUDENT_FILEUPLOAD_MAX_SIZE`.
Returns None if files are correct or an error messages otherwise.
"""
for fileinput_id in files.keys():
inputfiles = files.getlist(fileinput_id)
# Check number of files submitted
if len(inputfiles) > settings.MAX_FILEUPLOADS_PER_INPUT:
msg = u'Submission aborted! Maximum %d files may be submitted at once' % \
settings.MAX_FILEUPLOADS_PER_INPUT
return msg
# Check file sizes
for inputfile in inputfiles:
if inputfile.size > settings.STUDENT_FILEUPLOAD_MAX_SIZE: # Bytes
msg = u'Submission aborted! Your file "%s" is too large (max size: %d MB)' % \
(inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2))
return msg
return None
def append_data_to_webob_response(response, data):
"""
Appends data to a JSON webob response.
Arguments:
response (webob response object): the webob response object that needs to be modified
data (dict): dictionary containing data that needs to be appended to response body
Returns:
(webob response object): webob response with updated body.
"""
if getattr(response, 'content_type', None) == 'application/json':
json_input = response.body.decode('utf-8') if isinstance(response.body, bytes) else response.body
response_data = json.loads(json_input)
response_data.update(data)
response.body = json.dumps(response_data).encode('utf-8')
return response
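# Illustrative example (an editorial addition, assuming a webob Response):
#
#   >>> from webob import Response
#   >>> resp = Response(json_body={'success': 'correct'})
#   >>> resp = append_data_to_webob_response(resp, {'entrance_exam_passed': True})
#   >>> json.loads(resp.body.decode('utf-8'))['entrance_exam_passed']
#   True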
|
agpl-3.0
| -4,936,744,408,799,270,000
| 41.608397
| 119
| 0.650572
| false
| 4.181049
| false
| false
| false
|
kieranjol/IFIscripts
|
seq.py
|
1
|
1751
|
#!/usr/bin/env python
import subprocess
import sys
import os
import argparse
from glob import glob
parser = argparse.ArgumentParser(description='Generate v210/mov file from image sequence. '
                                             'Written by Kieran O\'Leary.')
parser.add_argument('input', help='file path of parent directory')
parser.add_argument('-p', action='store_true', help='Use the Apple ProRes 4:2:2 codec instead of v210')
parser.add_argument('-f', action='store_true', help='choose an alternative framerate')
args = parser.parse_args()
source_directory = args.input
if not os.path.isdir(args.input):
print('Please provide a directory as input, not a file')
sys.exit()
os.chdir(source_directory)
images = (
glob('*.tif') +
glob('*.tiff') +
glob('*.dpx')
)
if not images:
    print 'No tif/tiff/dpx images found in %s - exiting' % source_directory
    sys.exit()
extension = os.path.splitext(images[0])[1]
# Strip the trailing frame number so ffmpeg can use a %06d pattern instead.
numberless_filename = images[0].split("_")[0:-1]
ffmpeg_friendly_name = ''
for segment in numberless_filename:
    ffmpeg_friendly_name += segment + '_'
dirname = os.path.dirname(source_directory)
output = dirname + '/%s.mov' % os.path.split(source_directory)[-1]
ffmpeg_friendly_name += '%06d' + extension
codec = 'v210'
if args.p:
codec = 'prores'
# The script will choose 24fps as the default.
cmd = ['ffmpeg', '-f', 'image2', '-framerate', '24', '-i', ffmpeg_friendly_name, '-c:v', codec, output]
# Allow the choice of an alternative framerate through argparse.
if args.f:
    fps = raw_input('What alternative framerate do you require? 16, 18, 21, 25? ')
    cmd = ['ffmpeg', '-f', 'image2', '-framerate', fps, '-i', ffmpeg_friendly_name, '-c:v', codec, output]
print cmd
subprocess.call(cmd)
print 'Output file is located in %s' % output
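# Illustrative example (an editorial addition; filenames are hypothetical):
# for a directory holding reel1_000001.dpx ... reel1_086400.dpx, the command
# built above comes out roughly as:
#
#   ffmpeg -f image2 -framerate 24 -i reel1_%06d.dpx -c:v v210 /parent/dir.mov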
|
mit
| 9,195,344,782,524,252,000
| 34.02
| 103
| 0.667047
| false
| 3.367308
| false
| false
| false
|
apophys/freeipa
|
pylint_plugins.py
|
1
|
10988
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import copy
import os.path
import sys
import textwrap
from astroid import MANAGER, register_module_extender
from astroid import scoped_nodes
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker
from astroid.builder import AstroidBuilder
def register(linter):
linter.register_checker(IPAChecker(linter))
def _warning_already_exists(cls, member):
print(
"WARNING: member '{member}' in '{cls}' already exists".format(
cls="{}.{}".format(cls.root().name, cls.name), member=member),
file=sys.stderr
)
def fake_class(name_or_class_obj, members=()):
if isinstance(name_or_class_obj, scoped_nodes.Class):
cl = name_or_class_obj
else:
cl = scoped_nodes.Class(name_or_class_obj, None)
for m in members:
if isinstance(m, str):
if m in cl.locals:
_warning_already_exists(cl, m)
else:
cl.locals[m] = [scoped_nodes.Class(m, None)]
elif isinstance(m, dict):
for key, val in m.items():
assert isinstance(key, str), "key must be string"
if key in cl.locals:
_warning_already_exists(cl, key)
fake_class(cl.locals[key], val)
else:
cl.locals[key] = [fake_class(key, val)]
else:
            # any astroid type can be used here
if m.name in cl.locals:
_warning_already_exists(cl, m.name)
else:
cl.locals[m.name] = [copy.copy(m)]
return cl
fake_backend = {'Backend': [
{'wsgi_dispatch': ['mount']},
]}
NAMESPACE_ATTRS = ['Command', 'Object', 'Method', fake_backend, 'Updater',
'Advice']
fake_api_env = {'env': [
'host',
'realm',
'session_auth_duration',
'session_duration_type',
'kinit_lifetime',
]}
# this is due to ipaserver.rpcserver.KerberosSession, where api is undefined
fake_api = {'api': [fake_api_env] + NAMESPACE_ATTRS}
# 'class': ['generated', 'properties']
ipa_class_members = {
# Python standard library & 3rd party classes
'socket._socketobject': ['sendall'],
# IPA classes
'ipalib.base.NameSpace': [
'add',
'mod',
'del',
'show',
'find'
],
'ipalib.cli.Collector': ['__options'],
'ipalib.config.Env': [
{'__d': ['get']},
{'__done': ['add']},
'xmlrpc_uri',
'validate_api',
'startup_traceback',
'verbose',
'debug',
'server',
{'domain': dir(str)},
],
'ipalib.errors.ACIError': [
'info',
],
'ipalib.errors.ConversionError': [
'error',
],
'ipalib.errors.DatabaseError': [
'desc',
],
'ipalib.errors.NetworkError': [
'error',
],
'ipalib.errors.NotFound': [
'reason',
],
'ipalib.errors.PublicError': [
'msg',
'strerror',
'kw',
],
'ipalib.errors.SingleMatchExpected': [
'found',
],
'ipalib.errors.SkipPluginModule': [
'reason',
],
'ipalib.errors.ValidationError': [
'error',
],
'ipalib.errors.SchemaUpToDate': [
'fingerprint',
'ttl',
],
'ipalib.messages.PublicMessage': [
'msg',
'strerror',
'type',
'kw',
],
'ipalib.parameters.Param': [
'cli_name',
'cli_short_name',
'label',
'default',
'doc',
'required',
'multivalue',
'primary_key',
'normalizer',
'default_from',
'autofill',
'query',
'attribute',
'include',
'exclude',
'flags',
'hint',
'alwaysask',
'sortorder',
'option_group',
'no_convert',
'deprecated',
],
    'ipalib.parameters.Bool': [
        'truths',
        'falsehoods',
    ],
'ipalib.parameters.Data': [
'minlength',
'maxlength',
'length',
'pattern',
'pattern_errmsg',
],
'ipalib.parameters.Str': ['noextrawhitespace'],
'ipalib.parameters.Password': ['confirm'],
'ipalib.parameters.File': ['stdin_if_missing'],
'ipalib.parameters.Enum': ['values'],
'ipalib.parameters.Number': [
'minvalue',
'maxvalue',
],
'ipalib.parameters.Decimal': [
'precision',
'exponential',
'numberclass',
],
'ipalib.parameters.DNSNameParam': [
'only_absolute',
'only_relative',
],
'ipalib.parameters.Principal': [
'require_service',
],
'ipalib.plugable.API': [
fake_api_env,
] + NAMESPACE_ATTRS,
'ipalib.plugable.Plugin': [
'Object',
'Method',
'Updater',
'Advice',
],
'ipalib.util.ForwarderValidationError': [
'msg',
],
'ipaserver.plugins.dns.DNSRecord': [
'validatedns',
'normalizedns',
],
'ipaserver.rpcserver.KerberosSession': [
fake_api,
],
'ipatests.test_integration.base.IntegrationTest': [
'domain',
{'master': [
{'config': [
{'dirman_password': dir(str)},
{'admin_password': dir(str)},
{'admin_name': dir(str)},
{'dns_forwarder': dir(str)},
{'test_dir': dir(str)},
{'ad_admin_name': dir(str)},
{'ad_admin_password': dir(str)},
{'domain_level': dir(str)},
]},
{'domain': [
{'realm': dir(str)},
{'name': dir(str)},
]},
'hostname',
'ip',
'collect_log',
{'run_command': [
{'stdout_text': dir(str)},
'stderr_text',
'returncode',
]},
{'transport': ['put_file', 'file_exists']},
'put_file_contents',
'get_file_contents',
'ldap_connect',
]},
'replicas',
'clients',
'ad_domains',
]
}
def fix_ipa_classes(cls):
class_name_with_module = "{}.{}".format(cls.root().name, cls.name)
if class_name_with_module in ipa_class_members:
fake_class(cls, ipa_class_members[class_name_with_module])
MANAGER.register_transform(scoped_nodes.Class, fix_ipa_classes)
def pytest_config_transform():
"""pylint.config attribute
"""
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from _pytest.config import get_config
config = get_config()
'''))
register_module_extender(MANAGER, 'pytest', pytest_config_transform)
def ipaplatform_constants_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.constants import constants
__all__ = ('constants',)
'''))
def ipaplatform_paths_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.paths import paths
__all__ = ('paths',)
'''))
def ipaplatform_services_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.services import knownservices
from ipaplatform.base.services import timedate_services
from ipaplatform.base.services import service
from ipaplatform.base.services import wellknownservices
from ipaplatform.base.services import wellknownports
__all__ = ('knownservices', 'timedate_services', 'service',
'wellknownservices', 'wellknownports')
'''))
def ipaplatform_tasks_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
from ipaplatform.base.tasks import tasks
__all__ = ('tasks',)
'''))
register_module_extender(MANAGER, 'ipaplatform.constants',
ipaplatform_constants_transform)
register_module_extender(MANAGER, 'ipaplatform.paths',
ipaplatform_paths_transform)
register_module_extender(MANAGER, 'ipaplatform.services',
ipaplatform_services_transform)
register_module_extender(MANAGER, 'ipaplatform.tasks',
ipaplatform_tasks_transform)
class IPAChecker(BaseChecker):
__implements__ = IAstroidChecker
name = 'ipa'
msgs = {
'W9901': (
'Forbidden import %s (can\'t import from %s in %s)',
'ipa-forbidden-import',
            'Used when a forbidden import is detected.',
),
}
options = (
(
'forbidden-imports',
{
'default': '',
'type': 'csv',
'metavar': '<path>[:<module>[:<module>...]][,<path>...]',
'help': 'Modules which are forbidden to be imported in the '
'given paths',
},
),
)
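    # Illustrative configuration sketch (an editorial addition; module names
    # are hypothetical): in pylintrc the option above could be set as
    #
    #   forbidden-imports=ipaclient:ipaserver,ipalib:ipaserver.install
    #
    # meaning files under ipaclient/ must not import ipaserver, and files
    # under ipalib/ must not import ipaserver.install.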
priority = -1
def open(self):
self._dir = os.path.abspath(os.path.dirname(__file__))
self._forbidden_imports = {self._dir: []}
for forbidden_import in self.config.forbidden_imports:
forbidden_import = forbidden_import.split(':')
path = os.path.join(self._dir, forbidden_import[0])
path = os.path.abspath(path)
modules = forbidden_import[1:]
self._forbidden_imports[path] = modules
self._forbidden_imports_stack = []
def _get_forbidden_import_rule(self, node):
path = node.path
if path:
path = os.path.abspath(path)
while path.startswith(self._dir):
if path in self._forbidden_imports:
return path
path = os.path.dirname(path)
return self._dir
def visit_module(self, node):
self._forbidden_imports_stack.append(
self._get_forbidden_import_rule(node))
def leave_module(self, node):
self._forbidden_imports_stack.pop()
def _check_forbidden_imports(self, node, names):
path = self._forbidden_imports_stack[-1]
relpath = os.path.relpath(path, self._dir)
modules = self._forbidden_imports[path]
for module in modules:
module_prefix = module + '.'
for name in names:
if name == module or name.startswith(module_prefix):
self.add_message('ipa-forbidden-import',
args=(name, module, relpath), node=node)
@check_messages('ipa-forbidden-import')
def visit_import(self, node):
names = [n[0] for n in node.names]
self._check_forbidden_imports(node, names)
@check_messages('ipa-forbidden-import')
def visit_importfrom(self, node):
names = ['{}.{}'.format(node.modname, n[0]) for n in node.names]
self._check_forbidden_imports(node, names)
|
gpl-3.0
| 3,321,435,523,477,805,600
| 27.246787
| 77
| 0.545777
| false
| 3.915895
| true
| false
| false
|
solvo/organilab
|
src/authentication/migrations/0001_initial.py
|
1
|
1044
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 07:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FeedbackEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('explanation', models.TextField(blank=True, verbose_name='Explanation')),
('related_file', models.FileField(blank=True, upload_to='media/feedback_entries/', verbose_name='Related file')),
],
options={
'verbose_name_plural': 'Feedback entries',
'permissions': (('view_feedbackentry', 'Can see available feed back entry'),),
'verbose_name': 'Feedback entry',
},
),
]
|
gpl-3.0
| 7,104,996,995,801,698,000
| 33.8
| 129
| 0.577586
| false
| 4.423729
| false
| false
| false
|
tensorflow/model-optimization
|
tensorflow_model_optimization/python/core/sparsity/keras/test_utils.py
|
1
|
5637
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility to generate models for testing."""
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_wrapper
keras = tf.keras
l = keras.layers
def _build_mnist_layer_list():
return [
l.Conv2D(
32, 5, padding='same', activation='relu', input_shape=(28, 28, 1)),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.BatchNormalization(),
l.Conv2D(64, 5, padding='same', activation='relu'),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.Flatten(),
l.Dense(1024, activation='relu'),
l.Dropout(0.4),
l.Dense(10, activation='softmax')
]
def _build_mnist_sequential_model():
return keras.Sequential(_build_mnist_layer_list())
def _build_mnist_functional_model():
# pylint: disable=missing-docstring
inp = keras.Input(shape=(28, 28, 1))
x = l.Conv2D(32, 5, padding='same', activation='relu')(inp)
x = l.MaxPooling2D((2, 2), (2, 2), padding='same')(x)
x = l.BatchNormalization()(x)
x = l.Conv2D(64, 5, padding='same', activation='relu')(x)
x = l.MaxPooling2D((2, 2), (2, 2), padding='same')(x)
x = l.Flatten()(x)
x = l.Dense(1024, activation='relu')(x)
x = l.Dropout(0.4)(x)
out = l.Dense(10, activation='softmax')(x)
return keras.models.Model([inp], [out])
def _build_mnist_layerwise_pruned_model(pruning_params):
if pruning_params is None:
raise ValueError('pruning_params should be provided.')
return keras.Sequential([
prune.prune_low_magnitude(
l.Conv2D(32, 5, padding='same', activation='relu'),
input_shape=(28, 28, 1),
**pruning_params),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.BatchNormalization(),
prune.prune_low_magnitude(
l.Conv2D(64, 5, padding='same', activation='relu'), **pruning_params),
l.MaxPooling2D((2, 2), (2, 2), padding='same'),
l.Flatten(),
prune.prune_low_magnitude(
l.Dense(1024, activation='relu'), **pruning_params),
l.Dropout(0.4),
prune.prune_low_magnitude(
l.Dense(10, activation='softmax'), **pruning_params)
])
def build_mnist_model(model_type, pruning_params=None):
return {
'sequential': _build_mnist_sequential_model(),
'functional': _build_mnist_functional_model(),
'layer_list': _build_mnist_layer_list(),
'layer_wise': _build_mnist_layerwise_pruned_model(pruning_params),
}[model_type]
def model_type_keys():
return ['sequential', 'functional', 'layer_list', 'layer_wise']
def list_to_named_parameters(param_name, options):
"""Convert list of options for parameter to input to @parameterized.named_parameters.
Arguments:
param_name: name of parameter
options: list of options for parameter
Returns:
named_params: input to @parameterized.named_parameters
Needed to stack multiple parameters (e.g. with keras run_all_modes).
"""
def snakecase_to_camelcase(value):
# Non-comprehensive check for whether the value is already camelcase.
if value[0].isupper() and '_' not in value:
return value
camelcase = ''
for s in value.split('_'):
camelcase += s.capitalize()
return camelcase
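# e.g. snakecase_to_camelcase('layer_wise') -> 'LayerWise'; an input that
# already looks camelcase ('LayerWise') is returned unchanged.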
def name(s):
if isinstance(s, str):
return s
return s.__name__
named_params = []
for key in options:
named_params.append({
'testcase_name': snakecase_to_camelcase(name(key)),
param_name: key
})
return named_params
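# Illustrative call ('model_type' is just an example parameter name):
#   list_to_named_parameters('model_type', ['sequential', 'functional'])
#   # -> [{'testcase_name': 'Sequential', 'model_type': 'sequential'},
#   #     {'testcase_name': 'Functional', 'model_type': 'functional'}]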
def _save_restore_keras_model(model):
_, keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, keras_file)
with prune.prune_scope():
loaded_model = keras.models.load_model(keras_file)
return loaded_model
def _save_restore_tf_model(model):
tmpdir = tempfile.mkdtemp()
tf.keras.models.save_model(model, tmpdir, save_format='tf')
with prune.prune_scope():
loaded_model = tf.keras.models.load_model(tmpdir)
return loaded_model
def save_restore_fns():
return [_save_restore_keras_model, _save_restore_tf_model]
# Assertion/Sparsity Verification functions.
def _get_sparsity(weights):
return 1.0 - np.count_nonzero(weights) / float(weights.size)
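# Worked example: for weights [0, 0, 0, 1], count_nonzero is 1 and size is 4,
# so the sparsity is 1.0 - 1/4 = 0.75.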
def assert_model_sparsity(test_case, sparsity, model, rtol=1e-6, atol=1e-6):
for layer in model.layers:
if isinstance(layer, pruning_wrapper.PruneLowMagnitude):
for weight in layer.layer.get_prunable_weights():
test_case.assertAllClose(
sparsity, _get_sparsity(tf.keras.backend.get_value(weight)), rtol=rtol, atol=atol)
# Check if model does not have target sparsity.
def is_model_sparsity_not(sparsity, model):
for layer in model.layers:
if isinstance(layer, pruning_wrapper.PruneLowMagnitude):
for weight in layer.layer.get_prunable_weights():
if sparsity != _get_sparsity(tf.keras.backend.get_value(weight)):
return True
return False
|
apache-2.0
| 1,986,405,175,058,615,300
| 29.972527
| 94
| 0.664006
| false
| 3.449816
| false
| false
| false
|
finger563/editor
|
tests/flatProxyModel.py
|
1
|
3113
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtCore, QtGui
class FlatProxyModel(QtGui.QAbstractProxyModel):
def sourceDataChanged(self, topLeft, bottomRight):
self.dataChanged.emit(self.mapFromSource(topLeft),
self.mapFromSource(bottomRight))
def buildMap(self, model, parent=QtCore.QModelIndex(), row=0):
if row == 0:
self.m_rowMap = {}
self.m_indexMap = {}
rows = model.rowCount(parent)
for r in range(rows):
index = model.index(r, 0, parent)
print('row', row, 'item', model.data(index))
self.m_rowMap[index] = row
self.m_indexMap[row] = index
row = row + 1
if model.hasChildren(index):
row = self.buildMap(model, index, row)
return row
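# Sketch of the maps for the Foo/Bar/Baz demo model in __main__ below: rows
# are numbered depth-first, so row 0 -> Foo, 1 -> FooFoo, 2 -> FooBar,
# 3 -> FooBaz, 4 -> Bar, and so on; m_rowMap is the inverse, keyed by the
# source QModelIndex.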
def setSourceModel(self, model):
QtGui.QAbstractProxyModel.setSourceModel(self, model)
self.buildMap(model)
model.dataChanged.connect(self.sourceDataChanged)
def mapFromSource(self, index):
if index not in self.m_rowMap:
return QtCore.QModelIndex()
# print('mapping to row', self.m_rowMap[index], flush = True)
return self.createIndex(self.m_rowMap[index], index.column())
def mapToSource(self, index):
if not index.isValid() or index.row() not in self.m_indexMap:
return QtCore.QModelIndex()
# print('mapping from row', index.row(), flush = True)
return self.m_indexMap[index.row()]
def columnCount(self, parent):
return QtGui.QAbstractProxyModel.sourceModel(self)\
.columnCount(self.mapToSource(parent))
def rowCount(self, parent):
# print('rows:', len(self.m_rowMap), flush=True)
return len(self.m_rowMap) if not parent.isValid() else 0
def index(self, row, column, parent):
# print('index for:', row, column, flush=True)
if parent.isValid():
return QtCore.QModelIndex()
return self.createIndex(row, column)
def parent(self, index):
return QtCore.QModelIndex()
def __init__(self, parent=None):
super(FlatProxyModel, self).__init__(parent)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
model = QtGui.QStandardItemModel()
names = ['Foo', 'Bar', 'Baz']
for first in names:
row = QtGui.QStandardItem(first)
for second in names:
row.appendRow(QtGui.QStandardItem(first+second))
model.appendRow(row)
proxy = FlatProxyModel()
proxy.setSourceModel(model)
nestedProxy = FlatProxyModel()
nestedProxy.setSourceModel(proxy)
w = QtGui.QWidget()
layout = QtGui.QHBoxLayout(w)
view = QtGui.QTreeView()
view.setModel(model)
view.expandAll()
view.header().hide()
layout.addWidget(view)
view = QtGui.QListView()
view.setModel(proxy)
layout.addWidget(view)
view = QtGui.QListView()
view.setModel(nestedProxy)
layout.addWidget(view)
w.show()
sys.exit(app.exec_())
|
mit
| 131,644,875,371,513,710
| 30.765306
| 78
| 0.610022
| false
| 3.737095
| false
| false
| false
|
Jumpscale/play8
|
sockettest.py
|
1
|
1790
|
# import nnpy
# import time
# s=nnpy.Socket(nnpy.AF_SP,nnpy.REP)
#
#
# s.bind('tcp://127.0.0.1:5555')
#
# # s.setsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET,value=1024*1024)
# # s.getsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET)
#
# counter=0
# while True:
# try:
# res=s.recv(flags=nnpy.DONTWAIT)
# counter+=1
# except Exception as e:
# if not str(e)=='Resource temporarily unavailable':
# raise(e)
# from IPython import embed
# print ("DEBUG NOW 9")
# embed()
# raise RuntimeError("stop debug here")
# time.sleep(1)
# print(counter)
# continue
#
# s.send("ok")
# # print(res)
from JumpScale import j
def MyMethod(hello):
import time
counter=0
while True:
time.sleep(1)
counter+=1
print("%s:%s"%(hello,counter))
import asyncio
import logging
import aionn
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)  # "logger" is used for the coroutine below
counter=0
async def reader(socket,counter):
while True:
# print('receiving...')
name = await socket.recv()
# print('received:', value)
p = j.core.processmanager.startProcess(method=MyMethod,args={"hello":name.decode()},name=name.decode())
counter+=1
print(counter)
async def logger():
counter=0
while True:
for key,p in j.core.processmanager.processes.items():
p.sync()
print(p.new_stdout)
counter+=1
await asyncio.sleep(1)
print("logger:%s"%counter)
async def main(loop):
await asyncio.wait([reader(socket, counter), logger()])
socket = aionn.Socket(aionn.AF_SP, aionn.PULL)
socket.bind('tcp://*:5555')
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
|
apache-2.0
| -7,030,116,780,493,325,000
| 22.246753
| 111
| 0.60838
| false
| 3.162544
| false
| false
| false
|
NazarioJL/google-foobar
|
level_3/the_grandest_staircase_of_them_all/solution.py
|
1
|
1603
|
def answer(n):
# there will be only *one* sequence with count 1
result = make_stairs_count(n) - 1
return result
def make_stairs(total_remaining):
"""Returns a list of all sequences of increasing values that add up to total_remaining"""
all_lists = []
def make_stairs_rec(prev_step_size, left, l):
if left == 0:
all_lists.append(l)
return
if left < 0:
return
for new_step_size in xrange(prev_step_size + 1, left + 1):
new_left = left - new_step_size
make_stairs_rec(new_step_size, new_left, l + [new_step_size])
return
make_stairs_rec(0, total_remaining, [])
return all_lists
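# Worked example (by hand): make_stairs(5) yields [[1, 4], [2, 3], [5]] --
# every strictly increasing sequence summing to 5, including the trivial [5].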
def make_stairs_count(total_remaining):
"""Returns the count of all sequences of increasing values that add up to total_remaining
Since the problem only requires the count, this method will not keep track of the
actual sequence.
"""
# use for memoization
memo = {}
def make_stairs_count_rec(prev_step_size, remaining):
if remaining == 0:
return 1
if remaining < 0:
return 0
result = 0
for new_step_size in xrange(prev_step_size + 1, remaining + 1):
new_remaining = remaining - new_step_size
args = (new_step_size, new_remaining)
if args not in memo:
memo[args] = make_stairs_count_rec(new_step_size, new_remaining)
result += memo[args]
return result
all_count = make_stairs_count_rec(0, total_remaining)
return all_count
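# Consistency check (by hand): make_stairs_count(5) == 3 ([1, 4], [2, 3] and
# [5]), so answer(5) == 2 once the one-step sequence [5] is discarded.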
|
unlicense
| 3,313,028,360,251,223,000
| 25.716667
| 93
| 0.59451
| false
| 3.807601
| false
| false
| false
|
DarkFenX/Pyfa
|
eos/saveddata/module.py
|
1
|
46249
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import math
from logbook import Logger
from sqlalchemy.orm import reconstructor, validates
import eos.db
from eos.const import FittingHardpoint, FittingModuleState, FittingSlot
from eos.effectHandlerHelpers import HandledCharge, HandledItem
from eos.modifiedAttributeDict import ChargeAttrShortcut, ItemAttrShortcut, ModifiedAttributeDict
from eos.saveddata.citadel import Citadel
from eos.saveddata.mutator import Mutator
from eos.utils.cycles import CycleInfo, CycleSequence
from eos.utils.default import DEFAULT
from eos.utils.float import floatUnerr
from eos.utils.spoolSupport import calculateSpoolup, resolveSpoolOptions
from eos.utils.stats import DmgTypes, RRTypes
pyfalog = Logger(__name__)
ProjectedMap = {
FittingModuleState.OVERHEATED: FittingModuleState.ACTIVE,
FittingModuleState.ACTIVE: FittingModuleState.OFFLINE,
FittingModuleState.OFFLINE: FittingModuleState.ACTIVE,
FittingModuleState.ONLINE: FittingModuleState.ACTIVE # Just in case
}
# Old state : New State
LocalMap = {
FittingModuleState.OVERHEATED: FittingModuleState.ACTIVE,
FittingModuleState.ACTIVE: FittingModuleState.ONLINE,
FittingModuleState.OFFLINE: FittingModuleState.ONLINE,
FittingModuleState.ONLINE: FittingModuleState.ACTIVE
}
# For system effects. They should only ever be online or offline
ProjectedSystem = {
FittingModuleState.OFFLINE: FittingModuleState.ONLINE,
FittingModuleState.ONLINE: FittingModuleState.OFFLINE
}
class Module(HandledItem, HandledCharge, ItemAttrShortcut, ChargeAttrShortcut):
"""An instance of this class represents a module together with its charge and modified attributes"""
MINING_ATTRIBUTES = ("miningAmount",)
SYSTEM_GROUPS = ("Effect Beacon", "MassiveEnvironments", "Abyssal Hazards", "Non-Interactable Object")
def __init__(self, item, baseItem=None, mutaplasmid=None):
"""Initialize a module from the program"""
self.itemID = item.ID if item is not None else None
self.baseItemID = baseItem.ID if baseItem is not None else None
self.mutaplasmidID = mutaplasmid.ID if mutaplasmid is not None else None
if baseItem is not None:
# we're working with a mutated module, need to get abyssal module loaded with the base attributes
# Note: there may be a better way of doing this, such as a method on this class to convert(mutaplasmid). This
# will require a bit more research though, considering there has never been a need to "swap" out the item of a Module
# before, and there may be assumptions taken with regards to the item never changing (pre-calculated / cached results, for example)
self.__item = eos.db.getItemWithBaseItemAttribute(self.itemID, self.baseItemID)
self.__baseItem = baseItem
self.__mutaplasmid = mutaplasmid
else:
self.__item = item
self.__baseItem = baseItem
self.__mutaplasmid = mutaplasmid
if item is not None and self.isInvalid:
raise ValueError("Passed item is not a Module")
self.__charge = None
self.projected = False
self.projectionRange = None
self.state = FittingModuleState.ONLINE
self.build()
@reconstructor
def init(self):
"""Initialize a module from the database and validate"""
self.__item = None
self.__baseItem = None
self.__charge = None
self.__mutaplasmid = None
# we need this early if module is invalid and returns early
self.__slot = self.dummySlot
if self.itemID:
self.__item = eos.db.getItem(self.itemID)
if self.__item is None:
pyfalog.error("Item (id: {0}) does not exist", self.itemID)
return
if self.baseItemID:
self.__item = eos.db.getItemWithBaseItemAttribute(self.itemID, self.baseItemID)
self.__baseItem = eos.db.getItem(self.baseItemID)
self.__mutaplasmid = eos.db.getMutaplasmid(self.mutaplasmidID)
if self.__baseItem is None:
pyfalog.error("Base Item (id: {0}) does not exist", self.itemID)
return
if self.isInvalid:
pyfalog.error("Item (id: {0}) is not a Module", self.itemID)
return
if self.chargeID:
self.__charge = eos.db.getItem(self.chargeID)
self.build()
def build(self):
""" Builds internal module variables from both init's """
if self.__charge and self.__charge.category.name != "Charge":
self.__charge = None
self.__baseVolley = None
self.__baseRRAmount = None
self.__miningyield = None
self.__reloadTime = None
self.__reloadForce = None
self.__chargeCycles = None
self.__hardpoint = FittingHardpoint.NONE
self.__itemModifiedAttributes = ModifiedAttributeDict(parent=self)
self.__chargeModifiedAttributes = ModifiedAttributeDict(parent=self)
self.__slot = self.dummySlot # defaults to None
if self.__item:
self.__itemModifiedAttributes.original = self.__item.attributes
self.__itemModifiedAttributes.overrides = self.__item.overrides
self.__hardpoint = self.__calculateHardpoint(self.__item)
self.__slot = self.calculateSlot(self.__item)
# Instantiate / remove mutators if this is a mutated module
if self.__baseItem:
for x in self.mutaplasmid.attributes:
attr = self.item.attributes[x.name]
id = attr.ID
if id not in self.mutators: # create the mutator
Mutator(self, attr, attr.value)
# @todo: remove attributes that are no longer part of the mutaplasmid.
self.__itemModifiedAttributes.mutators = self.mutators
if self.__charge:
self.__chargeModifiedAttributes.original = self.__charge.attributes
self.__chargeModifiedAttributes.overrides = self.__charge.overrides
@classmethod
def buildEmpty(cls, slot):
empty = Module(None)
empty.__slot = slot
empty.dummySlot = slot
return empty
@classmethod
def buildRack(cls, slot, num=None):
empty = Rack(None)
empty.__slot = slot
empty.dummySlot = slot
empty.num = num
return empty
@property
def isEmpty(self):
return self.dummySlot is not None
@property
def hardpoint(self):
return self.__hardpoint
@property
def isInvalid(self):
# todo: validate baseItem as well if it's set.
if self.isEmpty:
return False
return (
self.__item is None or (
self.__item.category.name not in ("Module", "Subsystem", "Structure Module") and
self.__item.group.name not in self.SYSTEM_GROUPS) or
(self.item.isAbyssal and not self.isMutated))
@property
def isMutated(self):
return self.baseItemID and self.mutaplasmidID
@property
def numCharges(self):
return self.getNumCharges(self.charge)
def getNumCharges(self, charge):
if charge is None:
charges = 0
else:
chargeVolume = charge.volume
containerCapacity = self.item.capacity
if chargeVolume is None or containerCapacity is None:
charges = 0
else:
charges = int(floatUnerr(containerCapacity / chargeVolume))
return charges
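# Worked example (illustrative numbers): a launcher with capacity 2.0 m3
# loading 0.015 m3 charges holds int(floatUnerr(2.0 / 0.015)) == 133 charges.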
@property
def numShots(self):
if self.charge is None:
return 0
if self.__chargeCycles is None and self.charge:
numCharges = self.numCharges
# Usual ammo like projectiles and missiles
if numCharges > 0 and "chargeRate" in self.itemModifiedAttributes:
self.__chargeCycles = self.__calculateAmmoShots()
# Frequency crystals (combat and mining lasers)
elif numCharges > 0 and "crystalsGetDamaged" in self.chargeModifiedAttributes:
self.__chargeCycles = self.__calculateCrystalShots()
# Scripts and stuff
else:
self.__chargeCycles = 0
return self.__chargeCycles
else:
return self.__chargeCycles
@property
def modPosition(self):
return self.getModPosition()
def getModPosition(self, fit=None):
# Pass in fit for reliability. When it's not passed, we rely on owner and owner
# is set by sqlalchemy during flush
fit = fit if fit is not None else self.owner
if fit:
container = fit.projectedModules if self.isProjected else fit.modules
try:
return container.index(self)
except ValueError:
return None
return None
@property
def isProjected(self):
if self.owner:
return self in self.owner.projectedModules
return None
@property
def isExclusiveSystemEffect(self):
return self.item.group.name in ("Effect Beacon", "Non-Interactable Object", "MassiveEnvironments")
@property
def isCapitalSize(self):
return self.getModifiedItemAttr("volume", 0) >= 4000
@property
def hpBeforeReload(self):
"""
If item is some kind of repairer with charges, calculate
HP it reps before going into reload.
"""
cycles = self.numShots
armorRep = self.getModifiedItemAttr("armorDamageAmount") or 0
shieldRep = self.getModifiedItemAttr("shieldBonus") or 0
if not cycles or (not armorRep and not shieldRep):
return 0
hp = round((armorRep + shieldRep) * cycles)
return hp
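# Worked example (illustrative values): with numShots == 8 and
# armorDamageAmount == 400 (no shield bonus), this returns
# round((400 + 0) * 8) == 3200 HP repped before reload.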
def __calculateAmmoShots(self):
if self.charge is not None:
# Set number of cycles before reload is needed
# numcycles = math.floor(module_capacity / (module_volume * module_chargerate))
chargeRate = self.getModifiedItemAttr("chargeRate")
numCharges = self.numCharges
numShots = math.floor(numCharges / chargeRate)
else:
numShots = None
return numShots
def __calculateCrystalShots(self):
if self.charge is not None:
if self.getModifiedChargeAttr("crystalsGetDamaged") == 1:
# For depletable crystals, calculate average amount of shots before it's destroyed
hp = self.getModifiedChargeAttr("hp")
chance = self.getModifiedChargeAttr("crystalVolatilityChance")
damage = self.getModifiedChargeAttr("crystalVolatilityDamage")
crystals = self.numCharges
numShots = math.floor((crystals * hp) / (damage * chance))
else:
# Set 0 (infinite) for permanent crystals like t1 laser crystals
numShots = 0
else:
numShots = None
return numShots
@property
def maxRange(self):
attrs = ("maxRange", "shieldTransferRange", "powerTransferRange",
"energyDestabilizationRange", "empFieldRange",
"ecmBurstRange", "warpScrambleRange", "cargoScanRange",
"shipScanRange", "surveyScanRange")
maxRange = None
for attr in attrs:
maxRange = self.getModifiedItemAttr(attr, None)
if maxRange is not None:
break
if maxRange is not None:
if 'burst projector' in self.item.name.lower():
maxRange -= self.owner.ship.getModifiedItemAttr("radius")
return maxRange
missileMaxRangeData = self.missileMaxRangeData
if missileMaxRangeData is None:
return None
lowerRange, higherRange, higherChance = missileMaxRangeData
maxRange = lowerRange * (1 - higherChance) + higherRange * higherChance
return maxRange
@property
def missileMaxRangeData(self):
if self.charge is None:
return None
try:
chargeName = self.charge.group.name
except AttributeError:
pass
else:
if chargeName in ("Scanner Probe", "Survey Probe"):
return None
def calculateRange(maxVelocity, mass, agility, flightTime):
# Source: http://www.eveonline.com/ingameboard.asp?a=topic&threadID=1307419&page=1#15
# D_m = V_m * (T_m + T_0*[exp(- T_m/T_0)-1])
accelTime = min(flightTime, mass * agility / 1000000)
# Average distance done during acceleration
duringAcceleration = maxVelocity / 2 * accelTime
# Distance done after being at full speed
fullSpeed = maxVelocity * (flightTime - accelTime)
maxRange = duringAcceleration + fullSpeed
return maxRange
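# Note on the model above: instead of evaluating the exponential formula from
# the source comment, the code approximates flight as a linear acceleration
# phase of mass * agility / 1e6 seconds (capped at flightTime) followed by
# constant maxVelocity -- e.g. maxVelocity=4000 m/s, accelTime=2 s,
# flightTime=10 s gives 4000 m + 32000 m = 36 km (illustrative numbers).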
maxVelocity = self.getModifiedChargeAttr("maxVelocity")
if not maxVelocity:
return None
shipRadius = self.owner.ship.getModifiedItemAttr("radius")
# Flight time has bonus based on ship radius, see https://github.com/pyfa-org/Pyfa/issues/2083
flightTime = floatUnerr(self.getModifiedChargeAttr("explosionDelay") / 1000 + shipRadius / maxVelocity)
mass = self.getModifiedChargeAttr("mass")
agility = self.getModifiedChargeAttr("agility")
lowerTime = math.floor(flightTime)
higherTime = math.ceil(flightTime)
lowerRange = calculateRange(maxVelocity, mass, agility, lowerTime)
higherRange = calculateRange(maxVelocity, mass, agility, higherTime)
# FoF (friend-or-foe) range limit is supposedly calculated based on overview (surface-to-surface) range
if 'fofMissileLaunching' in self.charge.effects:
rangeLimit = self.getModifiedChargeAttr("maxFOFTargetRange")
if rangeLimit:
lowerRange = min(lowerRange, rangeLimit)
higherRange = min(higherRange, rangeLimit)
# Make range center-to-surface, as missiles spawn in the center of the ship
lowerRange = max(0, lowerRange - shipRadius)
higherRange = max(0, higherRange - shipRadius)
higherChance = flightTime - lowerTime
return lowerRange, higherRange, higherChance
@property
def falloff(self):
attrs = ("falloffEffectiveness", "falloff", "shipScanFalloff")
for attr in attrs:
falloff = self.getModifiedItemAttr(attr, None)
if falloff is not None:
return falloff
@property
def slot(self):
return self.__slot
@property
def itemModifiedAttributes(self):
return self.__itemModifiedAttributes
@property
def chargeModifiedAttributes(self):
return self.__chargeModifiedAttributes
@property
def item(self):
return self.__item if self.__item != 0 else None
@property
def baseItem(self):
return self.__baseItem
@property
def mutaplasmid(self):
return self.__mutaplasmid
@property
def charge(self):
return self.__charge if self.__charge != 0 else None
@charge.setter
def charge(self, charge):
self.__charge = charge
if charge is not None:
self.chargeID = charge.ID
self.__chargeModifiedAttributes.original = charge.attributes
self.__chargeModifiedAttributes.overrides = charge.overrides
else:
self.chargeID = None
self.__chargeModifiedAttributes.original = None
self.__chargeModifiedAttributes.overrides = {}
self.__itemModifiedAttributes.clear()
@property
def miningStats(self):
if self.__miningyield is None:
if self.isEmpty:
self.__miningyield = 0
else:
if self.state >= FittingModuleState.ACTIVE:
volley = self.getModifiedItemAttr("specialtyMiningAmount") or self.getModifiedItemAttr(
"miningAmount") or 0
if volley:
cycleParams = self.getCycleParameters()
if cycleParams is None:
self.__miningyield = 0
else:
cycleTime = cycleParams.averageTime
self.__miningyield = volley / (cycleTime / 1000.0)
else:
self.__miningyield = 0
else:
self.__miningyield = 0
return self.__miningyield
def isDealingDamage(self, ignoreState=False):
volleyParams = self.getVolleyParameters(ignoreState=ignoreState)
for volley in volleyParams.values():
if volley.total > 0:
return True
return False
def getVolleyParameters(self, spoolOptions=None, targetProfile=None, ignoreState=False):
if self.isEmpty or (self.state < FittingModuleState.ACTIVE and not ignoreState):
return {0: DmgTypes(0, 0, 0, 0)}
if self.__baseVolley is None:
self.__baseVolley = {}
dmgGetter = self.getModifiedChargeAttr if self.charge else self.getModifiedItemAttr
dmgMult = self.getModifiedItemAttr("damageMultiplier", 1)
# Some delay attributes have non-0 default value, so we have to pick according to effects
if {'superWeaponAmarr', 'superWeaponCaldari', 'superWeaponGallente', 'superWeaponMinmatar', 'lightningWeapon'}.intersection(self.item.effects):
dmgDelay = self.getModifiedItemAttr("damageDelayDuration", 0)
elif {'doomsdayBeamDOT', 'doomsdaySlash', 'doomsdayConeDOT'}.intersection(self.item.effects):
dmgDelay = self.getModifiedItemAttr("doomsdayWarningDuration", 0)
else:
dmgDelay = 0
dmgDuration = self.getModifiedItemAttr("doomsdayDamageDuration", 0)
dmgSubcycle = self.getModifiedItemAttr("doomsdayDamageCycleTime", 0)
# Reaper DD can damage each target only once
if dmgDuration != 0 and dmgSubcycle != 0 and 'doomsdaySlash' not in self.item.effects:
subcycles = math.floor(floatUnerr(dmgDuration / dmgSubcycle))
else:
subcycles = 1
for i in range(subcycles):
self.__baseVolley[dmgDelay + dmgSubcycle * i] = DmgTypes(
em=(dmgGetter("emDamage", 0)) * dmgMult,
thermal=(dmgGetter("thermalDamage", 0)) * dmgMult,
kinetic=(dmgGetter("kineticDamage", 0)) * dmgMult,
explosive=(dmgGetter("explosiveDamage", 0)) * dmgMult)
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
spoolBoost = calculateSpoolup(
self.getModifiedItemAttr("damageMultiplierBonusMax", 0),
self.getModifiedItemAttr("damageMultiplierBonusPerCycle", 0),
self.rawCycleTime / 1000, spoolType, spoolAmount)[0]
spoolMultiplier = 1 + spoolBoost
adjustedVolley = {}
for volleyTime, volleyValue in self.__baseVolley.items():
adjustedVolley[volleyTime] = DmgTypes(
em=volleyValue.em * spoolMultiplier * (1 - getattr(targetProfile, "emAmount", 0)),
thermal=volleyValue.thermal * spoolMultiplier * (1 - getattr(targetProfile, "thermalAmount", 0)),
kinetic=volleyValue.kinetic * spoolMultiplier * (1 - getattr(targetProfile, "kineticAmount", 0)),
explosive=volleyValue.explosive * spoolMultiplier * (1 - getattr(targetProfile, "explosiveAmount", 0)))
return adjustedVolley
def getVolley(self, spoolOptions=None, targetProfile=None, ignoreState=False):
volleyParams = self.getVolleyParameters(spoolOptions=spoolOptions, targetProfile=targetProfile, ignoreState=ignoreState)
if len(volleyParams) == 0:
return DmgTypes(0, 0, 0, 0)
return volleyParams[min(volleyParams)]
def getDps(self, spoolOptions=None, targetProfile=None, ignoreState=False):
dmgDuringCycle = DmgTypes(0, 0, 0, 0)
cycleParams = self.getCycleParameters()
if cycleParams is None:
return dmgDuringCycle
volleyParams = self.getVolleyParameters(spoolOptions=spoolOptions, targetProfile=targetProfile, ignoreState=ignoreState)
avgCycleTime = cycleParams.averageTime
if len(volleyParams) == 0 or avgCycleTime == 0:
return dmgDuringCycle
for volleyValue in volleyParams.values():
dmgDuringCycle += volleyValue
dpsFactor = 1 / (avgCycleTime / 1000)
dps = DmgTypes(
em=dmgDuringCycle.em * dpsFactor,
thermal=dmgDuringCycle.thermal * dpsFactor,
kinetic=dmgDuringCycle.kinetic * dpsFactor,
explosive=dmgDuringCycle.explosive * dpsFactor)
return dps
def isRemoteRepping(self, ignoreState=False):
repParams = self.getRepAmountParameters(ignoreState=ignoreState)
for rrData in repParams.values():
if rrData:
return True
return False
def getRepAmountParameters(self, spoolOptions=None, ignoreState=False):
if self.isEmpty or (self.state < FittingModuleState.ACTIVE and not ignoreState):
return {}
remoteModuleGroups = {
"Remote Armor Repairer": "Armor",
"Ancillary Remote Armor Repairer": "Armor",
"Mutadaptive Remote Armor Repairer": "Armor",
"Remote Hull Repairer": "Hull",
"Remote Shield Booster": "Shield",
"Ancillary Remote Shield Booster": "Shield",
"Remote Capacitor Transmitter": "Capacitor"}
rrType = remoteModuleGroups.get(self.item.group.name)
if rrType is None:
return {}
if self.__baseRRAmount is None:
self.__baseRRAmount = {}
shieldAmount = 0
armorAmount = 0
hullAmount = 0
capacitorAmount = 0
if rrType == "Hull":
hullAmount += self.getModifiedItemAttr("structureDamageAmount", 0)
elif rrType == "Armor":
if self.item.group.name == "Ancillary Remote Armor Repairer" and self.charge:
mult = self.getModifiedItemAttr("chargedArmorDamageMultiplier", 1)
else:
mult = 1
armorAmount += self.getModifiedItemAttr("armorDamageAmount", 0) * mult
elif rrType == "Shield":
shieldAmount += self.getModifiedItemAttr("shieldBonus", 0)
elif rrType == "Capacitor":
capacitorAmount += self.getModifiedItemAttr("powerTransferAmount", 0)
rrDelay = 0 if rrType == "Shield" else self.rawCycleTime
self.__baseRRAmount[rrDelay] = RRTypes(shield=shieldAmount, armor=armorAmount, hull=hullAmount, capacitor=capacitorAmount)
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
spoolBoost = calculateSpoolup(
self.getModifiedItemAttr("repairMultiplierBonusMax", 0),
self.getModifiedItemAttr("repairMultiplierBonusPerCycle", 0),
self.rawCycleTime / 1000, spoolType, spoolAmount)[0]
spoolMultiplier = 1 + spoolBoost
adjustedRRAmount = {}
for rrTime, rrAmount in self.__baseRRAmount.items():
if spoolMultiplier == 1:
adjustedRRAmount[rrTime] = rrAmount
else:
adjustedRRAmount[rrTime] = rrAmount * spoolMultiplier
return adjustedRRAmount
def getRemoteReps(self, spoolOptions=None, ignoreState=False, reloadOverride=None):
rrDuringCycle = RRTypes(0, 0, 0, 0)
cycleParams = self.getCycleParameters(reloadOverride=reloadOverride)
if cycleParams is None:
return rrDuringCycle
repAmountParams = self.getRepAmountParameters(spoolOptions=spoolOptions, ignoreState=ignoreState)
avgCycleTime = cycleParams.averageTime
if len(repAmountParams) == 0 or avgCycleTime == 0:
return rrDuringCycle
for rrAmount in repAmountParams.values():
rrDuringCycle += rrAmount
rrFactor = 1 / (avgCycleTime / 1000)
rps = rrDuringCycle * rrFactor
return rps
def getSpoolData(self, spoolOptions=None):
weaponMultMax = self.getModifiedItemAttr("damageMultiplierBonusMax", 0)
weaponMultPerCycle = self.getModifiedItemAttr("damageMultiplierBonusPerCycle", 0)
if weaponMultMax and weaponMultPerCycle:
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
_, spoolCycles, spoolTime = calculateSpoolup(
weaponMultMax, weaponMultPerCycle,
self.rawCycleTime / 1000, spoolType, spoolAmount)
return spoolCycles, spoolTime
rrMultMax = self.getModifiedItemAttr("repairMultiplierBonusMax", 0)
rrMultPerCycle = self.getModifiedItemAttr("repairMultiplierBonusPerCycle", 0)
if rrMultMax and rrMultPerCycle:
spoolType, spoolAmount = resolveSpoolOptions(spoolOptions, self)
_, spoolCycles, spoolTime = calculateSpoolup(
rrMultMax, rrMultPerCycle,
self.rawCycleTime / 1000, spoolType, spoolAmount)
return spoolCycles, spoolTime
return 0, 0
@property
def reloadTime(self):
# Get reload time from attrs first, then use
# custom value specified otherwise (e.g. in effects)
moduleReloadTime = self.getModifiedItemAttr("reloadTime")
if moduleReloadTime is None:
moduleReloadTime = self.__reloadTime
return moduleReloadTime or 0.0
@reloadTime.setter
def reloadTime(self, milliseconds):
self.__reloadTime = milliseconds
@property
def forceReload(self):
return self.__reloadForce
@forceReload.setter
def forceReload(self, type):
self.__reloadForce = type
def fits(self, fit, hardpointLimit=True):
"""
Function that determines if a module can be fit to the ship. We always apply slot restrictions no matter what
(too many assumptions are made on this); all other fitting restrictions are optional
"""
slot = self.slot
if fit.getSlotsFree(slot) <= (0 if self.owner != fit else -1):
return False
fits = self.__fitRestrictions(fit, hardpointLimit)
if not fits and fit.ignoreRestrictions:
self.restrictionOverridden = True
fits = True
elif fits and fit.ignoreRestrictions:
self.restrictionOverridden = False
return fits
def __fitRestrictions(self, fit, hardpointLimit=True):
if not fit.canFit(self.item):
return False
# EVE doesn't let capital modules be fit onto subcapital hulls. Confirmed by CCP Larrikin that this is dictated
# by the module's volume. See GH issue #1096
if not isinstance(fit.ship, Citadel) and fit.ship.getModifiedItemAttr("isCapitalSize", 0) != 1 and self.isCapitalSize:
return False
# If the mod is a subsystem, don't let two subs in the same slot fit
if self.slot == FittingSlot.SUBSYSTEM:
subSlot = self.getModifiedItemAttr("subSystemSlot")
for mod in fit.modules:
if mod is self:
continue
if mod.getModifiedItemAttr("subSystemSlot") == subSlot:
return False
# Check rig sizes
if self.slot == FittingSlot.RIG:
if self.getModifiedItemAttr("rigSize") != fit.ship.getModifiedItemAttr("rigSize"):
return False
# Check max group fitted
max = self.getModifiedItemAttr("maxGroupFitted", None)
if max is not None:
current = 0 # if self.owner != fit else -1 # Disabled, see #1278
for mod in fit.modules:
if (mod.item and mod.item.groupID == self.item.groupID and
self.getModPosition(fit) != mod.getModPosition(fit)):
current += 1
if current >= max:
return False
# Check this only if we're told to do so
if hardpointLimit:
if fit.getHardpointsFree(self.hardpoint) < 1:
return False
return True
def isValidState(self, state):
"""
Check if the state is valid for this module, without considering other modules at all
"""
# Check if we're within bounds
if state < -1 or state > 2:
return False
elif state >= FittingModuleState.ACTIVE and (not self.item.isType("active") or self.getModifiedItemAttr('activationBlocked') > 0):
return False
elif state == FittingModuleState.OVERHEATED and not self.item.isType("overheat"):
return False
else:
return True
def getMaxState(self, proposedState=None):
states = sorted((s for s in FittingModuleState if proposedState is None or s <= proposedState), reverse=True)
for state in states:
if self.isValidState(state):
return state
def canHaveState(self, state=None, projectedOnto=None):
"""
Check with other modules if there are restrictions that might not allow this module to be activated.
Returns True if state is allowed, or max state module can have if current state is invalid.
"""
# If we're going to set module to offline, it should be fine for all cases
item = self.item
if state <= FittingModuleState.OFFLINE:
return True
# Check if the local module is over its max limit; if it's not, we're fine
maxGroupOnline = self.getModifiedItemAttr("maxGroupOnline", None)
maxGroupActive = self.getModifiedItemAttr("maxGroupActive", None)
if maxGroupOnline is None and maxGroupActive is None and projectedOnto is None:
return True
# The following is applicable only to local modules; we do not want to limit projected ones
if projectedOnto is None:
currOnline = 0
currActive = 0
group = item.group.name
maxState = None
for mod in self.owner.modules:
currItem = getattr(mod, "item", None)
if currItem is not None and currItem.group.name == group:
if mod.state >= FittingModuleState.ONLINE:
currOnline += 1
if mod.state >= FittingModuleState.ACTIVE:
currActive += 1
if maxGroupOnline is not None and currOnline > maxGroupOnline:
if maxState is None or maxState > FittingModuleState.OFFLINE:
maxState = FittingModuleState.OFFLINE
break
if maxGroupActive is not None and currActive > maxGroupActive:
if maxState is None or maxState > FittingModuleState.ONLINE:
maxState = FittingModuleState.ONLINE
return True if maxState is None else maxState
# For projected, we're checking if ship is vulnerable to given item
else:
# Do not allow applying offensive modules to a ship with offensive module immunity, with a few exceptions
# (generally speaking, all effects which apply an instant modification are exceptions)
if item.offensive and projectedOnto.ship.getModifiedItemAttr("disallowOffensiveModifiers") == 1:
offensiveNonModifiers = {"energyDestabilizationNew",
"leech",
"energyNosferatuFalloff",
"energyNeutralizerFalloff"}
if not offensiveNonModifiers.intersection(set(item.effects)):
return FittingModuleState.OFFLINE
# If assistive modules are not allowed, do not let to apply these altogether
if item.assistive and projectedOnto.ship.getModifiedItemAttr("disallowAssistance") == 1:
return FittingModuleState.OFFLINE
return True
def isValidCharge(self, charge):
# Check sizes, if 'charge size > module volume' it won't fit
if charge is None:
return True
chargeVolume = charge.volume
moduleCapacity = self.item.capacity
if chargeVolume is not None and moduleCapacity is not None and chargeVolume > moduleCapacity:
return False
itemChargeSize = self.getModifiedItemAttr("chargeSize")
if itemChargeSize > 0:
chargeSize = charge.getAttribute('chargeSize')
if itemChargeSize != chargeSize:
return False
chargeGroup = charge.groupID
for i in range(5):
itemChargeGroup = self.getModifiedItemAttr('chargeGroup' + str(i), None)
if itemChargeGroup is None:
continue
if itemChargeGroup == chargeGroup:
return True
return False
def getValidCharges(self):
validCharges = set()
for i in range(5):
itemChargeGroup = self.getModifiedItemAttr('chargeGroup' + str(i), None)
if itemChargeGroup is not None:
g = eos.db.getGroup(int(itemChargeGroup), eager="items.attributes")
if g is None:
continue
for singleItem in g.items:
if singleItem.published and self.isValidCharge(singleItem):
validCharges.add(singleItem)
return validCharges
@staticmethod
def __calculateHardpoint(item):
effectHardpointMap = {
"turretFitted" : FittingHardpoint.TURRET,
"launcherFitted": FittingHardpoint.MISSILE
}
if item is None:
return FittingHardpoint.NONE
for effectName, slot in effectHardpointMap.items():
if effectName in item.effects:
return slot
return FittingHardpoint.NONE
@staticmethod
def calculateSlot(item):
effectSlotMap = {
"rigSlot" : FittingSlot.RIG.value,
"loPower" : FittingSlot.LOW.value,
"medPower" : FittingSlot.MED.value,
"hiPower" : FittingSlot.HIGH.value,
"subSystem" : FittingSlot.SUBSYSTEM.value,
"serviceSlot": FittingSlot.SERVICE.value
}
if item is None:
return None
for effectName, slot in effectSlotMap.items():
if effectName in item.effects:
return slot
if item.group.name in Module.SYSTEM_GROUPS:
return FittingSlot.SYSTEM
return None
@validates("ID", "itemID", "ammoID")
def validator(self, key, val):
map = {
"ID" : lambda _val: isinstance(_val, int),
"itemID": lambda _val: _val is None or isinstance(_val, int),
"ammoID": lambda _val: isinstance(_val, int)
}
if not map[key](val):
raise ValueError(str(val) + " is not a valid value for " + key)
else:
return val
def clear(self):
self.__baseVolley = None
self.__baseRRAmount = None
self.__miningyield = None
self.__reloadTime = None
self.__reloadForce = None
self.__chargeCycles = None
self.itemModifiedAttributes.clear()
self.chargeModifiedAttributes.clear()
def calculateModifiedAttributes(self, fit, runTime, forceProjected=False, gang=False, forcedProjRange=DEFAULT):
# We will run the effect when two conditions are met:
# 1: It makes sense to run the effect
# The effect is either offline
# or the effect is passive and the module is in the online state (or higher)
# or the effect is active and the module is in the active state (or higher)
# or the effect is overheat and the module is in the overheated state (or higher)
# 2: the runtimes match
if self.projected or forceProjected:
context = "projected", "module"
projected = True
else:
context = ("module",)
projected = False
projectionRange = self.projectionRange if forcedProjRange is DEFAULT else forcedProjRange
if self.charge is not None:
# fix for #82 and its regression #106
if not projected or (self.projected and not forceProjected) or gang:
for effect in self.charge.effects.values():
if (
effect.runTime == runTime and
effect.activeByDefault and (
effect.isType("offline") or
(effect.isType("passive") and self.state >= FittingModuleState.ONLINE) or
(effect.isType("active") and self.state >= FittingModuleState.ACTIVE)) and
(not gang or (gang and effect.isType("gang")))
):
contexts = ("moduleCharge",)
effect.handler(fit, self, contexts, projectionRange, effect=effect)
if self.item:
if self.state >= FittingModuleState.OVERHEATED:
for effect in self.item.effects.values():
if effect.runTime == runTime and \
effect.isType("overheat") \
and not forceProjected \
and effect.activeByDefault \
and ((gang and effect.isType("gang")) or not gang):
effect.handler(fit, self, context, projectionRange, effect=effect)
for effect in self.item.effects.values():
if effect.runTime == runTime and \
effect.activeByDefault and \
(effect.isType("offline") or
(effect.isType("passive") and self.state >= FittingModuleState.ONLINE) or
(effect.isType("active") and self.state >= FittingModuleState.ACTIVE)) \
and ((projected and effect.isType("projected")) or not projected) \
and ((gang and effect.isType("gang")) or not gang):
effect.handler(fit, self, context, projectionRange, effect=effect)
def getCycleParameters(self, reloadOverride=None):
"""Copied from new eos as well"""
# Determine if we'll take into account reload time or not
if reloadOverride is not None:
factorReload = reloadOverride
else:
factorReload = self.owner.factorReload if self.forceReload is None else self.forceReload
cycles_until_reload = self.numShots
if cycles_until_reload == 0:
cycles_until_reload = math.inf
active_time = self.rawCycleTime
if active_time == 0:
return None
forced_inactive_time = self.reactivationDelay
reload_time = self.reloadTime
# Effects which cannot be reloaded have the same processing whether
# caller wants to take reload time into account or not
if reload_time is None and cycles_until_reload < math.inf:
final_cycles = 1
early_cycles = cycles_until_reload - final_cycles
# Single cycle until effect cannot run anymore
if early_cycles == 0:
return CycleInfo(active_time, 0, 1, False)
# Multiple cycles with the same parameters
if forced_inactive_time == 0:
return CycleInfo(active_time, 0, cycles_until_reload, False)
# Multiple cycles with different parameters
return CycleSequence((
CycleInfo(active_time, forced_inactive_time, early_cycles, False),
CycleInfo(active_time, 0, final_cycles, False)
), 1)
# Module cycles the same way all the time in 3 cases:
# 1) caller doesn't want to take into account reload time
# 2) effect does not have to reload anything to keep running
# 3) effect has enough time to reload during inactivity periods
if (
not factorReload or
cycles_until_reload == math.inf or
forced_inactive_time >= reload_time
):
isInactivityReload = factorReload and forced_inactive_time >= reload_time
return CycleInfo(active_time, forced_inactive_time, math.inf, isInactivityReload)
# We've got to take reload into consideration
else:
final_cycles = 1
early_cycles = cycles_until_reload - final_cycles
# If the effect has to reload after each of its cycles, then its parameters
# are the same all the time
if early_cycles == 0:
return CycleInfo(active_time, reload_time, math.inf, True)
return CycleSequence((
CycleInfo(active_time, forced_inactive_time, early_cycles, False),
CycleInfo(active_time, reload_time, final_cycles, True)
), math.inf)
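# Worked example of the reload branch (hand-computed, factorReload True): a
# module with numShots=3, rawCycleTime=5000, reactivationDelay=0 and
# reloadTime=10000 yields a CycleSequence of two plain 5 s cycles followed by
# one 5 s cycle with a 10 s reload, repeated indefinitely.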
@property
def rawCycleTime(self):
speed = max(
self.getModifiedItemAttr("speed", 0), # Most weapons
self.getModifiedItemAttr("duration", 0), # Most average modules
self.getModifiedItemAttr("durationSensorDampeningBurstProjector", 0),
self.getModifiedItemAttr("durationTargetIlluminationBurstProjector", 0),
self.getModifiedItemAttr("durationECMJammerBurstProjector", 0),
self.getModifiedItemAttr("durationWeaponDisruptionBurstProjector", 0)
)
return speed
@property
def disallowRepeatingAction(self):
return self.getModifiedItemAttr("disallowRepeatingActivation", 0)
@property
def reactivationDelay(self):
return self.getModifiedItemAttr("moduleReactivationDelay", 0)
@property
def capUse(self):
capNeed = self.getModifiedItemAttr("capacitorNeed")
if capNeed and self.state >= FittingModuleState.ACTIVE:
cycleParams = self.getCycleParameters()
if cycleParams is None:
return 0
cycleTime = cycleParams.averageTime
if cycleTime > 0:
capUsed = capNeed / (cycleTime / 1000.0)
return capUsed
else:
return 0
@staticmethod
def getProposedState(mod, click, proposedState=None):
pyfalog.debug("Get proposed state for module.")
if mod.slot == FittingSlot.SUBSYSTEM or mod.isEmpty:
return FittingModuleState.ONLINE
if mod.slot == FittingSlot.SYSTEM:
transitionMap = ProjectedSystem
else:
transitionMap = ProjectedMap if mod.projected else LocalMap
currState = mod.state
if proposedState is not None:
state = proposedState
elif click == "right":
state = FittingModuleState.OVERHEATED
elif click == "ctrl":
state = FittingModuleState.OFFLINE
else:
state = transitionMap[currState]
# If a module tries to transition into an invalid state while online,
# put it offline instead
if not mod.isValidState(state) and currState == FittingModuleState.ONLINE:
state = FittingModuleState.OFFLINE
return mod.getMaxState(proposedState=state)
def __deepcopy__(self, memo):
item = self.item
if item is None:
copy = Module.buildEmpty(self.slot)
else:
copy = Module(self.item, self.baseItem, self.mutaplasmid)
copy.charge = self.charge
copy.state = self.state
copy.spoolType = self.spoolType
copy.spoolAmount = self.spoolAmount
copy.projectionRange = self.projectionRange
for x in self.mutators.values():
Mutator(copy, x.attribute, x.value)
return copy
def rebase(self, item):
state = self.state
charge = self.charge
spoolType = self.spoolType
spoolAmount = self.spoolAmount
projectionRange = self.projectionRange
Module.__init__(self, item, self.baseItem, self.mutaplasmid)
self.state = state
if self.isValidCharge(charge):
self.charge = charge
self.spoolType = spoolType
self.spoolAmount = spoolAmount
self.projectionRange = projectionRange
for x in self.mutators.values():
Mutator(self, x.attribute, x.value)
def __repr__(self):
if self.item:
return "Module(ID={}, name={}) at {}".format(
self.item.ID, self.item.name, hex(id(self))
)
else:
return "EmptyModule() at {}".format(hex(id(self)))
class Rack(Module):
"""
This is simply the Module class named something else to differentiate
it for app logic. The only thing interesting about it is the num property,
which is the number of slots for this rack
"""
num = None
|
gpl-3.0
| 1,596,012,414,091,344,100
| 41.159526
| 155
| 0.613894
| false
| 4.268876
| false
| false
| false
|
ezarowny/url-condenser
|
url_condenser/url_condenser/settings.py
|
1
|
3209
|
"""
Django settings for url_condenser project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+!6_uwpq6@ee)620m6f@lni3**fz5a8pjetd#)^e!t&hf#u&=k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
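# Illustrative production values (example host only):
#   DEBUG = False
#   ALLOWED_HOSTS = ['condenser.example.com']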
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'condensed_urls',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'url_condenser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'url_condenser.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -3,015,443,904,954,819,600
| 25.303279
| 91
| 0.689
| false
| 3.495643
| false
| false
| false
|
vsego/PyteArt
|
patgen.py
|
1
|
2830
|
#!/usr/bin/env python3
"""
Patterns generator for img2dat.py
"""
from itertools import combinations
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from string import printable, ascii_letters, punctuation
from sys import argv, stderr
class GenPatts:
allowedHoles = 0 # how many colors are permitted to remain unassigned
charSet = ascii_letters + punctuation # character set to be used for all or only first character
# (see reduceTo argument of asString() method)
colNum = 256 # the number of colors
font = "LiberationMono-Regular.ttf" # font file
maxChars = 3 # maximum number of overlapping characters
size = 31 # size of each character (bigger numbers produce finer images)
xy = (0, -size // 6) # where on the temporary image to put the character
skipChars = { "^", "#", "]", "/", "-" } # chars to be excluded (those with special meaning)
def asString(self, reduceTo = None):
"""
Generate patterns and return them as a list.
Parameter reduceTo is a string that is used for all characters but the first.
If None, it is ignored and self.charSet is used for all characters (this might be a wee bit slow).
"""
patterns = [ "" for g in range(1, 256) ] + [ " " ]
left2do = self.colNum - 1
chars = 1
font = ImageFont.truetype(self.font, self.size)
imgSize = font.getsize('X')
charSet = set(self.charSet) - set(self.skipChars)
colFactor = 256 / self.colNum
while left2do > self.allowedHoles and chars <= self.maxChars:
for code in combinations(charSet, chars):
#if not self.skipChars.intersection(set(code)):
im = Image.new('L', imgSize, '#ffffff')
draw = ImageDraw.Draw(im)
for char in code:
draw.text(self.xy, char, '#000000', font)
hist = im.histogram()
col = round(sum([ hist[i] * i for i in range(len(hist)) ]) / sum(hist))
col = min(max(0, round(colFactor * round(col / colFactor))), 255)
if patterns[col] == "":
patterns[col] = code
left2do -= 1
if left2do <= 0:
break
chars += 1
if reduceTo and chars == 2:
charSet = set(reduceTo) - set(self.skipChars)
return patterns
def toFile(self, fname, reduceTo = None):
with open(fname, "w", encoding = "utf-8") as f:
f.write(str(self.asString(reduceTo)))
if __name__ == "__main__":
fname = (argv[1] if len(argv) > 1 else "img2dat.pat")
gp = GenPatts()
gp.toFile(fname, reduceTo = ascii_letters)
stderr.write("Patterns created and saved to \"" + fname + "\".\n")
|
gpl-2.0
| -1,986,131,092,147,713,800
| 39.428571
| 106
| 0.576325
| false
| 3.958042
| false
| false
| false
|
theindependentwolf/liverpool-discord-bot
|
result_details.py
|
1
|
4915
|
import discord
import random
from random import randint
import urllib.request
from html.parser import HTMLParser
from bs4 import BeautifulSoup
import nocontext
import requests
import datetime
import config
import asyncio
import csv
import time
import dateutil.relativedelta as relativetd
def next_game():
"""
Count down to the next game
"""
current_time = datetime.datetime.now()
opponent_details = get_opponent_details()
if opponent_details:
oppo_time = opponent_details[0]
opponent_time = datetime.datetime(int(oppo_time[:4]), int(oppo_time[4:6]), int(oppo_time[6:8]), int(oppo_time[9:11]) - 1, int(oppo_time[11:13]))
countdown = relativetd.relativedelta(opponent_time, current_time)
countdown_readable = "{} day(s) {} hours {} minutes {} seconds".format(countdown.days, countdown.hours, countdown.minutes, countdown.seconds)
return "```{}\n{}```".format(countdown_readable, opponent_details[1])
else:
return "```No fixtures found in the calendar```"
def get_opponent_details():
"""
Return opponent details
"""
todays_date = time.strftime("%Y%m%d")
opponent_details = ""
with open('fixtures.csv','rt') as csvfile:
content = csv.reader(csvfile, delimiter = ',')
for row in content:
date = row[0]
summary = row[1]
if date[:8] >= todays_date:
return row
def get_readable_time(input_date):
"""
Convert a yyyymmddThhmm timestamp into readable time
"""
weekchart = {0:"Monday", 1:"Tuesday", 2:"Wednesday", 3:"Thursday", 4:"Friday",5:"Saturday", 6:"Sunday"}
readable_time = ""
separator_slash = "/"
separator_colon = ":"
space = " "
year = input_date[:4]
month = input_date[4:6]
date = input_date[6:8]
hour = input_date[9:11]
minute = input_date[11:13]
day = datetime.datetime(int(year), int(month), int(date), 0, 0, 0, 0).weekday()
return ('{:9s} {}/{}/{} {}:{}'.format(weekchart.get(day), month, date, year, hour, minute))
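# Illustrative sketch (added here, not in the original file): assuming the
# calendar export uses the basic yyyymmddT%H%M%S stamp parsed above,
#
#   get_readable_time('20240817T150000')
#
# returns 'Saturday  08/17/2024 15:00' (2024-08-17 is a Saturday).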
def get_fixtures():
"""
Gets the next 5 fixtures according to date
"""
printable_string ="```"
todays_date = time.strftime("%Y%m%d")
count = 1
with open('fixtures.csv','rt') as csvfile:
content = csv.reader(csvfile, delimiter=',')
for row in content:
date = row[0]
summary = row[1]
if date[:8] > todays_date:
printable_string += get_readable_time(date) + " " + get_home_away(summary) + " " + summary.replace("Liverpool","").replace(" v ","").strip() + "\n"
if count == config.number_of_fixtures:
printable_string += "```"
return printable_string
else:
count = count + 1
def get_home_away(summary):
"""
Tells if it's a home or an away fixture
"""
if summary.startswith('Liverpool'):
return "home"
else:
return "away"
def ten_games(*team):
"""
Get the results of the last 10 games for EPL Teams from the BBC Website
"""
if not team:
team = "Liverpool"
else:
team = team[0]
url = "http://www.bbc.com/sport/football/premier-league/table"
html = urllib.request.urlopen(url).read()
bs = BeautifulSoup(html, "html.parser")
tables = bs.findChildren('table')
my_table = tables[0]
rows = my_table.findChildren(['tr'])
printable_results = "```"
for row in rows:
if row.find('ol'):
team_name = row.find('td', class_="team-name")
if team.lower() in team_name.string.lower():
ten_games = row.find('ol').findChildren(['li'])
for game in ten_games:
printable_results += game.get('title') + "\n"
printable_results += "```"
print(printable_results)
# return printable_results
def team_form():
"""
Get the last-10-games form for every EPL team from the BBC website
"""
url = "http://www.bbc.com/sport/football/premier-league/table"
html = urllib.request.urlopen(url).read()
bs = BeautifulSoup(html, "html.parser")
tables = bs.findChildren('table')
my_table = tables[0]
rows = my_table.findChildren(['tr'])
position = 1
printable_form = "```"
for row in rows:
if row.find('ol'):
team_name = row.find('td', class_="team-name")
print(team_name)
ten_games = row.find('ol').findChildren(['li'])
printable_form += str(position).rjust(3) + " " + str(team_name.text.ljust(23))
for game in ten_games:
printable_form += game.string[0] + " "
printable_form += "\n"
position = position + 1
printable_form += "```"
# return printable_form
print(printable_form)
|
mit
| 744,983,116,459,146,000
| 29.71875
| 165
| 0.580671
| false
| 3.564177
| false
| false
| false
|
richardcornish/smsweather
|
fabfile.py
|
1
|
2185
|
from fabric import task
from django.utils.termcolors import colorize
# 1. Local: ssh-add ~/.ssh/aws.pem
# 2. Local: Edit hosts, repo_name, pythonpath (if necessary)
# 3. Remote: Copy .env to {code_dir}/.env
hosts = [{
'host': 'ec2-3-89-247-193.compute-1.amazonaws.com',
'user': 'ubuntu',
}]
repo_name = 'emojiweather'
pythonpath = repo_name
service_name = repo_name
code_dir = f'/home/ubuntu/{repo_name}'
@task
def update(c):
print(colorize('\nUpdating code...', fg='white'))
c.run(f'cd {code_dir} && git pull origin master')
@task
def install(c):
print(colorize('\nInstalling dependencies...', fg='white'))
c.run(f'cd {code_dir} && source env/bin/activate && pip install -r requirements.txt')
@task
def migrate(c):
print(colorize('\nMigrating database...', fg='white'))
c.inline_ssh_env = True
c.run(f'source {code_dir}/.env && cd {code_dir} && source env/bin/activate && python {pythonpath}/manage.py migrate --noinput', env={'DEBUG': '$DEBUG', 'DATABASE_PASSWORD': '$DATABASE_PASSWORD'})
@task
def collect(c):
print(colorize('\nCopying static files...', fg='white'))
c.run(f'cd {code_dir} && source env/bin/activate && python {pythonpath}/manage.py collectstatic --noinput')
@task
def clear(c):
print(colorize('\nDeleting sessions...', fg='white'))
c.inline_ssh_env = True
c.run(f'source {code_dir}/.env && cd {code_dir} && source env/bin/activate && python {pythonpath}/manage.py clearsessions', env={'DEBUG': '$DEBUG', 'DATABASE_PASSWORD': '$DATABASE_PASSWORD'})
@task
def restart(c):
print(colorize('\nRestarting web server...\n', fg='white'))
c.run(f'sudo systemctl restart {service_name}')
c.run(f'sudo systemctl status {service_name}')
print('')
c.run('sudo systemctl restart nginx')
c.run('sudo systemctl status nginx')
@task(hosts=hosts)
def deploy(c):
print(colorize('\nStarting deploy... 👌', fg='green'))
try:
update(c)
install(c)
migrate(c)
collect(c)
# clear(c)
restart(c)
print(colorize('\nDeploy succeeded 🎉', fg='green'))
except Exception:
print(colorize('\nDeploy failed ❌', fg='red'))
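# Hedged usage note (added for illustration): with Fabric 2.x installed and the
# host/key setup from steps 1-3 above done, the whole pipeline is typically run
# from the repo root as:
#
#   $ fab deploy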
|
bsd-3-clause
| 7,770,267,891,711,363,000
| 26.910256
| 199
| 0.637115
| false
| 3.159652
| false
| false
| false
|
espenhgn/hybridLFPy
|
examples/example_microcircuit_params_lognormalweights.py
|
1
|
38825
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Modified parameters file for the Hybrid LFP scheme, applying the methodology
with the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
'''
import numpy as np
import os
import json
from mpi4py import MPI # this is needed to initialize other classes correctly
import multiprocessing as mp # to facilitate OpenMP parallelization w. NEST if MPI.SIZE == 1
###################################
# Initialization of MPI stuff #
###################################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
####################################
# HELPER FUNCTIONS #
####################################
def flattenlist(lst): return sum(sum(lst, []), [])
####################################
# SPATIAL CONNECTIVITY EXTRACTION #
####################################
'''
Include functions that extract information from binzegger.json here
'''
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']):
'''
Extract frequency of occurrences of those cell types that are modeled.
The data set contains cell types that are not modeled (TCs etc.)
The returned percentages are renormalized onto modeled cell-types,
i.e. they sum up to 1
'''
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
occurr = []
for cell_type in y:
occurr += [data['data'][cell_type]['occurrence']]
return list(np.array(occurr) / np.sum(occurr))
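# Hedged usage sketch (added for illustration; 'p23' and 'b23' are example cell
# types from the Binzegger table, and the exact values depend on the json file):
#
#   F_y = get_F_y(fname='binzegger_connectivity_table.json', y=['p23', 'b23'])
#   assert abs(sum(F_y) - 1.) < 1e-12 # renormalized onto the requested types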
def get_L_yXL(fname, y, x_in_X, L):
'''
compute the layer specificity, defined as:
::
L_yXL = k_yXL / k_yX
'''
def _get_L_yXL_per_yXL(fname, x_in_X, X_index,
y, layer):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# Get number of synapses
if layer in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
# init variables
k_yXL = 0
k_yX = 0
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][layer][x] / 100.
k_yL = data['data'][y]['syn_dict'][layer]['number of synapses per neuron']
k_yXL += p_yxL * k_yL
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
if k_yXL != 0.:
return k_yXL / k_yX
else:
return 0.
else:
return 0.
# init dict
L_yXL = {}
# iterate over postsynaptic cell types
for y_value in y:
# container
data = np.zeros((len(L), len(x_in_X)))
# iterate over lamina
for i, Li in enumerate(L):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
data[i][j] = _get_L_yXL_per_yXL(fname, x_in_X,
X_index=j,
y=y_value,
layer=Li)
L_yXL[y_value] = data
return L_yXL
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y):
'''
compute the cell type specificity, defined as:
::
T_yX = K_yX / K_YX
= F_y * k_yX / sum_y(F_y*k_yX)
'''
def _get_k_yX_mul_F_y(y, y_index, X_index):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# init variables
k_yX = 0.
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
return k_yX * F_y[y_index]
# container
T_yX = np.zeros((len(y), len(x_in_X)))
# iterate over postsynaptic cell types
for i, y_value in enumerate(y):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
k_yX_mul_F_y = 0
for k, yy in enumerate(sum(y_in_Y, [])):
if y_value in yy:
for yy_value in yy:
ii = np.where(np.array(y) == yy_value)[0][0]
k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j)
if k_yX_mul_F_y != 0:
T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y
return T_yX
class general_params(object):
'''class defining general model parameters'''
def __init__(self):
'''class defining general model parameters'''
####################################
# REASON FOR THIS SIMULATION #
####################################
self.reason = 'Default Potjans model with spontaneous activity'
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
####################################
# MAIN SIMULATION CONTROL #
####################################
# simulation step size
self.dt = 0.1
# simulation start
self.tstart = 0
# simulation stop
self.tstop = 1200
####################################
# OUTPUT LOCATIONS #
####################################
# folder for all simulation output and scripts
# using the cluster's dedicated SCRATCH area
if 'SCRATCH' in os.environ and os.path.isdir(
os.path.join(os.environ['SCRATCH'], os.environ['USER'])):
self.savefolder = os.path.join(
os.environ['SCRATCH'],
os.environ['USER'],
'hybrid_model',
'simulation_output_example_microcircuit_lognormalweights')
# LOCALLY
else:
self.savefolder = 'simulation_output_example_microcircuit_lognormalweights'
# folder for simulation scripts
self.sim_scripts_path = os.path.join(self.savefolder, 'sim_scripts')
# folder for each individual cell's output
self.cells_path = os.path.join(self.savefolder, 'cells')
# folder for figures
self.figures_path = os.path.join(self.savefolder, 'figures')
# folder for population resolved output signals
self.populations_path = os.path.join(self.savefolder, 'populations')
# folder for raw nest output files
self.raw_nest_output_path = os.path.join(self.savefolder,
'raw_nest_output')
# folder for processed nest output files
self.spike_output_path = os.path.join(self.savefolder,
'processed_nest_output')
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# POPULATIONS #
####################################
# Number of populations
self.Npops = 9
# number of neurons in each population (unscaled)
self.full_scale_num_neurons = [[20683, # layer 23 e
5834], # layer 23 i
[21915, # layer 4 e
5479], # layer 4 i
[4850, # layer 5 e
1065], # layer 5 i
[14395, # layer 6 e
2948]] # layer 6 i
# Number of thalamic neurons/ point processes
self.n_thal = 902
# population names TODO: rename
self.X = [
'TC',
'L23E',
'L23I',
'L4E',
'L4I',
'L5E',
'L5I',
'L6E',
'L6I']
self.Y = self.X[1:]
# TC and cortical population sizes in one list TODO: rename
self.N_X = np.array([self.n_thal] +
flattenlist([self.full_scale_num_neurons]))
####################################
# CONNECTIVITY #
####################################
# intra-cortical connection probabilities between populations
# 23e 23i 4e 4i 5e 5i 6e 6i
self.conn_probs = np.array([[0.1009, 0.1689, 0.0437, 0.0818,
0.0323, 0., 0.0076, 0.], # 23e
[0.1346, 0.1371, 0.0316, 0.0515,
0.0755, 0., 0.0042, 0.], # 23i
[0.0077, 0.0059, 0.0497, 0.135,
0.0067, 0.0003, 0.0453, 0.], # 4e
[0.0691, 0.0029, 0.0794, 0.1597,
0.0033, 0., 0.1057, 0.], # 4i
[0.1004, 0.0622, 0.0505, 0.0057,
0.0831, 0.3726, 0.0204, 0.], # 5e
[0.0548, 0.0269, 0.0257, 0.0022,
0.06, 0.3158, 0.0086, 0.], # 5i
[0.0156, 0.0066, 0.0211, 0.0166, 0.0572,
0.0197, 0.0396, 0.2252], # 6e
[0.0364, 0.001, 0.0034, 0.0005,
0.0277, 0.008, 0.0658, 0.1443]]) # 6i
self.conn_probs *= 1.0
# connection probabilities for thalamic input
self.C_th = [[0.0, # layer 23 e
0.0], # layer 23 i
[0.0983, # layer 4 e
0.0619], # layer 4 i
[0.0, # layer 5 e
0.0], # layer 5 i
[0.0512, # layer 6 e
0.0196]] # layer 6 i
# full connection probabilities including TC connections
self.C_YX = np.c_[flattenlist([self.C_th]), self.conn_probs]
####################################
# CONNECTION PROPERTIES #
####################################
# mean EPSP amplitude (mV) for all connections except L4e->L23e
self.PSP_e = 0.15
# mean EPSP amplitude (mv) for L4e->L23e connections
# FIX POLISH NOTATION !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
self.PSP_23e_4e = self.PSP_e * 2
# standard deviation of PSC amplitudes relative to mean PSC amplitudes
# this is sigma/mu in probability distribution
# Gaussian (lognormal_weights = False): mu is mean, sigma is standard deviation
# Lognormal (lognormal_weights = True): mean and stdev can be
# calculated from mu and sigma
self.PSC_rel_sd = 3.0
# IPSP amplitude relative to EPSP amplitude
self.g = -4.
# set L4i ->L4e stronger in order to get rid of 84 Hz peak
self.g_4e_4i = self.g * 1.15
# Whether to use lognormal weights or not
self.lognormal_weights = True
# mean dendritic delays for excitatory and inhibitory transmission (ms)
self.delays = [1.5, 0.75]
# standard deviation relative to mean delays
self.delay_rel_sd = 0.5
####################################
# CELL-TYPE PARAMETERS #
####################################
# Note that these parameters are only relevant for the point-neuron network in case
# one wants to calculate depth-resolved cell-type specific input
# currents
# point to .json connectivity table file
self.connectivity_table = 'binzegger_connectivity_table.json'
# list of cell type names used in this script
# names of every post-syn pop layer
self.y_in_Y = [
[['p23'], ['b23', 'nb23']],
[['p4', 'ss4(L23)', 'ss4(L4)'], ['b4', 'nb4']],
[['p5(L23)', 'p5(L56)'], ['b5', 'nb5']],
[['p6(L4)', 'p6(L56)'], ['b6', 'nb6']]]
self.y = flattenlist(self.y_in_Y)
# need presynaptic cell type to population mapping
self.x_in_X = [['TCs', 'TCn']] + sum(self.y_in_Y, [])
# map the pre-synaptic populations to the post-syn populations
self.mapping_Yy = list(zip(
['L23E', 'L23I', 'L23I',
'L4E', 'L4E', 'L4E', 'L4I', 'L4I',
'L5E', 'L5E', 'L5I', 'L5I',
'L6E', 'L6E', 'L6I', 'L6I'],
self.y))
# Frequency of occurrence of each cell type (F_y); 1-d array
self.F_y = get_F_y(fname=self.connectivity_table, y=self.y)
# Relative frequency of occurrence of each cell type within its
# population (F_{y,Y})
self.F_yY = [[get_F_y(fname=self.connectivity_table, y=y)
for y in Y] for Y in self.y_in_Y]
# Number of neurons of each cell type (N_y); 1-d array
self.N_y = np.array([self.full_scale_num_neurons[layer][pop] * self.F_yY[layer][pop][k]
for layer, array in enumerate(self.y_in_Y)
for pop, cell_types in enumerate(array)
for k, _ in enumerate(cell_types)]).astype(int)
# compute the number of synapses as in Potjans&Diesmann 2012
K_YX = np.zeros(self.C_YX.shape)
for i in range(K_YX.shape[1]):
K_YX[:, i] = (np.log(1. - self.C_YX[:, i]) /
np.log(1. - 1. / (self.N_X[1:] *
self.N_X[i])))
# spatial connection probabilities on each subpopulation
# Each key must correspond to a subpopulation like 'L23E' used everywhere else,
# each array maps thalamic and intracortical connections.
# First column is thalamic connections, and the rest intracortical,
# ordered like 'L23E', 'L23I' etc., first row is normalised probability of
# connection within L1, L2, etc.;
self.L_yXL = get_L_yXL(fname=self.connectivity_table,
y=self.y,
x_in_X=self.x_in_X,
L=['1', '23', '4', '5', '6'])
# compute the cell type specificity
self.T_yX = get_T_yX(fname=self.connectivity_table, y=self.y,
y_in_Y=self.y_in_Y, x_in_X=self.x_in_X,
F_y=self.F_y)
Y, y = list(zip(*self.mapping_Yy))
# assess relative distribution of synapses for a given celltype
self.K_yXL = {}
#self.T_yX = {}
for i, (Y, y) in enumerate(self.mapping_Yy):
# fill in K_yXL (layer specific connectivity)
self.K_yXL[y] = (self.T_yX[i, ] *
K_YX[np.array(self.Y) == Y, ] *
self.L_yXL[y]).astype(int)
# number of incoming connections per cell type per layer per cell
self.k_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.k_yXL.update({y: (1. * self.K_yXL[y]).astype(int) // N_y})
# calculate corresponding connectivity to K_yXL
self.C_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.C_yXL.update(
{y: 1. - (1. - 1. / (N_y * self.N_X))**self.K_yXL[y]})
##########################################################################
class point_neuron_network_params(general_params):
'''class point-neuron network parameters'''
def __init__(self):
'''class point-neuron network parameters'''
# inherit general params
general_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
# use same number of threads as MPI COMM.size() for parallel jobs
# else the number of processors for serial jobs
if SIZE > 1:
self.total_num_virtual_procs = SIZE
else:
self.total_num_virtual_procs = mp.cpu_count()
####################################
# RNG PROPERTIES #
####################################
# offset for RNGs
self.seed_offset = 45
####################################
# RECORDING PARAMETERS #
####################################
self.overwrite_existing_files = True
# recording can either be done from a fraction of neurons in each
# population or from a fixed number
# whether to record spikes from a fixed fraction of neurons in each
# population.
self.record_fraction_neurons_spikes = True
if self.record_fraction_neurons_spikes:
self.frac_rec_spikes = 1.
else:
self.n_rec_spikes = 100
# whether to record membrane potentials from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_voltage = False
if self.record_fraction_neurons_voltage:
self.frac_rec_voltage = 0.1
else:
self.n_rec_voltage = 0
# whether to record weighted input spikes from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_input_spikes = False
if self.record_fraction_neurons_input_spikes:
self.frac_rec_input_spikes = 0.1
else:
self.n_rec_input_spikes = 0
# number of recorded neurons for depth resolved input currents
self.n_rec_depth_resolved_input = 0
# NESTio recording format
self.record_to = 'ascii'
# whether to record thalamic spikes
self.record_thalamic_spikes = True
# global ID file name
self.GID_filename = 'population_GIDs.dat'
# readout global ID file name
self.readout_GID_filename = 'readout_GIDs.dat'
# stem for spike detector file labels
self.spike_recorder_label = 'spikes_'
# stem for voltmeter file labels
self.voltmeter_label = 'voltages_'
# stem for thalamic spike detector file labels
self.th_spike_recorder_label = 'spikes_0'
# stem for in-degree file labels
self.in_degree_label = 'in_degrees_'
# stem for file labels for in-degree from thalamus
self.th_in_degree_label = 'in_degrees_th_'
# stem for weighted input spikes labels
self.weighted_input_spikes_label = 'weighted_input_spikes_'
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING #
####################################
# scaling parameter for population sizes
self.area = 1.0
# preserve indegrees when downscaling
self.preserve_K = False
####################################
# SINGLE NEURON PARAMS #
####################################
# neuron model
self.neuron_model = '/iaf_psc_exp'
# mean of initial membrane potential (mV)
self.Vm0_mean = -58.0
# std of initial membrane potential (mV)
self.Vm0_std = 10.0
# mean of threshold potential (mV)
self.V_th_mean = -50.
# std of threshold potential (mV)
self.V_th_std = 1E-8 # nest::NormalParameter: std > 0 required.
self.model_params = {'tau_m': 10., # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
# absolute refractory period (ms)
't_ref': 2.,
# resting membrane potential (mV)
'E_L': -65.,
'V_th': self.V_th_mean, # spike threshold (mV)
'C_m': 250., # membrane capacitance (pF)
'V_reset': -65. # reset potential (mV)
}
####################################
# EXTERNAL INPUTS #
####################################
# number of external inputs (Potjans-Diesmann model 2012)
self.K_bg = [[1600, # layer 23 e
1500], # layer 23 i
[2100, # layer 4 e
1900], # layer 4 i
[2000, # layer 5 e
1900], # layer 5 i
[2900, # layer 6 e
2100]] # layer 6 i
# rate of Poisson input at each external input synapse (spikes/s)
self.bg_rate = 0.
# rate of equivalent input used for DC amplitude calculation,
# set to zero if self.bg_rate > 0.
self.bg_rate_dc = 8.
# DC amplitude at each external input synapse (pA)
# to each neuron via 'dc_amplitude = tau_syn_ex/1000*bg_rate*PSC_ext'
self.dc_amplitude = self.model_params["tau_syn_ex"] * \
self.bg_rate_dc * self._compute_J()
# mean EPSP amplitude (mV) for thalamic and non-thalamic external input
# spikes
self.PSP_ext = 0.15
# mean delay of thalamic input (ms)
self.delay_th = 1.5
# standard deviation relative to mean delay of thalamic input
self.delay_th_rel_sd = 0.5
####################################
# THALAMIC INPUT VERSIONS #
####################################
# off-option for start of thalamic input versions
self.off = 100. * self.tstop
# poisson_generator (pure Poisson input)
self.th_poisson_start = self.off # onset (ms)
self.th_poisson_duration = 10. # duration (ms)
self.th_poisson_rate = 120. # rate (spikes/s)
# spike_generator
# Note: This can be used with a large Gaussian delay distribution in order to mimic a
# Gaussian pulse packet which is different for each thalamic neuron
self.th_spike_times = [self.off] # time of the thalamic pulses (ms)
# create n_thal spikegenerator nodes connected to each respective
# postsynaptic parrot_neuron. Expected format is a len(self.n_thal) list
# of lists of activation times.
# Turn activation off by setting it as [[] for i in range(self.n_thal)]
self.th_spike_generator_times = [[] for i in range(self.n_thal)]
# sinusoidal_poisson_generator (oscillatory Poisson input)
self.th_sin_start = self.off # onset (ms)
self.th_sin_duration = 5000. # duration (ms)
self.th_sin_mean_rate = 30. # mean rate (spikes/s)
# rate modulation amplitude (spikes/s)
self.th_sin_fluc_rate = 30.
# frequency of the rate modulation (Hz)
self.th_sin_freq = 15.
# phase of rate modulation (deg)
self.th_sin_phase = 0.
# Gaussian_pulse_packages
self.th_gauss_times = [self.off] # package center times
self.th_gauss_num_spikes_per_packet = 1 # number of spikes per packet
self.th_gauss_sd = 5. # std of Gaussian pulse packet (ms)
####################################
# SPATIAL ORGANIZATION #
####################################
# needed for spatially resolved input currents
# number of layers TODO: find a better solution for that
self.num_input_layers = 5
def _compute_J(self):
'''
Compute the current amplitude corresponding to the exponential
synapse model PSP amplitude
Derivation using sympy:
::
from sympy import *
#define symbols
t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax')
#assume zero delay, t >= 0
#using eq. 8.10 in Sterrat et al
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print('V = %s' % V)
#find time of V == Vmax
dVdt = diff(V, t)
print('dVdt = %s' % dVdt)
[t] = solve(dVdt, t)
print('t(t@dVdT==Vmax) = %s' % t)
#solve for Is at time of maxima
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print('V(%s) = %s' % (t, V))
[Is] = solve(V-Vmax, Is)
print('Is = %s' % Is)
resulting in:
::
Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts))
- exp(ts*log(ts/tm)/(tm - ts))))
'''
# LIF params
tm = self.model_params['tau_m']
Cm = self.model_params['C_m']
# synapse
ts = self.model_params['tau_syn_ex']
Vmax = self.PSP_e
# max current amplitude
J = Cm * Vmax * (-tm + ts) / (tm * ts * (np.exp(tm * np.log(ts / tm) /
(tm - ts)) - np.exp(ts * np.log(ts / tm) / (tm - ts))))
# unit conversion pF*mV -> nA
J *= 1E-3
return J
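# Illustrative sanity check (added, not part of the original file): with the
# defaults above (tau_m=10 ms, C_m=250 pF, tau_syn_ex=0.5 ms, PSP_e=0.15 mV),
# _compute_J() returns the synaptic current amplitude in nA whose peak EPSP is
# exactly PSP_e; a quick way to verify a parameter change is
#
#   p = point_neuron_network_params()
#   print(p._compute_J()) # peak current (nA) producing a PSP_e mV EPSP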
class multicompartment_params(point_neuron_network_params):
'''
Inherited class defining additional attributes needed by e.g., the
classes population.Population and population.DummyNetwork
This class does not take any kwargs
'''
def __init__(self):
'''
Inherited class defining additional attributes needed by e.g., the
classes population.Population and population.DummyNetwork
This class does not take any kwargs
'''
# initialize parent classes
point_neuron_network_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
#######################################
# PARAMETERS FOR LOADING NEST RESULTS #
#######################################
# parameters for class population.DummyNetwork class
self.networkSimParams = {
'simtime': self.tstop - self.tstart,
'dt': self.dt,
'spike_output_path': self.spike_output_path,
'label': 'population_spikes',
'ext': 'dat',
'GIDs': self.get_GIDs(),
'X': self.X,
'skiprows': 0,
}
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING (VOLUME not density) #
####################################
self.SCALING = 1.0
####################################
# MORPHOLOGIES #
####################################
# list of morphology files with default location, testing = True
# will point to simplified morphologies
testing = True
if testing:
self.PATH_m_y = os.path.join('morphologies', 'ballnsticks')
self.m_y = [Y + '_' + y + '.hoc' for Y, y in self.mapping_Yy]
else:
self.PATH_m_y = os.path.join('morphologies', 'stretched')
self.m_y = [
'L23E_oi24rpy1.hoc',
'L23I_oi38lbc1.hoc',
'L23I_oi38lbc1.hoc',
'L4E_53rpy1.hoc',
'L4E_j7_L4stellate.hoc',
'L4E_j7_L4stellate.hoc',
'L4I_oi26rbc1.hoc',
'L4I_oi26rbc1.hoc',
'L5E_oi15rpy4.hoc',
'L5E_j4a.hoc',
'L5I_oi15rbc1.hoc',
'L5I_oi15rbc1.hoc',
'L6E_51-2a.CNG.hoc',
'L6E_oi15rpy4.hoc',
'L6I_oi15rbc1.hoc',
'L6I_oi15rbc1.hoc',
]
####################################
# CONNECTION WEIGHTS #
####################################
# compute the synapse weight from fundamentals of exp synapse LIF
# neuron
self.J = self._compute_J()
# set up matrix containing the synapse weights between any population X
# and population Y, including exceptions for certain connections
J_YX = np.zeros(self.C_YX.shape)
J_YX += self.J
J_YX[:, 2::2] *= self.g
if hasattr(self, 'PSP_23e_4e'):
J_YX[0, 3] *= self.PSP_23e_4e / self.PSP_e
if hasattr(self, 'g_4e_4i'):
J_YX[2, 4] *= self.g_4e_4i / self.g
# extrapolate weights between populations X and
# cell type y in population Y
self.J_yX = {}
for Y, y in self.mapping_Yy:
[i] = np.where(np.array(self.Y) == Y)[0]
self.J_yX.update({y: J_YX[i, ]})
####################################
# GEOMETRY OF CORTICAL COLUMN #
####################################
# set the boundaries of each layer, L1->L6,
# and mean depth of soma layers
self.layerBoundaries = np.array([[0.0, -81.6],
[-81.6, -587.1],
[-587.1, -922.2],
[-922.2, -1170.0],
[-1170.0, -1491.7]])
# assess depth of each 16 subpopulation
self.depths = self._calcDepths()
# make a nice structure with data for each subpopulation
self.y_zip_list = list(zip(self.y, self.m_y,
self.depths, self.N_y))
##############################################################
# POPULATION PARAMS (cells, population, synapses, electrode) #
##############################################################
# Global LFPy.Cell-parameters, by default shared between populations
# Some passive parameters will not be fully consistent with LIF params
self.cellParams = {
'v_init': self.model_params['E_L'],
'cm': 1.0,
'Ra': 150,
'passive': True,
'passive_parameters': dict(g_pas=1. / (self.model_params['tau_m'] * 1E3), # assume cm=1
e_pas=self.model_params['E_L']),
'nsegs_method': 'lambda_f',
'lambda_f': 100,
'dt': self.dt,
'tstart': self.tstart,
'tstop': self.tstop,
'verbose': False,
}
# layer specific LFPy.Cell-parameters as nested dictionary
self.yCellParams = self._yCellParams()
# set the axis of which each cell type y is randomly rotated,
# SS types and INs are rotated around both x- and z-axis
# in the population class, while P-types are
# only rotated around the z-axis
self.rand_rot_axis = {}
for y, _, _, _ in self.y_zip_list:
# identify pyramidal cell populations:
if y.rfind('p') >= 0:
self.rand_rot_axis.update({y: ['z']})
else:
self.rand_rot_axis.update({y: ['x', 'z']})
# additional simulation kwargs, see LFPy.Cell.simulate() docstring
self.simulationParams = {'rec_imem': True}
# a dict setting the number of cells N_y and geometry
# of cell type population y
self.populationParams = {}
for y, _, depth, N_y in self.y_zip_list:
self.populationParams.update({
y: {
'number': int(N_y * self.SCALING),
'radius': np.sqrt(1000**2 / np.pi),
'z_min': depth - 25,
'z_max': depth + 25,
'min_cell_interdist': 1.,
'min_r': [[-1E199, -1600, -1550, 1E99], [0, 0, 10, 10]]
}
})
# Set up cell type specific synapse parameters in terms of synapse model
# and synapse locations
self.synParams = {}
for y in self.y:
if y.rfind('p') >= 0:
# pyramidal types have apical dendrites
section = ['apic', 'dend']
else:
# other cell types do not
section = ['dend']
self.synParams.update({
y: {
'syntype': 'ExpSynI', # current based exponential synapse
'section': section,
# 'tau' : self.model_params["tau_syn_ex"],
},
})
# set up dictionary of synapse time constants specific to each
# postsynaptic cell type and presynaptic population
self.tau_yX = {}
for y in self.y:
self.tau_yX.update({
y: [self.model_params["tau_syn_in"] if 'I' in X else
self.model_params["tau_syn_ex"] for X in self.X]
})
# synaptic delay parameters, loc and scale is mean and std for every
# network population, negative values will be removed
self.synDelayLoc, self.synDelayScale = self._synDelayParams()
# Define electrode geometry corresponding to a laminar electrode,
# where contact points have a radius r, surface normal vectors N,
# and LFP calculated as the average LFP in n random points on
# each contact. Recording electrode emulate NeuroNexus array,
# contact 0 is superficial
self.electrodeParams = {
# contact locations:
'x': np.zeros(16),
'y': np.zeros(16),
'z': -np.mgrid[0:16] * 100,
# extracellular conductivity:
'sigma': 0.3,
# contact surface normals, radius, n-point averaging
'N': np.array([[1, 0, 0]] * 16),
'r': 7.5,
'n': 50,
'seedvalue': None,
# dendrite line sources, soma sphere source (Linden2014)
'method': 'root_as_point',
}
# parameters for LFPykit.LaminarCurrentSourceDensity
self.CSDParams = dict(
z=np.array([[-(i + 1) * 100, -i * 100] for i in range(16)]) + 50.,
r=np.ones(16) * np.sqrt(1000**2 / np.pi) # same as pop radius
)
# these cell attributes variables will be saved to file
self.savelist = []
#########################################
# MISC #
#########################################
# time resolution of downsampled data in ms
self.dt_output = 1.
# set fraction of neurons from population which LFP output is stored
self.recordSingleContribFrac = 0.
def get_GIDs(self):
GIDs = {}
ind = 1
for i, (X, N_X) in enumerate(zip(self.X, self.N_X)):
GIDs[X] = [ind, N_X]
ind += N_X
return GIDs
def _synDelayParams(self):
'''
set up the detailed synaptic delay parameters,
loc is mean delay,
scale is std with low bound cutoff,
assumes numpy.random.normal is used later
'''
delays = {}
# mean delays
loc = np.zeros((len(self.y), len(self.X)))
loc[:, 0] = self.delays[0]
loc[:, 1::2] = self.delays[0]
loc[:, 2::2] = self.delays[1]
# standard deviations
scale = loc * self.delay_rel_sd
# prepare output
delay_loc = {}
for i, y in enumerate(self.y):
delay_loc.update({y: loc[i]})
delay_scale = {}
for i, y in enumerate(self.y):
delay_scale.update({y: scale[i]})
return delay_loc, delay_scale
def _calcDepths(self):
'''
return the cortical depth of each subpopulation
'''
depths = self.layerBoundaries.mean(axis=1)[1:]
depth_y = []
for y in self.y:
if y in ['p23', 'b23', 'nb23']:
depth_y = np.r_[depth_y, depths[0]]
elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']:
depth_y = np.r_[depth_y, depths[1]]
elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']:
depth_y = np.r_[depth_y, depths[2]]
elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']:
depth_y = np.r_[depth_y, depths[3]]
else:
raise Exception('Error, revise parameters')
return depth_y
def _yCellParams(self):
'''
Return dict with parameters for each population.
The main operation is filling in cell type specific morphology
'''
# cell type specific parameters going into LFPy.Cell
yCellParams = {}
for layer, morpho, _, _ in self.y_zip_list:
yCellParams.update({layer: self.cellParams.copy()})
yCellParams[layer].update({
'morphology': os.path.join(self.PATH_m_y, morpho),
})
return yCellParams
if __name__ == '__main__':
params = multicompartment_params()
print(dir(params))
|
gpl-3.0
| -5,546,768,391,567,412,000
| 35.082714
| 111
| 0.457051
| false
| 3.895355
| false
| false
| false
|
MichSchli/QuestionAnsweringGCN
|
example_reader/graph_reader/graph_converter.py
|
1
|
2831
|
from example_reader.graph_reader.edge_type_utils import EdgeTypeUtils
from example_reader.graph_reader.graph import Graph
import numpy as np
class GraphConverter:
hypergraph_interface = None
edge_type_utils = None
def __init__(self, hypergraph_interface):
self.hypergraph_interface = hypergraph_interface
self.edge_type_utils = EdgeTypeUtils()
def get_neighborhood_graph(self, entities):
hypergraph = self.hypergraph_interface.get_neighborhood_graph(entities)
graph = Graph()
graph.centroid_indexes = hypergraph.centroid_indexes
graph.entity_centroid_paths = hypergraph.centroid_paths
graph.vertices = np.concatenate((hypergraph.entity_vertices, hypergraph.event_vertices))
graph.entity_vertex_indexes = np.arange(hypergraph.entity_vertices.shape[0], dtype=np.int32)
graph.update_general_vertex_to_entity_index_map()
#graph.nearby_centroid_map = []
#for vertex in hypergraph.entity_vertices:
# graph.nearby_centroid_map.append(hypergraph.get_nearby_centroids(vertex))
#for vertex in hypergraph.event_vertices:
# graph.nearby_centroid_map.append(hypergraph.get_nearby_centroids(vertex))
graph.edges = np.concatenate((hypergraph.entity_to_entity_edges,
hypergraph.event_to_entity_edges,
hypergraph.entity_to_event_edges))
graph.edge_types = [np.array([], dtype=np.int32) for _ in range(self.edge_type_utils.count_types())]
graph.edge_types[0] = np.arange(hypergraph.entity_to_entity_edges.shape[0], dtype=np.int32)
acc = hypergraph.entity_to_entity_edges.shape[0]
graph.edge_types[1] = np.arange(hypergraph.event_to_entity_edges.shape[0], dtype=np.int32) + acc
acc += hypergraph.event_to_entity_edges.shape[0]
graph.edge_types[2] = np.arange(hypergraph.entity_to_event_edges.shape[0], dtype=np.int32) + acc
vertex_name_map = {hypergraph.to_index(k):v for k,v in hypergraph.name_map.feature_map.items()}
graph.set_index_to_name_map(vertex_name_map)
entity_vertex_types = np.array([[1,0,0,0,0,0] for _ in range(hypergraph.entity_vertices.shape[0])], dtype=np.float32)
event_vertex_types = np.array([[0,1,0,0,0,0] for _ in range(hypergraph.event_vertices.shape[0])], dtype=np.float32)
if entity_vertex_types.shape[0] == 0:
entity_vertex_types = np.empty((0,6), dtype=np.float32)
if event_vertex_types.shape[0] == 0:
event_vertex_types = np.empty((0,6), dtype=np.float32)
graph.vertex_types = np.concatenate((entity_vertex_types, event_vertex_types))
graph.nearby_centroid_map = [hypergraph.nearby_centroid_map[entity] for entity in graph.vertices]
return graph
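# Hedged usage sketch (added for illustration; the entity identifier is
# hypothetical and depends on the knowledge base behind hypergraph_interface):
#
#   converter = GraphConverter(hypergraph_interface)
#   graph = converter.get_neighborhood_graph(["<centroid entity id>"])
#   # graph.vertices lists entity vertices first, then event vertices, and
#   # graph.edge_types[0..2] index the three concatenated edge groups above.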
|
mit
| 1,603,126,252,323,630,600
| 48.684211
| 125
| 0.671494
| false
| 3.427361
| false
| false
| false
|
the-virtual-brain/tvb-hpc
|
tvb_hpc/rng.py
|
1
|
3024
|
# Copyright 2018 TVB-HPC contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SIMD friendly random number generation.
Currently uses only Philox4x64 which assumes 128-bit, but for GPU usage,
switch to a 32-bit. See include/Random123/index.html for details.
"""
import numpy as np
import ctypes
from .compiler import CppCompiler, Spec
from .utils import include_dir
rng_template = """
#include <Random123/philox.h>
#include <Random123/boxmuller.hpp>
extern "C" {
void tvb_rng(long long int seed, unsigned int nout,
float * __restrict out) {
// TODO other variants might vectorize better?
%(loop_pragma)s
for(unsigned int i=0; i<(nout/4); ++i) {
philox4x32_ctr_t ctr;
philox4x32_key_t key;
ctr.v[0] = seed + 4*i;
ctr.v[1] = seed + 4*i + 1;
ctr.v[2] = seed + 4*i + 2;
ctr.v[3] = seed + 4*i + 3;
philox4x32_ctr_t result = philox4x32(ctr, key);
r123::float2 normal = r123::boxmuller(result.v[0], result.v[1]);
out[i*4 + 0] = normal.x;
out[i*4 + 1] = normal.y;
r123::float2 normal2 = r123::boxmuller(result.v[2], result.v[3]);
out[i*4 + 2] = normal2.x;
out[i*4 + 3] = normal2.y;
}
}
}
"""
class RNG:
def __init__(self, comp: CppCompiler=None):
self.comp = comp or CppCompiler() # type: CppCompiler
# TODO consider loopy support for calling user functions / preamble
def generate_c(self, spec: Spec=None):
spec = spec or Spec()
self.comp.cflags += ['-I' + include_dir]
loop_pragma = ''
if spec.openmp:
loop_pragma = '#pragma omp parallel for'
decls = []
# decls += self.generate_alignments(['out'], spec)
return rng_template % {
'loop_pragma': loop_pragma,
'decls': '\n '.join(decls),
}
def build(self, spec):
self.dll = self.comp.build(self.generate_c(spec))
self.fn = self.dll.tvb_rng
self.fn.restype = None
self.fn.argtypes = [ctypes.c_longlong,
ctypes.c_uint,
ctypes.POINTER(ctypes.c_float)]
def fill(self, array, seed=42):
assert array.dtype == np.float32
self.fn(
self.fn.argtypes[0](seed),
self.fn.argtypes[1](array.size),
array.ctypes.data_as(self.fn.argtypes[2])
)
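# Hedged usage sketch (added for illustration; assumes Spec() constructs with
# defaults, as the `spec or Spec()` fallback in generate_c suggests):
#
#   rng = RNG()
#   rng.build(Spec())
#   out = np.zeros(1024, np.float32) # size should be a multiple of 4
#   rng.fill(out, seed=42) # out now holds Box-Muller normal samples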
|
apache-2.0
| -6,177,618,747,205,311,000
| 30.5
| 78
| 0.581019
| false
| 3.401575
| false
| false
| false
|
asoc/snakewatch
|
snakewatch/action/Write.py
|
1
|
2980
|
"""
This file is part of snakewatch.
snakewatch is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
snakewatch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with snakewatch. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function, absolute_import, unicode_literals, division
import os
from ._ConfirmAction import ConfirmAction
from ..util import AbortError, ConfigError, ui_print
class WriteAction(ConfirmAction):
"""An Action that returns the line with possible colouring"""
instances = dict()
def __init__(self, cfg, ui_confirm):
self.mode = 'w' if cfg.get('truncate', False) else 'a'
if 'filename' in cfg:
filename = cfg['filename']
if filename.startswith('~'):
filename = os.path.expanduser(filename)
self.filename = os.path.abspath(filename)
super(WriteAction, self).__init__(cfg, ui_confirm, ['filename'])
WriteAction.open_file_instance(self)
@classmethod
def open_file_instance(cls, inst):
try:
file_instances = cls.instances[inst.filename]
except KeyError:
file_instances = list()
cls.instances[inst.filename] = file_instances
if file_instances:
inst.fp = file_instances[0]
if inst.fp.mode != inst.mode:
raise ConfigError('File {} is opened in conflicting modes.'.format(inst.filename))
else:
try:
inst.fp = open(inst.filename, inst.mode)
except (OSError, IOError) as err:
ui_print().error(
'Cannot open {} for writing.'.format(inst.filename),
str(err), sep='\n'
)
raise AbortError()
file_instances.append(inst)
@classmethod
def close_file_instance(cls, inst):
try:
file_instances = cls.instances[inst.filename]
except KeyError:
return
try:
file_instances.remove(inst)
except ValueError:
pass
if not file_instances:
inst.fp.close()
def run_on(self, line):
self.fp.write(line)
self.fp.flush()
os.fsync(self.fp)
return None
def release_resources(self):
WriteAction.close_file_instance(self)
def confirm_message(self):
return 'The file {} will be {}.'.format(
self.filename,
'overwritten' if self.mode == 'w' else 'written to',
)
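# Hedged configuration sketch (added for illustration; the key names follow the
# cfg lookups above, and ui_confirm is whatever confirm callable the UI passes):
#
#   cfg = {'filename': '~/watch.log', 'truncate': False} # append mode ('a')
#   action = WriteAction(cfg, ui_confirm)
#   action.run_on('matched line\n')
#   action.release_resources()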
|
bsd-3-clause
| 2,404,914,814,539,267,600
| 29.721649
| 98
| 0.614094
| false
| 4.28777
| false
| false
| false
|
eddieantonio/statically-typed-python
|
my_hip_site/my_hip_site.py
|
1
|
1932
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from flask import Flask, request
from css import css as _
app = Flask(__name__)
@app.route('/')
def home():
return _('''
<form action="/" method="POST">
<label> <input name=a> </label><br />
<label> ÷ <input name=b> </label><br />
<button type=submit> Divide! </button>
</form>
''')
@app.route('/', methods=['POST'])
def divide_numbers():
a = request.form['a']
b = request.form['b']
answer = a / b
return _('''
<main>{a} ÷ {b} = {answer:.5f}</main>
'''.format(a=a, b=b, answer=answer))
if __name__ == '__main__':
app.run()
|
unlicense
| -7,519,536,901,940,282,000
| 31.711864
| 73
| 0.67513
| false
| 3.67619
| false
| false
| false
|
makefu/bepasty-server
|
bepasty/views/display.py
|
1
|
6026
|
import errno
import time
from flask import current_app, render_template, Markup, url_for
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from pygments import highlight
from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound as NoPygmentsLexer
from ..utils.permissions import *
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.formatters import CustomHtmlFormatter
from ..utils._compat import iteritems
from . import blueprint
from .filelist import file_infos
def rendering_allowed(item_type, item_size, use_pygments, complete):
"""
check if rendering is allowed, checks for:
* whether the item is completely uploaded
* whether the size is within the configured limits for the content-type
"""
if not complete:
return False
if use_pygments:
# if we use pygments, special restrictions apply
item_type = 'HIGHLIGHT_TYPES'
# create a tuple list [(content_type_prefix, max_size), ...] with long prefixes first
ct_size = sorted(iteritems(current_app.config['MAX_RENDER_SIZE']), key=lambda e: len(e[0]), reverse=True)
for ct, size in ct_size:
if item_type.startswith(ct):
return item_size <= size
# there should be one entry with ct == '', so we should never get here:
return False
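# Hedged configuration sketch (added for illustration; the exact limits are
# deployment-specific): MAX_RENDER_SIZE maps content-type prefixes to byte
# limits, the longest matching prefix wins, '' is the fallback, and the
# 'HIGHLIGHT_TYPES' entry caps everything rendered through pygments:
#
#   MAX_RENDER_SIZE = {
#       '': 1 * 1024 * 1024,
#       'text/': 5 * 1024 * 1024,
#       'HIGHLIGHT_TYPES': 512 * 1024,
#   }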
class DisplayView(MethodView):
def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
with item as item:
complete = item.meta['complete']
if not complete and not may(ADMIN):
error = 'Upload incomplete. Try again later.'
return render_template('error.html', heading=item.meta['filename'], body=error), 409
if item.meta['locked'] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
def read_data(item):
# reading the item for rendering is registered like a download
data = item.data.read(item.data.size, 0)
item.meta['timestamp-download'] = int(time.time())
return data
size = item.meta['size']
ct = item.meta['type']
try:
get_lexer_for_mimetype(ct)
use_pygments = True
ct_pygments = ct
except NoPygmentsLexer:
if ct.startswith('text/'):
# seems like we found a text type not supported by pygments
# use text/plain so we get a display with line numbers
use_pygments = True
ct_pygments = 'text/plain'
else:
use_pygments = False
if rendering_allowed(ct, size, use_pygments, complete):
if ct.startswith('text/x-bepasty-'):
# special bepasty items - must be first, don't feed to pygments
if ct == 'text/x-bepasty-list':
names = read_data(item).decode('utf-8').splitlines()
files = sorted(file_infos(names), key=lambda f: f['filename'])
rendered_content = Markup(render_template('filelist_tableonly.html', files=files))
else:
rendered_content = u"Can't render this content type."
elif ct.startswith('image/'):
src = url_for('bepasty.download', name=name)
rendered_content = Markup(u'<img src="%s" alt="the image" width="800">' % src)
elif ct.startswith('audio/'):
src = url_for('bepasty.download', name=name)
alt_msg = u'html5 audio element not supported by your browser.'
rendered_content = Markup(u'<audio controls src="%s">%s</audio>' % (src, alt_msg))
elif ct.startswith('video/'):
src = url_for('bepasty.download', name=name)
alt_msg = u'html5 video element not supported by your browser.'
rendered_content = Markup(u'<video controls src="%s">%s</video>' % (src, alt_msg))
elif ct in ['application/pdf', 'application/x-pdf', ]:
src = url_for('bepasty.inline', name=name)
link_txt = u'Click to see PDF'
rendered_content = Markup(u'<a href="%s">%s</a>' % (src, link_txt))
elif use_pygments:
text = read_data(item)
# TODO we don't have the coding in metadata
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
# well, it is not utf-8 or ascii, so we can only guess...
text = text.decode('iso-8859-1')
lexer = get_lexer_for_mimetype(ct_pygments)
formatter = CustomHtmlFormatter(linenos='table', lineanchors="L",
lineparagraphs="L", anchorlinenos=True)
rendered_content = Markup(highlight(text, lexer, formatter))
else:
rendered_content = u"Can't render this content type."
else:
if not complete:
rendered_content = u"Rendering not allowed (not complete). Is it still being uploaded?"
else:
rendered_content = u"Rendering not allowed (too big?). Try download"
return render_template('display.html', name=name, item=item,
rendered_content=rendered_content)
blueprint.add_url_rule('/<itemname:name>', view_func=DisplayView.as_view('display'))
|
bsd-2-clause
| -7,702,975,127,156,539,000
| 44.651515
| 109
| 0.554597
| false
| 4.5548
| false
| false
| false
|
dimtion/jml
|
inputFiles/opponents/team-roquette/greedy.py
|
1
|
3203
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import lib.PyratApi as api
import lib.travelHeuristics as th
import lib.utils as ut
import time
import operator
BOT_NAME = "greedy"
PATH = []
METAGRAPH = {}
BESTPATHS = {}
MOVING = False
EATENCOINS = []
NB_COINS_TO_COMPUTE = 5
CURRENTCOIN = []
# This function should not return anything, but should be used for a short preprocessing
def initializationCode (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
global METAGRAPH
global BESTPATHS
iniTime = time.time()
METAGRAPH, BESTPATHS = th.generateMetaGraph(mazeMap, playerLocation, coins)
api.debug(time.time() - iniTime)
return "Everything seems fine, let's start !"
def updateCoins (metaGraph, eatenCoins, elLocation):
if elLocation in metaGraph:
eatenCoins.append(elLocation)
return eatenCoins
def orderNodes(metaGraph, currentNode, eatenCoins):
temp = metaGraph[currentNode]
nodesList = [x for x in list(temp.items()) if x[0] not in eatenCoins]
nodesList.sort(key = operator.itemgetter(1))
return nodesList
def chooseCoin (metaGraph, playerLocation, eatenCoins):
# Determine which nodes to evaluate with the naive algorithm
nodesToCompute = orderNodes(metaGraph, playerLocation, eatenCoins)
# Build the route with the naive (brute-force) algorithm
bestDis, bestPaths = th.travellingSalesman(playerLocation, nodesToCompute[:NB_COINS_TO_COMPUTE -1], 0, [])
return bestPaths[0]
# This is where you should write your code to determine the next direction
def determineNextMove (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
global MOVING
global METAGRAPH
global BESTPATHS
global EATENCOINS
global PATH
global CURRENTCOIN
EATENCOINS = updateCoins(METAGRAPH, EATENCOINS, opponentLocation)
EATENCOINS = updateCoins(METAGRAPH, EATENCOINS, playerLocation)
if MOVING :
if not PATH :
MOVING = False
if opponentLocation == CURRENTCOIN and playerLocation != CURRENTCOIN:
PATH = []
PATH = th.findNearestCoin(mazeMap, playerLocation, coins)
if not MOVING :
CURRENTCOIN = chooseCoin(METAGRAPH, playerLocation, EATENCOINS)
PATH = BESTPATHS[playerLocation][CURRENTCOIN]
PATH.pop()
MOVING = True
nextPos = PATH.pop()
return ut.convertPosesToDir(nextPos, playerLocation, mazeMap)
####
if __name__ == "__main__" :
# We let technical stuff happen
(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = api.initGame(BOT_NAME)
initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
# We decide how to move and wait for the next step
while not gameIsOver :
(playerLocation, opponentLocation, coins, gameIsOver) = api.processNextInformation()
if gameIsOver :
break
nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
api.writeToPipe(nextMove)
|
mit
| 1,538,619,702,752,280,000
| 26.358974
| 141
| 0.702281
| false
| 3.430868
| false
| false
| false
|
vialectrum/vialectrum
|
electrum_ltc/gui/qt/util.py
|
1
|
34021
|
import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from functools import partial, lru_cache
from typing import NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem,
QPalette, QIcon, QFontMetrics, QShowEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu)
from electrum_ltc.i18n import _, languages
from electrum_ltc.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum_ltc.util import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING
if TYPE_CHECKING:
from .main_window import ElectrumWindow
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
PR_ROUTING:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
f"All files (*)")
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
if b is None:
continue
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that conform to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
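# A minimal usage sketch (the parent window and labels are illustrative, not
# from this file):
#   d = WindowModalDialog(parent, title=_("Example"))
#   vbox = QVBoxLayout(d)
#   vbox.addWidget(QLabel(_("Proceed?")))
#   vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
#   if d.exec_():
#       pass  # user accepted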
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
class BlockingWaitingDialog(WindowModalDialog):
"""Shows a waiting dialog whilst running a task.
Should be called from the GUI thread. The GUI thread will be blocked while
the task is running; the point of the dialog is to provide feedback
to the user regarding what is going on.
"""
def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.show()
QCoreApplication.processEvents()
task()
self.accept()
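# Usage sketch (the sync_wallet callable is hypothetical); this blocks the GUI
# thread until the task returns:
#   BlockingWaitingDialog(parent, _("Synchronizing..."), sync_wallet)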
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, header_layout, ok_label, default=None, allow_multi=False):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv: 'MyTreeView'):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
_prior_text, user_role = self.tv.text_txid_from_coordinate(row, col)
# check that we didn't forget to set UserRole on an editable field
assert user_role is not None, (row, col)
self.tv.on_edited(idx, user_role, new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
return super().createEditor(parent, option, idx)
class MyTreeView(QTreeView):
ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is not None:
editable_columns = set(editable_columns)
elif stretch_column is not None:
editable_columns = {stretch_column}
else:
            editable_columns = set()
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
self._pending_update = False
self._forced_update = False
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def current_item_user_role(self, col) -> Any:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.model().itemFromIndex(idx)
if item:
return item.data(Qt.UserRole)
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
model = self.model()
model.setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, user_role, text):
self.parent.wallet.set_label(user_role, text)
self.parent.history_model.refresh('on_edited in MyTreeView')
self.parent.update_completions()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def text_txid_from_coordinate(self, row_num, column):
assert not isinstance(self.model(), QSortFilterProxyModel)
idx = self.model().index(row_num, column)
item = self.model().itemFromIndex(idx)
user_role = item.data(Qt.UserRole)
return item.text(), user_role
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
txt, _ = self.text_txid_from_coordinate(row_num, column)
txt = txt.lower()
if self.current_filter in txt:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
cc = menu.addMenu(_("Copy"))
for column in self.Columns:
column_title = self.model().horizontalHeaderItem(column).text()
item_col = self.model().itemFromIndex(idx.sibling(idx.row(), column))
clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
if clipboard_data is None:
clipboard_data = item_col.text().strip()
cc.addAction(column_title,
lambda text=clipboard_data, title=column_title:
self.place_text_on_clipboard(text, title=title))
return cc
def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
self.parent.do_copy(text, title=title)
def showEvent(self, e: 'QShowEvent'):
super().showEvent(e)
if e.isAccepted() and self._pending_update:
self._forced_update = True
self.update()
self._forced_update = False
def maybe_defer_update(self) -> bool:
"""Returns whether we should defer an update/refresh."""
defer = not self.isVisible() and not self._forced_update
# side-effect: if we decide to defer update, the state will become stale:
self._pending_update = defer
return defer
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = []
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth - 10
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class PasswordLineEdit(QLineEdit):
def __init__(self, *args, **kwargs):
QLineEdit.__init__(self, *args, **kwargs)
self.setEchoMode(QLineEdit.Password)
def clear(self):
# Try to actually overwrite the memory.
# This is really just a best-effort thing...
self.setText(len(self.text()) * " ")
super().clear()
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
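# Usage sketch (the callbacks are illustrative). The task runs on the worker
# thread; on_success/on_error run back in the parent's thread via doneSig:
#   t = TaskThread(self)
#   t.add(lambda: fetch_data(),                      # hypothetical task
#         on_success=lambda result: show(result),
#         on_error=lambda exc_info: log_error(exc_info))
#   ...
#   t.stop()  # enqueues the None sentinel; run() exits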
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
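# For illustration, with the default light scheme:
#   ColorScheme.RED.as_stylesheet()      -> "QWidget { color:#7c1111; }"
#   ColorScheme.RED.as_stylesheet(True)  -> "QWidget { background-color:#f18c8c; }"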
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window, title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getOpenFileName(_("Open {} file").format(title), filter_)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window, title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getSaveFileName(_("Select file to save your {}").format(title),
'electrum-ltc_{}.json'.format(title), filter_)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def get_parent_main_window(widget):
"""Returns a reference to the ElectrumWindow this widget belongs to."""
from .main_window import ElectrumWindow
from .transaction_dialog import TxDialog
for _ in range(100):
if widget is None:
return None
if isinstance(widget, ElectrumWindow):
return widget
elif isinstance(widget, TxDialog):
return widget.main_window
else:
widget = widget.parentWidget()
return None
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
sys.exit(0)
else:
webbrowser.open(url)
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
|
mit
| -5,169,814,777,630,352,000
| 34.364865
| 124
| 0.608271
| false
| 3.912708
| false
| false
| false
|
GearsAD/semisorted_arnerve
|
arnerve/core/LCMManager.py
|
1
|
3344
|
'''
Created on Sep 7, 2014
@author: gearsad
'''
import lcm
#Import the user types
from user_update_t import user_update_t
#Import the bot types
from bot_update_t import bot_update_t
from bot_control_command_t import bot_control_command_t
#Import the role types
from role_response_t import role_response_t
class LCMManager():
def __init__(self):
#Broadcast across the wire
self.lc = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.__subscriptions = []
self.__subscriptions.append(self.lc.subscribe("ARNerve_UserUpdates", self.UpdateUsersHandler))
self.__subscriptions.append(self.lc.subscribe("ARNerve_UserHerder_RoleResponses", self.UpdateFromRoleResponse))
# Add all the bot channels.
self.__subscriptions.append(self.lc.subscribe("ARNerve_Bot_Update_GIRR", self.UpdateBot))
def Attach(self, userManager, roleManager, botManager):
'''
        Attach the relevant manager objects to this LCMManager
'''
self.__userManager = userManager
self.__roleManager = roleManager
self.__botManager = botManager
def UpdateUsersHandler(self, channel, data):
'''
Get the updated user and add it to the user manager
'''
msg = user_update_t.decode(data)
if(self.__userManager):
self.__userManager.UpdateUserFromLCM(msg)
#HACK TESTING...
# botControl = bot_control_command_t()
# botControl.name = "GIRR"
# botControl.botTreadVelLeft = 0
# botControl.botTreadVelLeft = 0
# if msg.kinect.is_lhandclosed and msg.kinect.is_rhandclosed:
# botControl.botTreadVelLeft = 1.0
# botControl.botTreadVelright = 1.0
# else:
# if msg.kinect.is_lhandclosed:
# print "---Left Hand CLosed!"
# botControl.botTreadVelLeft = 1.0
# botControl.botTreadVelright = -1.0
# if msg.kinect.is_rhandclosed:
# print "---Right Hand CLosed!"
# botControl.botTreadVelLeft = -1.0
# botControl.botTreadVelright = 1.0
# botControl.isInfraredOn = 0
# botControl.isLightsOn = 0
# botControl.timestamp = 0
# self.lc.publish("ARNerve_Bot_Control_GIRR", botControl.encode())
def UpdateFromRoleResponse(self, channel, data):
'''
Get the role response, parse it, and send it to the role manager
'''
roleResponse = role_response_t.decode(data)
# Now pass it to the role manager
self.__roleManager.ParseRoleResponse(roleResponse)
def UpdateBot(self, channel, data):
'''
Update from a bot frame
'''
botUpdate = bot_update_t.decode(data)
print "[LCMManager] Got an update for bot {0}".format(botUpdate.name)
self.__botManager.UpdateBotFromLCM(botUpdate)
return
def Update(self):
self.lc.handle()
def Disconnect(self):
for subscription in self.__subscriptions:
self.lc.unsubscribe(subscription)
def SendRoleRequest(self, lcmRoleRequest):
'''
Send a role change request to the UserHerder
'''
self.lc.publish("ARNerve_UserHerder_RoleRequests", lcmRoleRequest)
|
mit
| -770,272,538,006,643,300
| 32.787879
| 119
| 0.609151
| false
| 3.690949
| false
| false
| false
|
1upon0/rfid-auth-system
|
GUI/printer/Pillow-2.7.0/Tests/test_format_hsv.py
|
1
|
5636
|
from helper import unittest, PillowTestCase, hopper
from PIL import Image
import colorsys
import itertools
class TestFormatHSV(PillowTestCase):
def int_to_float(self, i):
return float(i)/255.0
def str_to_float(self, i):
return float(ord(i))/255.0
def to_int(self, f):
return int(f*255.0)
def tuple_to_ints(self, tp):
x, y, z = tp
return (int(x*255.0), int(y*255.0), int(z*255.0))
def test_sanity(self):
Image.new('HSV', (100, 100))
def wedge(self):
w = Image._wedge()
w90 = w.rotate(90)
(px, h) = w.size
r = Image.new('L', (px*3, h))
g = r.copy()
b = r.copy()
r.paste(w, (0, 0))
r.paste(w90, (px, 0))
g.paste(w90, (0, 0))
g.paste(w, (2*px, 0))
b.paste(w, (px, 0))
b.paste(w90, (2*px, 0))
img = Image.merge('RGB', (r, g, b))
# print (("%d, %d -> "% (int(1.75*px),int(.25*px))) + \
# "(%s, %s, %s)"%img.getpixel((1.75*px, .25*px)))
# print (("%d, %d -> "% (int(.75*px),int(.25*px))) + \
# "(%s, %s, %s)"%img.getpixel((.75*px, .25*px)))
return img
def to_xxx_colorsys(self, im, func, mode):
# convert the hard way using the library colorsys routines.
(r, g, b) = im.split()
if bytes is str:
conv_func = self.str_to_float
else:
conv_func = self.int_to_float
if hasattr(itertools, 'izip'):
iter_helper = itertools.izip
else:
iter_helper = itertools.zip_longest
converted = [self.tuple_to_ints(func(conv_func(_r), conv_func(_g),
conv_func(_b)))
for (_r, _g, _b) in iter_helper(r.tobytes(), g.tobytes(),
b.tobytes())]
if str is bytes:
new_bytes = b''.join(chr(h)+chr(s)+chr(v) for (
h, s, v) in converted)
else:
new_bytes = b''.join(bytes(chr(h)+chr(s)+chr(v), 'latin-1') for (
h, s, v) in converted)
hsv = Image.frombytes(mode, r.size, new_bytes)
return hsv
def to_hsv_colorsys(self, im):
return self.to_xxx_colorsys(im, colorsys.rgb_to_hsv, 'HSV')
def to_rgb_colorsys(self, im):
return self.to_xxx_colorsys(im, colorsys.hsv_to_rgb, 'RGB')
def test_wedge(self):
src = self.wedge().resize((3*32, 32), Image.BILINEAR)
im = src.convert('HSV')
comparable = self.to_hsv_colorsys(src)
# print (im.getpixel((448, 64)))
# print (comparable.getpixel((448, 64)))
# print(im.split()[0].histogram())
# print(comparable.split()[0].histogram())
# im.split()[0].show()
# comparable.split()[0].show()
self.assert_image_similar(im.split()[0], comparable.split()[0],
1, "Hue conversion is wrong")
self.assert_image_similar(im.split()[1], comparable.split()[1],
1, "Saturation conversion is wrong")
self.assert_image_similar(im.split()[2], comparable.split()[2],
1, "Value conversion is wrong")
# print (im.getpixel((192, 64)))
comparable = src
im = im.convert('RGB')
# im.split()[0].show()
# comparable.split()[0].show()
# print (im.getpixel((192, 64)))
# print (comparable.getpixel((192, 64)))
self.assert_image_similar(im.split()[0], comparable.split()[0],
3, "R conversion is wrong")
self.assert_image_similar(im.split()[1], comparable.split()[1],
3, "G conversion is wrong")
self.assert_image_similar(im.split()[2], comparable.split()[2],
3, "B conversion is wrong")
def test_convert(self):
im = hopper('RGB').convert('HSV')
comparable = self.to_hsv_colorsys(hopper('RGB'))
# print ([ord(x) for x in im.split()[0].tobytes()[:80]])
# print ([ord(x) for x in comparable.split()[0].tobytes()[:80]])
# print(im.split()[0].histogram())
# print(comparable.split()[0].histogram())
self.assert_image_similar(im.split()[0], comparable.split()[0],
1, "Hue conversion is wrong")
self.assert_image_similar(im.split()[1], comparable.split()[1],
1, "Saturation conversion is wrong")
self.assert_image_similar(im.split()[2], comparable.split()[2],
1, "Value conversion is wrong")
def test_hsv_to_rgb(self):
comparable = self.to_hsv_colorsys(hopper('RGB'))
converted = comparable.convert('RGB')
comparable = self.to_rgb_colorsys(comparable)
# print(converted.split()[1].histogram())
# print(target.split()[1].histogram())
# print ([ord(x) for x in target.split()[1].tobytes()[:80]])
# print ([ord(x) for x in converted.split()[1].tobytes()[:80]])
self.assert_image_similar(converted.split()[0], comparable.split()[0],
3, "R conversion is wrong")
self.assert_image_similar(converted.split()[1], comparable.split()[1],
3, "G conversion is wrong")
self.assert_image_similar(converted.split()[2], comparable.split()[2],
3, "B conversion is wrong")
if __name__ == '__main__':
unittest.main()
# End of file
|
apache-2.0
| 8,927,930,943,546,535,000
| 32.349112
| 78
| 0.503549
| false
| 3.424058
| true
| false
| false
|
mollyproject/mollyproject
|
molly/apps/places/providers/tfl.py
|
1
|
3958
|
from urllib2 import urlopen
from xml.dom import minidom
from collections import defaultdict
import threading
import logging
from django.utils.translation import ugettext_lazy as _
from molly.apps.places.providers import BaseMapsProvider
logger = logging.getLogger(__name__)
class TubeRealtimeProvider(BaseMapsProvider):
"""
Populates tube station entities with real-time departure information
"""
TRACKERNET_STATUS_URL = 'http://cloud.tfl.gov.uk/TrackerNet/StationStatus'
TRACKERNET_PREDICTION_URL = 'http://cloud.tfl.gov.uk/TrackerNet/PredictionDetailed/%s/%s'
def get_statuses(self):
statuses = {}
xml = minidom.parseString(urlopen(self.TRACKERNET_STATUS_URL).read())
for stationstatus in xml.getElementsByTagName('StationStatus'):
name = stationstatus.getElementsByTagName('Station')[0].getAttribute('Name')
status = stationstatus.getElementsByTagName('Status')[0].getAttribute('Description')
status += ' ' + stationstatus.getAttribute('StatusDetails')
statuses[name] = status
return statuses
def augment_metadata(self, entities, **kwargs):
threads = []
for entity in filter(lambda e: e.primary_type.slug == 'tube-station', entities):
# Try and match up entity with StationStatus name
for station, status in self.get_statuses().items():
if entity.title.startswith(station):
entity.metadata['real_time_information'] = {
'pip_info': [status] if status != 'Open ' else [],
}
if 'real_time_information' not in entity.metadata:
entity.metadata['real_time_information'] = {}
if 'london-underground-identifiers' in entity.metadata:
thread = threading.Thread(target=self.get_times, args=[entity])
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def get_times(self, entity):
try:
services = []
station = entity.metadata['london-underground-identifiers']['station-code']
for line in entity.metadata['london-underground-identifiers']['line-codes']:
xml = minidom.parseString(urlopen(self.TRACKERNET_PREDICTION_URL % (line, station)).read())
for platform in xml.getElementsByTagName('P'):
next_info = defaultdict(list)
for tag in platform.getElementsByTagName('T'):
dest = '%s (%s)' % (
tag.getAttribute('Destination'),
xml.getElementsByTagName('LineName')[0].childNodes[0].nodeValue
)
next_info[dest].append(int(tag.getAttribute('SecondsTo')))
for dest, eta in next_info.items():
services.append({
'service': _('Plat %s') % platform.getAttribute('Num'),
'destination': dest,
'etas': eta
})
services.sort(key=lambda s: s['etas'][0])
for service in services:
etas = [round(e/60) for e in service['etas']]
# Translators: This refers to arrival times of trains, in minutes
etas = [_('DUE') if e == 0 else _('%d mins') % e for e in etas]
service['next'] = etas[0]
service['following'] = etas[1:]
del service['etas']
entity.metadata['real_time_information']['services'] = services
entity.metadata['meta_refresh'] = 30
        except Exception:
logger.exception('Failed to get RTI from Trackernet')
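# Rough usage sketch (the entities come from molly's places app; names here
# are illustrative):
#   provider = TubeRealtimeProvider()
#   provider.augment_metadata(entities)
#   # tube-station entities now carry
#   # entity.metadata['real_time_information'] (with 'services' when available)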
|
apache-2.0
| -4,033,144,124,539,551,000
| 42.977778
| 107
| 0.55331
| false
| 4.768675
| false
| false
| false
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-affycompatible/package.py
|
1
|
2149
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycompatible(RPackage):
"""This package provides an interface to Affymetrix chip annotation
and sample attribute files. The package allows an easy way for users
to download and manage local data bases of Affynmetrix NetAffx
annotation files. The package also provides access to GeneChip
Operating System (GCOS) and GeneChip Command Console
(AGCC)-compatible sample annotation files."""
homepage = "https://www.bioconductor.org/packages/AffyCompatible/"
url = "https://git.bioconductor.org/packages/AffyCompatible"
version('1.36.0', 'https://git.bioconductor.org/packages/AffyCompatible', commit='dbbfd43a54ae1de6173336683a9461084ebf38c3')
depends_on('r@3.4.0:3.4.9', when=('@1.36.0'))
depends_on('r-xml', type=('build', 'run'))
depends_on('r-rcurl', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
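    # Typical use from the Spack command line (a sketch, not part of the recipe):
    #   $ spack install r-affycompatible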
|
lgpl-2.1
| 8,644,395,620,993,898,000
| 47.840909
| 128
| 0.689158
| false
| 3.91439
| false
| false
| false
|
ryankanno/py-utilities
|
tests/time/test_date_utilities.py
|
1
|
1831
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
from datetime import timedelta
from nose.tools import ok_
from py_utilities.time.date_utilities import EPOCH_AS_STRUCT_TIME
from py_utilities.time.date_utilities import EPOCH_AS_DATETIME
from py_utilities.time.date_utilities import next_day
from py_utilities.time.date_utilities import random_datetime
import pytz
import time
import unittest
class TestDateUtilities(unittest.TestCase):
def test_epoch_as_struct_time(self):
ok_(EPOCH_AS_STRUCT_TIME == time.gmtime(0))
def test_epoch_as_datetime(self):
ok_(EPOCH_AS_DATETIME.year == 1970)
def test_next_day_same_week_where_day_hasnt_passed(self):
# epoch is Thursday, January 1, 1970
saturday = next_day(EPOCH_AS_DATETIME, 6)
ok_(saturday.day == 3)
ok_(saturday.year == 1970)
ok_(saturday.month == 1)
def test_next_day_next_week_where_day_has_passed(self):
# epoch is Thursday, January 1, 1970
next_wednesday = next_day(EPOCH_AS_DATETIME, 3)
ok_(next_wednesday.day == 7)
ok_(next_wednesday.year == 1970)
ok_(next_wednesday.month == 1)
def test_random_datetime_with_utc_tz(self):
for x in xrange(1000):
x += 1
start_datetime = pytz.utc.localize(EPOCH_AS_DATETIME)
start_timestamp = calendar.timegm(start_datetime.utctimetuple())
end_datetime = pytz.utc.localize(EPOCH_AS_DATETIME +
timedelta(days=x))
end_timestamp = calendar.timegm(end_datetime.utctimetuple())
random = random_datetime(start_timestamp, end_timestamp,
pytz.utc)
ok_(random >= start_datetime)
ok_(end_datetime >= random)
# vim: filetype=apython
|
mit
| -4,479,779,889,592,963,000
| 34.901961
| 76
| 0.631349
| false
| 3.480989
| true
| false
| false
|
endlessm/chromium-browser
|
third_party/llvm/lldb/test/API/functionalities/avoids-fd-leak/TestFdLeak.py
|
1
|
4011
|
"""
Test whether a process started by lldb has no extra file descriptors open.
"""
import lldb
from lldbsuite.test import lldbutil
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
def python_leaky_fd_version(test):
import sys
# Python random module leaks file descriptors on some versions.
if sys.version_info >= (2, 7, 8) and sys.version_info < (2, 7, 10):
return "Python random module leaks file descriptors in this python version"
return None
class AvoidsFdLeakTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
    @skipIfTargetAndroid() # Android has some other file descriptors open by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_basic(self):
self.do_test([])
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
    @skipIfTargetAndroid() # Android has some other file descriptors open by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_log(self):
self.do_test(["log enable -f '/dev/null' lldb commands"])
def do_test(self, commands):
self.build()
exe = self.getBuildArtifact("a.out")
for c in commands:
self.runCmd(c)
target = self.dbg.CreateTarget(exe)
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
self.assertTrue(
process.GetState() == lldb.eStateExited,
"Process should have exited.")
self.assertTrue(
process.GetExitStatus() == 0,
"Process returned non-zero status. Were incorrect file descriptors passed?")
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
    @skipIfTargetAndroid() # Android has some other file descriptors open by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_multitarget(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
breakpoint = target.BreakpointCreateBySourceRegex(
'Set breakpoint here', lldb.SBFileSpec("main.c", False))
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process1 = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process1, PROCESS_IS_VALID)
self.assertTrue(
process1.GetState() == lldb.eStateStopped,
"Process should have been stopped.")
target2 = self.dbg.CreateTarget(exe)
process2 = target2.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process2, PROCESS_IS_VALID)
self.assertTrue(
process2.GetState() == lldb.eStateExited,
"Process should have exited.")
self.assertTrue(
process2.GetExitStatus() == 0,
"Process returned non-zero status. Were incorrect file descriptors passed?")
|
bsd-3-clause
| -3,330,200,459,424,116,000
| 36.839623
| 106
| 0.667913
| false
| 3.920821
| true
| false
| false
|
jwlin/web-crawler-tutorial
|
ch4/google_finance_api.py
|
1
|
2251
|
import requests
import json
from datetime import datetime, timedelta
def get_stock(query):
    # query may contain multiple stocks, e.g. TPE:2330,TPE:2498, separated by commas
resp = requests.get('http://finance.google.com/finance/info?client=ig&q=' + query)
if resp.status_code == 200:
        # Strip the leading // from the returned data
        # What remains is a list of dicts, one dict per stock
return json.loads(resp.text.replace('//', ''))
else:
return None
def get_stock_history(stock_id, stock_mkt):
resp = requests.get('http://www.google.com/finance/getprices?q=' + stock_id + '&x=' + stock_mkt + '&i=86400&p=1M')
''' e.g.,
EXCHANGE%3DTPE
MARKET_OPEN_MINUTE=540
MARKET_CLOSE_MINUTE=810
INTERVAL=86400
COLUMNS=DATE,CLOSE,HIGH,LOW,OPEN,VOLUME
DATA=
TIMEZONE_OFFSET=480
a1488346200,186,188.5,186,188.5,46176000
1,186,188.5,185,188,39914000
2,184,185,184,184.5,28085000
5,183.5,184.5,183.5,184,12527000
...
'''
index = -1
lines = resp.text.split('\n')
    for i, line in enumerate(lines):
        # A line starting with 'a' marks the first row of price data
        if line.startswith('a'):
            index = i
            break
if index > 0:
lines = lines[index:]
        # Determine the date of the first row
unix_time = int(lines[0].split(',')[0][1:])
init_time = datetime.fromtimestamp(unix_time)
rows = list()
        # Process the first row
first_row = lines[0].split(',')
first_row[0] = init_time
rows.append(first_row)
        # Process the remaining rows
for l in lines[1:]:
if l:
row = l.split(',')
delta = int(row[0])
row[0] = init_time + timedelta(days=delta)
rows.append(row)
return rows
else:
return None
if __name__ == '__main__':
query = 'TPE:2330'
    print(query, 'real-time quote')
stocks = get_stock(query)
print(stocks[0])
print('-----')
stock_id, stock_mkt = '2330', 'TPE'
    print(stock_mkt, stock_id, 'historical prices (Date, Close, High, Low, Open, Volume)')
rows = get_stock_history('2330', 'TPE')
for row in rows:
print(row[0].strftime("%Y/%m/%d"), row[1:])
|
mit
| -177,420,440,146,556,900
| 28.097222
| 118
| 0.556086
| false
| 2.615481
| false
| false
| false
|
devilry/devilry-django
|
devilry/devilry_group/feedbackfeed_builder/feedbackfeed_sidebarbuilder.py
|
1
|
1342
|
# -*- coding: utf-8 -*-
# Devilry imports
from devilry.devilry_comment.models import CommentFile
from devilry.devilry_group.feedbackfeed_builder import builder_base
from devilry.devilry_group import models as group_models
class FeedbackFeedSidebarBuilder(builder_base.FeedbackFeedBuilderBase):
def __init__(self, **kwargs):
super(FeedbackFeedSidebarBuilder, self).__init__(**kwargs)
self.feedbackset_dict = {}
def __get_files_for_comment(self, comment):
commentfiles = comment.commentfile_set.all()
commentfilelist = []
for commentfile in commentfiles:
commentfilelist.append(commentfile)
return commentfilelist
def build(self):
for feedbackset in self.feedbacksets:
self.feedbackset_dict[feedbackset.created_datetime] = {
'feedbackset_num': 0,
'feedbackset': feedbackset
}
self.feedbackset_dict = self.sort_dict(self.feedbackset_dict)
def get_as_list(self):
feedbackset_list = []
num = 1
for key_datetime in sorted(self.feedbackset_dict.keys()):
feedbacksets = self.feedbackset_dict[key_datetime]
feedbacksets['feedbackset_num'] = num
feedbackset_list.append(feedbacksets)
num += 1
return feedbackset_list
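# Rough usage sketch (the constructor kwargs are defined by the base builder
# and are illustrative here):
#   builder = FeedbackFeedSidebarBuilder(feedbacksets=feedbacksets)
#   builder.build()
#   sidebar_items = builder.get_as_list()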
|
bsd-3-clause
| -1,378,049,649,158,134,000
| 34.315789
| 71
| 0.651267
| false
| 4.03003
| false
| false
| false
|
bbengfort/cloudscope
|
cloudscope/console/commands/modify.py
|
1
|
12414
|
# cloudscope.console.commands.modify
# Modifies topologies in place for deploying to alternative sites.
#
# Author: Benjamin Bengfort <bengfort@cs.umd.edu>
# Created: Fri Aug 12 11:36:41 2016 -0400
#
# Copyright (C) 2016 University of Maryland
# For license information, see LICENSE.txt
#
# ID: modify.py [] benjamin@bengfort.com $
"""
Modifies topologies in place for deploying to alternative sites.
The original version of this script resets local paths for the traces and
modifies local and wide area latency for nodes.
"""
##########################################################################
## Imports
##########################################################################
import os
import json
import argparse
import warnings
from commis import Command
from commis.exceptions import ConsoleError
from cloudscope.experiment import compute_tick
##########################################################################
## Key/Value Type
##########################################################################
def keyval(string):
"""
    Parses one or more key=value pairs (separated by '&') from the command line.
"""
pairs = [
map(lambda s: s.strip(), pair.split("="))
for pair in string.split("&")
]
if not all([len(pair) == 2 for pair in pairs]):
raise argparse.ArgumentTypeError(
"Must pass key/value pairs as key1=value1&key2=value2"
)
return dict(pairs)
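# For illustration (values remain strings; callers coerce types as needed):
#   keyval("Wm=300&Ws=50")  ->  {'Wm': '300', 'Ws': '50'}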
##########################################################################
## Command
##########################################################################
class ModifyTopologyCommand(Command):
name = 'modify'
help = 'modifies a topology in place with new information'
args = {
'--Lm': {
"type": int,
"default": None,
"dest": "local_mean",
"help": 'modify the local area connection mean latencies',
},
'--Ls': {
"type": int,
"default": None,
"dest": "local_stddev",
"help": 'modify the local area connection latency standard deviation',
},
'--Wm': {
"type": int,
"default": None,
"dest": "wide_mean",
"help": 'modify the wide area connection mean latencies',
},
'--Ws': {
"type": int,
"default": None,
"dest": "wide_stddev",
"help": 'modify the wide area connection latency standard deviation',
},
'--sync-prob': {
"metavar": "P",
"type": float,
"default": None,
"help": "modify the sync probability of eventual nodes",
},
'--local-prob': {
"metavar": "P",
"type": float,
"default": None,
"help": "modify the select local probability of eventual nodes",
},
('-T', '--traces'): {
"metavar": "PATH",
"default": None,
"help": "specify a directory or trace to replace traces information",
},
('-M', '--meta'): {
"metavar": "KEY=VAL",
"default": None,
"type": keyval,
"help": "specify key/value pairs to modify in the meta data",
},
'topologies': {
'nargs': '+',
'metavar': 'topo.json',
'help': 'path(s) to the experiment topologies to modify',
}
}
def handle(self, args):
"""
Handles the modification of one or more topology files, collecting
information about how many edits are being made in the topology.
"""
mods = 0 # Track how many key/value pairs are being modified.
for path in args.topologies:
mods += self.modify_topology(path, args)
return "Modified {} key/value pairs in {} topologies".format(
mods, len(args.topologies)
)
def modify_topology(self, path, args):
"""
Modifies a topology in a file-like object with data input from the
command line, tracking how many changes are made at each point.
"""
# Load the topology data
with open(path, 'r') as fobj:
topo = json.load(fobj)
# Track the number of modifications
mods = 0
# If the local area parameters have been passed in, modify them.
if args.local_mean or args.local_stddev:
mods += self.modify_local_network(
topo, args.local_mean, args.local_stddev
)
# If the wide area parameters have been passed in, modify them.
if args.wide_mean or args.wide_stddev:
mods += self.modify_wide_network(
topo, args.wide_mean, args.wide_stddev
)
# If new traces have been passed in, modify it.
if args.traces:
mods += self.modify_traces(
topo, args.traces
)
# Modify Raft nodes
mods += self.modify_sequential(topo, args)
# Modify Eventual nodes
mods += self.modify_eventual(topo, args)
# Modify the meta data with the new information.
mods += self.modify_meta_info(topo, args)
# Dump the topology that has been modified back to disk.
# TODO: should we check if we've made any modifications before this?
with open(path, 'w') as fobj:
json.dump(topo, fobj, indent=2)
return mods
def modify_local_network(self, topo, mean, stddev):
"""
Modifies local area connections according to the network mean and
standard deviation. Returns number of modifications.
"""
# Modifications
mods = 0
# Must supply both the mean and the stddev
if not mean or not stddev:
raise ConsoleError(
"Must supply both the local mean and local standard deviation!"
)
# Modify the local links only!
for link in topo['links']:
if link['area'] == 'local':
mods += self.update_dict_value(link, 'latency', (mean, stddev))
# Modify the meta data about local connections
mods += self.update_meta_param(topo, 'local_latency', (mean, stddev))
return mods
def modify_wide_network(self, topo, mean, stddev):
"""
Modifies wide area connections according to the network mean and
standard deviation. This function will also update timing parameters
of the nodes according to the tick; it will also necessarily update
some of the meta information. Returns number of modifications.
"""
# Modifications
mods = 0
# Must supply both the mean and the stddev
if not mean or not stddev:
raise ConsoleError(
"Must supply both the wide mean and wide standard deviation!"
)
# Compute the tick parameter and timing params
        tick_model = topo['meta'].get('tick_param_model', 'conservative')
T = compute_tick(mean, stddev, tick_model)
# Timing parameters for individual nodes
eto = (T, 2*T)
hbi = T/2
aed = T/4
# Modify each node's timing parameters
for node in topo['nodes']:
if 'election_timeout' in node:
mods += self.update_dict_value(node, 'election_timeout', eto)
if 'heartbeat_interval' in node:
mods += self.update_dict_value(node, 'heartbeat_interval', hbi)
if 'anti_entropy_delay' in node:
mods += self.update_dict_value(node, 'anti_entropy_delay', aed)
# Modify the wide links only!
for link in topo['links']:
if link['area'] == 'wide':
mods += self.update_dict_value(link, 'latency', (mean, stddev))
# Modify the meta data
mods += self.update_meta_param(topo, 'tick_param_model', tick_model)
mods += self.update_meta_param(topo, 'wide_latency', (mean, stddev))
mods += self.update_meta_param(topo, 'anti_entropy_delay', aed)
mods += self.update_meta_param(topo, 'election_timeout', eto)
mods += self.update_meta_param(topo, 'heartbeat_interval', hbi)
mods += self.update_meta_param(topo, 'latency_mean', mean)
mods += self.update_meta_param(topo, 'latency_stddev', stddev)
mods += self.update_meta_param(topo, 'tick_metric', T)
mods += self.update_meta_param(topo, 'variable', "{}-{}ms".format(
mean - 2*stddev, mean + 2*stddev)
)
return mods
def modify_traces(self, topo, traces):
"""
Modifies the traces inside the meta data of the topology. Returns the
number of modifications made.
"""
# Modifications
mods = 0
if os.path.isdir(traces):
# Replace the metadata trace with a new directory
name = os.path.basename(topo['meta']['trace'])
path = os.path.abspath(os.path.join(traces, name))
# Quick check to make sure the trace exists
if not os.path.exists(path):
raise ConsoleError(
"Trace at {} does not exist!".format(path)
)
mods += self.update_meta_param(topo, 'trace', path)
elif os.path.isfile(traces):
# Replace the trace with the specified file.
mods += self.update_meta_param(topo, 'trace', traces)
else:
raise ConsoleError(
"Supply either a valid directory or path to a trace!"
)
return mods
def modify_meta_info(self, topo, args):
"""
Finalizes the meta information of the topology according to any global
changes that may have been made and need to be tracked. Returns the
total number of modifications made to the topology meta info.
"""
# Modifications
mods = 0
# Modify the overall latency range
local = topo['meta'].get('local_latency', [None, None])[0]
wide = topo['meta'].get('wide_latency', [None, None])[0]
lrng = [min(local, wide), max(local, wide)]
mods += self.update_meta_param(topo, 'latency_range', lrng)
if args.meta:
for key, val in args.meta.items():
mods += self.update_meta_param(topo, key, val)
return mods
def modify_sequential(self, topo, args):
"""
Modify sequential nodes with specific policies.
For now, this method is a noop.
"""
return 0
def modify_eventual(self, topo, args):
"""
Modify eventual nodes with specific policies. This method currently:
- sets the sync probability if given (and modifies the meta)
- sets the local probability if given (and modifies the meta)
Returns the number of modifications made.
"""
mods = 0 # count the number of modifications
# Modify each node's local and sync probabilities
for node in topo['nodes']:
# Only modify eventual or stentor nodes
if node['consistency'] not in {'eventual', 'stentor'}:
continue
if args.sync_prob is not None:
mods += self.update_dict_value(node, 'sync_prob', args.sync_prob)
if args.local_prob is not None:
mods += self.update_dict_value(node, 'local_prob', args.local_prob)
# Modify the meta information
if args.sync_prob is not None:
mods += self.update_meta_param(topo, 'sync_prob', args.sync_prob)
if args.local_prob is not None:
mods += self.update_meta_param(topo, 'local_prob', args.local_prob)
return mods
def update_dict_value(self, item, key, value):
"""
Updates a value in the dictionary if the supplied value doesn't match
the value for that key and returns 1, otherwise returns 0.
"""
if item.get(key, None) != value:
item[key] = value
return 1
return 0
def update_meta_param(self, topo, key, value):
"""
        Updates a metadata parameter if the existing value for the key
        doesn't match and returns 1, otherwise returns 0.
"""
return self.update_dict_value(topo['meta'], key, value)
|
mit
| 4,806,752,857,086,505,000
| 33.010959
| 83
| 0.550991
| false
| 4.406816
| false
| false
| false
|
keturn/txOpenBCI
|
txopenbci/control.py
|
1
|
6185
|
# -*- coding: utf-8 -*-
"""
Players:
* one who makes sure a connection to the device is open
- a stable presence in the community; everyone knows where to find them
* one who holds the connection to the device
- may come and go with the connection
* one who knows how to command the device
* one who hears what the device tells us
* those who listen, and interpret
* those who listen, and record
* those who listen, and display
"""
import os
from twisted.application.service import Service
from twisted.internet.endpoints import connectProtocol
from twisted.internet.error import ConnectionClosed
from twisted.python import log
from ._sausage import makeProtocol
from . import protocol
try:
import numpy
except ImportError as e:
numpy = None
numpy_reason = e
else:
numpy_reason = None
from .serial_endpoint import SerialPortEndpoint
def serialOpenBCI(serialPortName, reactor):
return SerialPortEndpoint(serialPortName, reactor,
baudrate=protocol.BAUD_RATE)
class DeviceSender(object):
_transport = None
def setTransport(self, transport):
self._transport = transport
def stopFlow(self):
self._transport = None
def _write(self, content):
return self._transport.write(content)
def reset(self):
self._write(protocol.CMD_RESET)
def start_stream(self):
self._write(protocol.CMD_STREAM_START)
def stop_stream(self):
log.msg("sending stop command")
self._write(protocol.CMD_STREAM_STOP)
class RawSample(object):
__slots__ = ['counter', 'eeg', 'accelerometer']
def __init__(self, counter, eeg, accelerometer):
self.counter = counter
self.eeg = eeg
self.accelerometer = accelerometer
def __hash__(self):
return hash((self.counter, self.eeg, self.accelerometer))
class DeviceReceiver(object):
currentRule = 'idle'
def __init__(self, commander):
"""
:type commander: DeviceCommander
"""
self.commander = commander
self._debugLog = None
self._sampleSubscribers = set()
def logIncoming(self, data):
if not self._debugLog:
filename = 'debug.%x.raw' % (os.getpid(),)
self._debugLog = file(filename, 'wb')
self._debugLog.write(data)
def handleResponse(self, content):
log.msg("device response:")
log.msg(content)
# sw33t hacks to capture some debug data
# log.msg("entering debug dump mode")
# self.currentRule = 'debug'
# self.sender.start_stream()
# from twisted.internet import reactor
# reactor.callLater(0.4, self.sender.stop_stream)
def handleSample(self, counter, sample):
# TODO: handle wrapping counter
# TODO: handle skipped packets
if self._sampleSubscribers:
eeg = protocol.int32From3Bytes(sample, 8, 0)
accelerometer = protocol.accelerometerFromBytes(sample, 24)
sample = RawSample(counter, eeg, accelerometer)
self._publishSample(sample)
def _publishSample(self, sample):
for listener in self._sampleSubscribers:
listener(sample)
# == Interfaces for subscribers ==
def subscribeToSampleData(self, listener):
self._sampleSubscribers.add(listener)
# prepareParsing and finishParsing are not called from the grammar, but
# from the ParserProtocol, as connection-related events.
def prepareParsing(self, parser):
self.commander.deviceOpen()
def finishParsing(self, reason):
self.commander.deviceLost(reason)
class DeviceCommander(object):
_senderFactory = DeviceSender
_connecting = None
def __init__(self):
self.client = None
self.sender = DeviceSender()
self.receiver = DeviceReceiver(self)
self._protocolClass = makeProtocol(
protocol.grammar, self.sender, self.receiver,
name="OpenBCIDevice")
def connect(self, endpoint):
if self.client:
raise RuntimeError("Already connected to %s" % (self.client,))
if self._connecting:
raise RuntimeError("Connection already in progress.")
self._connecting = connectProtocol(endpoint, self._protocolClass())
self._connecting.addCallbacks(self._setClient, self._connectFailed)
def _setClient(self, client):
self.client = client
self._connecting = None
def _connectFailed(self, reason):
log.msg(reason.getErrorMessage())
self._connecting = None
# == Events we get from DeviceReceiver ==
def deviceOpen(self):
# Send the reset command, so we know we're starting with a predictable
# state.
self.sender.reset()
def deviceLost(self, reason):
if not reason.check(ConnectionClosed):
log.msg("Parser error: %s" % (reason.getErrorMessage(),))
log.msg(reason.getTraceback())
else:
log.msg("Receiver finished: %s" % (reason.getErrorMessage(),))
self.client = None
# == Outward-facing commands: ==
def hangUp(self):
if self.client:
self.sender.stop_stream()
self.client.transport.loseConnection()
def destroy(self):
self.hangUp()
self.client = None
if self._connecting:
self._connecting.cancel()
def startStream(self):
self.receiver.currentRule = 'sample'
self.sender.start_stream()
def stopStream(self):
self.sender.stop_stream()
# TODO: set currentRule back once stream actually ends
def reset(self):
self.sender.reset()
class DeviceService(Service):
def __init__(self, endpoint):
self.endpoint = endpoint
self.commander = DeviceCommander()
def startService(self):
log.msg("Starting service.")
if numpy_reason:
log.msg("Note: numpy is not available: %s" % (numpy_reason,))
Service.startService(self)
self.commander.connect(self.endpoint)
def stopService(self):
self.commander.destroy()
Service.stopService(self)
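# --- Editor's usage sketch (not part of the original module). It wires the
# pieces defined above into a runnable entry point; the serial device path
# is a placeholder and will differ per machine.
if __name__ == '__main__':
    import sys
    from twisted.internet import reactor
    log.startLogging(sys.stdout)
    endpoint = serialOpenBCI('/dev/ttyUSB0', reactor)  # placeholder port
    DeviceService(endpoint).startService()
    reactor.run()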
|
apache-2.0
| -3,535,835,345,981,307,000
| 25.545064
| 78
| 0.638157
| false
| 4.128838
| false
| false
| false
|
mdworks2016/work_development
|
Python/20_Third_Certification/venv/lib/python3.7/site-packages/celery/backends/s3.py
|
1
|
2745
|
# -*- coding: utf-8 -*-
"""s3 result store backend."""
from __future__ import absolute_import, unicode_literals
from kombu.utils.encoding import bytes_to_str
from celery.exceptions import ImproperlyConfigured
from .base import KeyValueStoreBackend
try:
import boto3
import botocore
except ImportError:
boto3 = None
botocore = None
__all__ = ('S3Backend',)
class S3Backend(KeyValueStoreBackend):
"""An S3 task result store.
Raises:
celery.exceptions.ImproperlyConfigured:
if module :pypi:`boto3` is not available,
if the :setting:`aws_access_key_id` or
        :setting:`aws_secret_access_key` are not set,
        or if the :setting:`bucket` is not set.
"""
def __init__(self, **kwargs):
super(S3Backend, self).__init__(**kwargs)
if not boto3 or not botocore:
            raise ImproperlyConfigured('You must install boto3 '
                                       'to use the s3 backend')
conf = self.app.conf
self.endpoint_url = conf.get('s3_endpoint_url', None)
self.aws_region = conf.get('s3_region', None)
self.aws_access_key_id = conf.get('s3_access_key_id', None)
self.aws_secret_access_key = conf.get('s3_secret_access_key', None)
self.bucket_name = conf.get('s3_bucket', None)
if not self.bucket_name:
raise ImproperlyConfigured('Missing bucket name')
self.base_path = conf.get('s3_base_path', None)
self._s3_resource = self._connect_to_s3()
def _get_s3_object(self, key):
key_bucket_path = self.base_path + key if self.base_path else key
return self._s3_resource.Object(self.bucket_name, key_bucket_path)
def get(self, key):
key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
try:
s3_object.load()
return s3_object.get()['Body'].read().decode('utf-8')
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == "404":
return None
raise error
def set(self, key, value):
key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
s3_object.put(Body=value)
def delete(self, key):
s3_object = self._get_s3_object(key)
s3_object.delete()
def _connect_to_s3(self):
session = boto3.Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region
)
if session.get_credentials() is None:
raise ImproperlyConfigured('Missing aws s3 creds')
return session.resource('s3', endpoint_url=self.endpoint_url)
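# --- Editor's configuration sketch (not part of this module). The keys
# mirror the conf.get() names read above; the credentials and bucket are
# placeholders, and the 's3' result-backend alias is assumed.
if __name__ == '__main__':
    from celery import Celery
    app = Celery('tasks', broker='memory://')
    app.conf.update(
        result_backend='s3',
        s3_access_key_id='AKIA...',           # placeholder
        s3_secret_access_key='...',           # placeholder
        s3_bucket='my-results-bucket',        # required, else ImproperlyConfigured
        s3_base_path='celery/',               # optional key prefix
        s3_region='eu-west-1',                # optional
    )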
|
apache-2.0
| 759,880,483,341,247,200
| 30.551724
| 75
| 0.601821
| false
| 3.630952
| false
| false
| false
|
jhartford/pybo
|
pybo/demos/intermediate.py
|
1
|
1969
|
"""
Demo performing Bayesian optimization on an objective function sampled from a
Gaussian process. This script also demonstrates user-defined visualization via
a callback function that is imported from the advanced demo.
Note that in this demo we are sampling an objective function from a Gaussian
process. We are not, however, modifying the default GP used internally by
`pybo.solve_bayesopt`. The default model used within `pybo.solve_bayesopt` is a
GP with constant mean, Matern 5 kernel, and hyperparameters marginalized using
MCMC. To modify this behavior see the advanced demo.
In this demo we also explore the following additional Bayesian optimization
modules that can be user-defined:
- the initial search grid,
- the selection policy,
- the recommendation strategy, and
- composite kernels (a `pygp` feature).
"""
import numpy as np
import pygp
import pybo
# import callback from advanced demo
import os
import sys
sys.path.append(os.path.dirname(__file__))
from advanced import callback
if __name__ == '__main__':
rng = 0 # random seed
    bounds = np.array([[3, 5]])        # bounds of search space, shape (dim, 2)
dim = bounds.shape[0] # dimension of space
# define a GP which we will sample an objective from.
likelihood = pygp.likelihoods.Gaussian(sigma=1e-6)
kernel = pygp.kernels.Periodic(1, 1, 0.5) + pygp.kernels.SE(1, 1)
gp = pygp.inference.ExactGP(likelihood, kernel, mean=0.0)
objective = pybo.functions.GPModel(bounds, gp, rng=rng)
info = pybo.solve_bayesopt(
objective,
bounds,
niter=30*dim,
init='latin', # initialization policy
policy='thompson', # exploration policy
recommender='observed', # recommendation policy
noisefree=True,
rng=rng,
callback=callback)
|
bsd-2-clause
| -4,735,991,816,232,202,000
| 37.607843
| 80
| 0.6516
| false
| 4.207265
| false
| true
| false
|
adongy/adminradius
|
admin_radius/models.py
|
1
|
4418
|
from django.db import models
from .raw_models import *
from django.core.urlresolvers import reverse
import datetime
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
class RadPassManager(models.Manager):
def get_queryset(self):
return super(RadPassManager, self).get_queryset().filter(attribute='NT-Password', op=':=')
class RadStartDateManager(models.Manager):
def get_queryset(self):
return super(RadStartDateManager, self).get_queryset().filter(attribute='User-Start-Date', op=':=')
class RadEndDateManager(models.Manager):
def get_queryset(self):
return super(RadEndDateManager, self).get_queryset().filter(attribute='User-End-Date', op=':=')
class RadPass(Radcheck):
objects = RadPassManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'NT-Password'
self._meta.get_field('op').default = ':='
super(RadPass, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadPass, self).clean_fields(exclude)
if self.value and len(self.value) != 32:
raise ValidationError(_("Hash is incorrectly formatted. Input as a 32 hexadecimal character string without a leading '0x' prefix."))
class Meta:
proxy = True
class RadStartDate(Radcheck):
objects = RadStartDateManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'User-Start-Date'
self._meta.get_field('op').default = ':='
super(RadStartDate, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadStartDate, self).clean_fields(exclude)
if self.value:
try:
datetime.datetime.strptime(self.value, '%Y%m%d')
except ValueError:
raise ValidationError(_("Input date is not formatted as YYYYMMDD."))
def get_date(self):
if self.value:
return datetime.datetime.strptime(self.value, '%Y%m%d')
else:
return None
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
class Meta:
proxy = True
class RadEndDate(Radcheck):
objects = RadEndDateManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'User-End-Date'
self._meta.get_field('op').default = ':='
super(RadEndDate, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadEndDate, self).clean_fields(exclude)
if self.value:
try:
datetime.datetime.strptime(self.value, '%Y%m%d')
except ValueError:
raise ValidationError(_("Input date is not formatted as YYYYMMDD."))
def get_date(self):
if self.value:
return datetime.datetime.strptime(self.value, '%Y%m%d')
else:
return None
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
class Meta:
proxy = True
class RadUser(models.Model):
username = models.CharField(max_length=64, unique=True)
start_date = models.OneToOneField(RadStartDate)
end_date = models.OneToOneField(RadEndDate)
password = models.OneToOneField(RadPass, blank=True, null=True)
@property
def is_online(self):
return Radacct.objects.filter(
username=self.username,
acctstoptime=None).exists()
"""
def clean(self):
# username must be consistent
if self.start_date and self.username and self.start_date.username != self.username:
raise ValidationError({'start_date': _('Usernames do not match.')})
if self.end_date and self.username and self.end_date.username != self.username:
raise ValidationError({'end_date': _('Usernames do not match.')})
if self.password and self.username and self.password.username != self.username:
raise ValidationError({'password': _('Usernames do not match.')})
"""
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
def __str__(self):
return "<Raduser {}>".format(self.username)
|
mit
| 73,345,521,450,151,820
| 36.449153
| 144
| 0.611815
| false
| 4.083179
| false
| false
| false
|
YuxuanLing/trunk
|
trunk/code/study/python/Fluent-Python-example-code/21-class-metaprog/bulkfood/model_v8.py
|
1
|
2193
|
import abc
import collections
class AutoStorage:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self
else:
return getattr(instance, self.storage_name)
def __set__(self, instance, value):
setattr(instance, self.storage_name, value)
class Validated(abc.ABC, AutoStorage):
def __set__(self, instance, value):
value = self.validate(instance, value)
super().__set__(instance, value)
@abc.abstractmethod
def validate(self, instance, value):
"""return validated value or raise ValueError"""
class Quantity(Validated):
"""a number greater than zero"""
def validate(self, instance, value):
if value <= 0:
raise ValueError('value must be > 0')
return value
class NonBlank(Validated):
"""a string with at least one non-space character"""
def validate(self, instance, value):
value = value.strip()
if len(value) == 0:
raise ValueError('value cannot be empty or blank')
return value
# BEGIN MODEL_V8
class EntityMeta(type):
"""Metaclass for business entities with validated fields"""
@classmethod
def __prepare__(cls, name, bases):
return collections.OrderedDict() # <1>
def __init__(cls, name, bases, attr_dict):
super().__init__(name, bases, attr_dict)
cls._field_names = [] # <2>
for key, attr in attr_dict.items(): # <3>
if isinstance(attr, Validated):
type_name = type(attr).__name__
attr.storage_name = '_{}#{}'.format(type_name, key)
cls._field_names.append(key) # <4>
class Entity(metaclass=EntityMeta):
"""Business entity with validated fields"""
@classmethod
def field_names(cls): # <5>
for name in cls._field_names:
yield name
# END MODEL_V8
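# --- Editor's usage sketch, adapted from the book's bulkfood LineItem
# example (the class below is illustrative and not part of model_v8):
class LineItem(Entity):
    description = NonBlank()
    weight = Quantity()
    price = Quantity()
    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price
    def subtotal(self):
        return self.weight * self.price
if __name__ == '__main__':
    # field_names() yields fields in definition order because
    # EntityMeta.__prepare__ returns an OrderedDict.
    print(list(LineItem.field_names()))  # ['description', 'weight', 'price']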
|
gpl-3.0
| 8,952,538,520,680,736,000
| 25.4125
| 67
| 0.559052
| false
| 4.185115
| false
| false
| false
|
diogo149/dooML
|
loss.py
|
1
|
2707
|
"""Table of Contents
-modified_huber
-hinge
-squared_hinge
-log
-squared
-huber
-epsilon_insensitive
-squared_epsilon_insensitive
-alpha_huber
-absolute
"""
import numpy as np
def modified_huber(p, y):
"""Modified Huber loss for binary classification with y in {-1, 1}; equivalent to quadratically smoothed SVM with gamma = 2
"""
z = p * y
loss = -4.0 * z
idx = z >= -1.0
loss[idx] = (z[idx] - 1.0) ** 2
loss[z >= 1.0] = 0.0
return loss
def hinge(p, y, threshold=1.0):
"""Hinge loss for binary classification tasks with y in {-1,1}
Parameters
----------
threshold : float > 0.0
Margin threshold. When threshold=1.0, one gets the loss used by SVM.
When threshold=0.0, one gets the loss used by the Perceptron.
"""
z = p * y
loss = threshold - z
loss[loss < 0] = 0.0
return loss
def squared_hinge(p, y, threshold=1.0):
"""Squared Hinge loss for binary classification tasks with y in {-1,1}
Parameters
----------
threshold : float > 0.0
Margin threshold. When threshold=1.0, one gets the loss used by
(quadratically penalized) SVM.
"""
return hinge(p, y, threshold) ** 2
def log(p, y):
"""Logistic regression loss for binary classification with y in {-1, 1}"""
z = p * y
return np.log(1.0 + np.exp(-z))
def squared(p, y):
"""Squared loss traditional used in linear regression."""
return 0.5 * (p - y) ** 2
def huber(p, y, epsilon=0.1):
"""Huber regression loss
Variant of the SquaredLoss that is robust to outliers (quadratic near zero,
linear for large errors).
http://en.wikipedia.org/wiki/Huber_Loss_Function
"""
abs_r = np.abs(p - y)
loss = 0.5 * abs_r ** 2
    idx = abs_r > epsilon  # linear branch only for residuals beyond epsilon
loss[idx] = epsilon * abs_r[idx] - 0.5 * epsilon ** 2
return loss
def epsilon_insensitive(p, y, epsilon=0.1):
"""Epsilon-Insensitive loss (used by SVR).
loss = max(0, |y - p| - epsilon)
"""
loss = np.abs(y - p) - epsilon
loss[loss < 0.0] = 0.0
return loss
def squared_epsilon_insensitive(p, y, epsilon=0.1):
"""Epsilon-Insensitive loss.
loss = max(0, |y - p| - epsilon)^2
"""
return epsilon_insensitive(p, y, epsilon) ** 2
def alpha_huber(p, y, alpha=0.9):
""" sets the epislon in huber loss equal to a percentile of the residuals
"""
abs_r = np.abs(p - y)
loss = 0.5 * abs_r ** 2
epsilon = np.percentile(loss, alpha * 100)
    idx = abs_r > epsilon  # linear branch only for residuals beyond epsilon
loss[idx] = epsilon * abs_r[idx] - 0.5 * epsilon ** 2
return loss
def absolute(p, y):
""" absolute value of loss
"""
return np.abs(p - y)
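# --- Editor's smoke test (not part of the original module): a quick sanity
# check of a classification loss and the two Huber branches.
if __name__ == '__main__':
    p = np.array([2.0, 0.5, -0.3])
    y = np.array([1, 1, -1])
    print(hinge(p, y))                          # [ 0.   0.5  0.7]
    r_pred = np.array([0.0, 1.0])
    r_true = np.array([0.05, 0.0])
    print(huber(r_pred, r_true, epsilon=0.1))   # [ 0.00125  0.095  ]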
|
gpl-3.0
| 2,330,747,692,834,036,700
| 22.53913
| 127
| 0.586997
| false
| 3.162383
| false
| false
| false
|
DiamondLightSource/diffcalc
|
diffcalc/hkl/vlieg/calc.py
|
1
|
32658
|
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
from math import pi, asin, acos, sin, cos, sqrt, atan2, fabs, atan
from diffcalc import settings
try:
from numpy import matrix
from numpy.linalg import norm
except ImportError:
from numjy import matrix
from numjy.linalg import norm
from diffcalc.hkl.calcbase import HklCalculatorBase
from diffcalc.hkl.vlieg.transform import TransformCInRadians
from diffcalc.util import dot3, cross3, bound, differ
from diffcalc.hkl.vlieg.geometry import createVliegMatrices, \
createVliegsPsiTransformationMatrix, \
createVliegsSurfaceTransformationMatrices, calcPHI
from diffcalc.hkl.vlieg.geometry import VliegPosition
from diffcalc.hkl.vlieg.constraints import VliegParameterManager
from diffcalc.hkl.vlieg.constraints import ModeSelector
from diffcalc.ub.calc import PaperSpecificUbCalcStrategy
TORAD = pi / 180
TODEG = 180 / pi
transformC = TransformCInRadians()
PREFER_POSITIVE_CHI_SOLUTIONS = True
I = matrix('1 0 0; 0 1 0; 0 0 1')
y = matrix('0; 1; 0')
def check(condition, ErrorOrStringOrCallable, *args):
"""
fail = check(condition, ErrorOrString) -- if condition is false raises the
Exception passed in, or creates one from a string. If a callable function
is passed in this is called with any args specified and the thing returns
false.
"""
# TODO: Remove (really nasty) check function
if condition == False:
if callable(ErrorOrStringOrCallable):
ErrorOrStringOrCallable(*args)
return False
elif isinstance(ErrorOrStringOrCallable, str):
raise Exception(ErrorOrStringOrCallable)
else: # assume input is an exception
raise ErrorOrStringOrCallable
return True
def sign(x):
if x < 0:
return -1
else:
return 1
def vliegAnglesToHkl(pos, wavelength, UBMatrix):
"""
Returns hkl indices from pos object in radians.
"""
wavevector = 2 * pi / wavelength
# Create transformation matrices
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
# Create the plane normal vector in the alpha axis coordinate frame
qa = ((DELTA * GAMMA) - ALPHA.I) * matrix([[0], [wavevector], [0]])
    # Transform the plane normal vector from the alpha frame to the
    # reciprocal lattice frame.
hkl = UBMatrix.I * PHI.I * CHI.I * OMEGA.I * qa
return hkl[0, 0], hkl[1, 0], hkl[2, 0]
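# Editor's note (illustrative check, not in the original module): with the
# identity UB matrix and every axis at zero, the scattering vector qa is
# zero, so the mapping lands on the origin of reciprocal space:
#     vliegAnglesToHkl(VliegPosition(0, 0, 0, 0, 0, 0), 1., I) == (0., 0., 0.)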
class VliegUbCalcStrategy(PaperSpecificUbCalcStrategy):
def calculate_q_phi(self, pos):
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
u1a = (DELTA * GAMMA - ALPHA.I) * y
u1p = PHI.I * CHI.I * OMEGA.I * u1a
return u1p
class VliegHklCalculator(HklCalculatorBase):
def __init__(self, ubcalc,
raiseExceptionsIfAnglesDoNotMapBackToHkl=True):
r = raiseExceptionsIfAnglesDoNotMapBackToHkl
HklCalculatorBase.__init__(self, ubcalc,
raiseExceptionsIfAnglesDoNotMapBackToHkl=r)
self._gammaParameterName = ({'arm': 'gamma', 'base': 'oopgamma'}
[settings.geometry.gamma_location])
self.mode_selector = ModeSelector(settings.geometry, None,
self._gammaParameterName)
self.parameter_manager = VliegParameterManager(
settings.geometry, settings.hardware, self.mode_selector,
self._gammaParameterName)
self.mode_selector.setParameterManager(self.parameter_manager)
def __str__(self):
        # should list parameters and indicate which are used in selected mode
result = "Available mode_selector:\n"
result += self.mode_selector.reportAvailableModes()
result += '\nCurrent mode:\n'
result += self.mode_selector.reportCurrentMode()
result += '\n\nParameters:\n'
result += self.parameter_manager.reportAllParameters()
return result
def _anglesToHkl(self, pos, wavelength):
"""
Return hkl tuple from VliegPosition in radians and wavelength in
Angstroms.
"""
return vliegAnglesToHkl(pos, wavelength, self._getUBMatrix())
def _anglesToVirtualAngles(self, pos, wavelength):
"""
Return dictionary of all virtual angles in radians from VliegPosition
        object in radians and wavelength in Angstroms. The virtual angles are:
Bin, Bout, azimuth and 2theta.
"""
# Create transformation matrices
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
S = TAU * SIGMA
y_vector = matrix([[0], [1], [0]])
# Calculate Bin from equation 15:
surfacenormal_alpha = OMEGA * CHI * PHI * S * matrix([[0], [0], [1]])
incoming_alpha = ALPHA.I * y_vector
minusSinBetaIn = dot3(surfacenormal_alpha, incoming_alpha)
Bin = asin(bound(-minusSinBetaIn))
# Calculate Bout from equation 16:
        # surfacenormal_alpha has just been calculated
outgoing_alpha = DELTA * GAMMA * y_vector
sinBetaOut = dot3(surfacenormal_alpha, outgoing_alpha)
Bout = asin(bound(sinBetaOut))
# Calculate 2theta from equation 25:
cosTwoTheta = dot3(ALPHA * DELTA * GAMMA * y_vector, y_vector)
twotheta = acos(bound(cosTwoTheta))
psi = self._anglesToPsi(pos, wavelength)
return {'Bin': Bin, 'Bout': Bout, 'azimuth': psi, '2theta': twotheta}
def _hklToAngles(self, h, k, l, wavelength):
"""
Return VliegPosition and virtual angles in radians from h, k & l and
wavelength in Angstroms. The virtual angles are those fixed or
generated while calculating the position: Bin, Bout and 2theta; and
azimuth in four and five circle modes.
"""
if self._getMode().group in ("fourc", "fivecFixedGamma",
"fivecFixedAlpha"):
return self._hklToAnglesFourAndFiveCirclesModes(h, k, l,
wavelength)
elif self._getMode().group == "zaxis":
return self._hklToAnglesZaxisModes(h, k, l, wavelength)
else:
raise RuntimeError(
'The current mode (%s) has an unrecognised group: %s.'
% (self._getMode().name, self._getMode().group))
def _hklToAnglesFourAndFiveCirclesModes(self, h, k, l, wavelength):
"""
Return VliegPosition and virtual angles in radians from h, k & l and
wavelength in Angstrom for four and five circle modes. The virtual
angles are those fixed or generated while calculating the position:
Bin, Bout, 2theta and azimuth.
"""
        # Results in radians during calculations, returned in degrees
pos = VliegPosition(None, None, None, None, None, None)
# Normalise hkl
wavevector = 2 * pi / wavelength
hklNorm = matrix([[h], [k], [l]]) / wavevector
# Compute hkl in phi axis coordinate frame
hklPhiNorm = self._getUBMatrix() * hklNorm
# Determine Bin and Bout
if self._getMode().name == '4cPhi':
Bin = Bout = None
else:
Bin, Bout = self._determineBinAndBoutInFourAndFiveCirclesModes(
hklNorm)
# Determine alpha and gamma
if self._getMode().group == 'fourc':
pos.alpha, pos.gamma = \
self._determineAlphaAndGammaForFourCircleModes(hklPhiNorm)
else:
pos.alpha, pos.gamma = \
self._determineAlphaAndGammaForFiveCircleModes(Bin, hklPhiNorm)
if pos.alpha < -pi:
pos.alpha += 2 * pi
if pos.alpha > pi:
pos.alpha -= 2 * pi
# Determine delta
(pos.delta, twotheta) = self._determineDelta(hklPhiNorm, pos.alpha,
pos.gamma)
# Determine omega, chi & phi
pos.omega, pos.chi, pos.phi, psi = \
self._determineSampleAnglesInFourAndFiveCircleModes(
hklPhiNorm, pos.alpha, pos.delta, pos.gamma, Bin)
# (psi will be None in fixed phi mode)
# Ensure that by default omega is between -90 and 90, by possibly
# transforming the sample angles
if self._getMode().name != '4cPhi': # not in fixed-phi mode
if pos.omega < -pi / 2 or pos.omega > pi / 2:
pos = transformC.transform(pos)
# Gather up the virtual angles calculated along the way...
# -pi<psi<=pi
if psi is not None:
if psi > pi:
psi -= 2 * pi
if psi < (-1 * pi):
psi += 2 * pi
v = {'2theta': twotheta, 'Bin': Bin, 'Bout': Bout, 'azimuth': psi}
return pos, v
def _hklToAnglesZaxisModes(self, h, k, l, wavelength):
"""
Return VliegPosition and virtual angles in radians from h, k & l and
wavelength in Angstroms for z-axis modes. The virtual angles are those
fixed or generated while calculating the position: Bin, Bout, and
2theta.
"""
# Section 6:
        # Results in radians during calculations, returned in degrees
pos = VliegPosition(None, None, None, None, None, None)
# Normalise hkl
wavevector = 2 * pi / wavelength
hkl = matrix([[h], [k], [l]])
hklNorm = hkl * (1.0 / wavevector)
# Compute hkl in phi axis coordinate frame
hklPhi = self._getUBMatrix() * hkl
hklPhiNorm = self._getUBMatrix() * hklNorm
# Determine Chi and Phi (Equation 29):
pos.phi = -self._getTau() * TORAD
pos.chi = -self._getSigma() * TORAD
# Equation 30:
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
None, None, None, None, pos.chi, pos.phi)
del ALPHA, DELTA, GAMMA, OMEGA
Hw = CHI * PHI * hklPhi
# Determine Bin and Bout:
(Bin, Bout) = self._determineBinAndBoutInZaxisModes(
Hw[2, 0] / wavevector)
# Determine Alpha and Gamma (Equation 32):
pos.alpha = Bin
pos.gamma = Bout
# Determine Delta:
(pos.delta, twotheta) = self._determineDelta(hklPhiNorm, pos.alpha,
pos.gamma)
# Determine Omega:
delta = pos.delta
gamma = pos.gamma
d1 = (Hw[1, 0] * sin(delta) * cos(gamma) - Hw[0, 0] *
(cos(delta) * cos(gamma) - cos(pos.alpha)))
d2 = (Hw[0, 0] * sin(delta) * cos(gamma) + Hw[1, 0] *
(cos(delta) * cos(gamma) - cos(pos.alpha)))
if fabs(d2) < 1e-30:
pos.omega = sign(d1) * sign(d2) * pi / 2.0
else:
pos.omega = atan2(d1, d2)
# Gather up the virtual angles calculated along the way
return pos, {'2theta': twotheta, 'Bin': Bin, 'Bout': Bout}
###
def _determineBinAndBoutInFourAndFiveCirclesModes(self, hklNorm):
"""(Bin, Bout) = _determineBinAndBoutInFourAndFiveCirclesModes()"""
BinModes = ('4cBin', '5cgBin', '5caBin')
BoutModes = ('4cBout', '5cgBout', '5caBout')
BeqModes = ('4cBeq', '5cgBeq', '5caBeq')
azimuthModes = ('4cAzimuth')
fixedBusingAndLeviWmodes = ('4cFixedw')
# Calculate RHS of equation 20
# RHS (1/K)(S^-1*U*B*H)_3 where H/K = hklNorm
UB = self._getUBMatrix()
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
#S = SIGMA * TAU
S = TAU * SIGMA
RHS = (S.I * UB * hklNorm)[2, 0]
if self._getMode().name in BinModes:
Bin = self._getParameter('betain')
check(Bin != None, "The parameter betain must be set for mode %s" %
self._getMode().name)
Bin = Bin * TORAD
sinBout = RHS - sin(Bin)
check(fabs(sinBout) <= 1, "Could not compute Bout")
Bout = asin(sinBout)
elif self._getMode().name in BoutModes:
Bout = self._getParameter('betaout')
check(Bout != None, "The parameter Bout must be set for mode %s" %
self._getMode().name)
Bout = Bout * TORAD
sinBin = RHS - sin(Bout)
check(fabs(sinBin) <= 1, "Could not compute Bin")
Bin = asin(sinBin)
elif self._getMode().name in BeqModes:
sinBeq = RHS / 2
check(fabs(sinBeq) <= 1, "Could not compute Bin=Bout")
Bin = Bout = asin(sinBeq)
elif self._getMode().name in azimuthModes:
azimuth = self._getParameter('azimuth')
check(azimuth != None, "The parameter azimuth must be set for "
"mode %s" % self._getMode().name)
del azimuth
# TODO: codeit
raise NotImplementedError()
elif self._getMode().name in fixedBusingAndLeviWmodes:
bandlomega = self._getParameter('blw')
            check(bandlomega != None, "The parameter bandlomega must be set "
"for mode %s" % self._getMode().name)
del bandlomega
# TODO: codeit
raise NotImplementedError()
else:
raise RuntimeError("AngleCalculator does not know how to handle "
"mode %s" % self._getMode().name)
return (Bin, Bout)
def _determineBinAndBoutInZaxisModes(self, Hw3OverK):
"""(Bin, Bout) = _determineBinAndBoutInZaxisModes(HwOverK)"""
BinModes = ('6czBin')
BoutModes = ('6czBout')
BeqModes = ('6czBeq')
if self._getMode().name in BinModes:
Bin = self._getParameter('betain')
check(Bin != None, "The parameter betain must be set for mode %s" %
self._getMode().name)
Bin = Bin * TORAD
# Equation 32a:
Bout = asin(Hw3OverK - sin(Bin))
elif self._getMode().name in BoutModes:
Bout = self._getParameter('betaout')
check(Bout != None, "The parameter Bout must be set for mode %s" %
self._getMode().name)
Bout = Bout * TORAD
# Equation 32b:
Bin = asin(Hw3OverK - sin(Bout))
elif self._getMode().name in BeqModes:
# Equation 32c:
Bin = Bout = asin(Hw3OverK / 2)
return (Bin, Bout)
###
def _determineAlphaAndGammaForFourCircleModes(self, hklPhiNorm):
if self._getMode().group == 'fourc':
alpha = self._getParameter('alpha') * TORAD
gamma = self._getParameter(self._getGammaParameterName()) * TORAD
check(alpha != None, "alpha parameter must be set in fourc modes")
check(gamma != None, "gamma parameter must be set in fourc modes")
return alpha, gamma
else:
raise RuntimeError(
"determineAlphaAndGammaForFourCirclesModes() "
"is not appropriate for %s modes" % self._getMode().group)
def _determineAlphaAndGammaForFiveCircleModes(self, Bin, hklPhiNorm):
## Solve equation 34 for one possible Y, Yo
# Calculate surface normal in phi frame
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
S = TAU * SIGMA
surfaceNormalPhi = S * matrix([[0], [0], [1]])
# Compute beta in vector
BetaVector = matrix([[0], [-sin(Bin)], [cos(Bin)]])
# Find Yo
Yo = self._findMatrixToTransformAIntoB(surfaceNormalPhi, BetaVector)
## Calculate Hv from equation 39
Z = matrix([[1, 0, 0],
[0, cos(Bin), sin(Bin)],
[0, -sin(Bin), cos(Bin)]])
Hv = Z * Yo * hklPhiNorm
# Fixed gamma:
if self._getMode().group == 'fivecFixedGamma':
gamma = self._getParameter(self._getGammaParameterName())
check(gamma != None,
"gamma parameter must be set in fivecFixedGamma modes")
gamma = gamma * TORAD
H2 = (hklPhiNorm[0, 0] ** 2 + hklPhiNorm[1, 0] ** 2 +
hklPhiNorm[2, 0] ** 2)
a = -(0.5 * H2 * sin(Bin) - Hv[2, 0])
b = -(1.0 - 0.5 * H2) * cos(Bin)
c = cos(Bin) * sin(gamma)
check((b * b + a * a - c * c) >= 0, 'Could not solve for alpha')
alpha = 2 * atan2(-(b + sqrt(b * b + a * a - c * c)), -(a + c))
# Fixed Alpha:
elif self._getMode().group == 'fivecFixedAlpha':
alpha = self._getParameter('alpha')
check(alpha != None,
"alpha parameter must be set in fivecFixedAlpha modes")
alpha = alpha * TORAD
H2 = (hklPhiNorm[0, 0] ** 2 + hklPhiNorm[1, 0] ** 2 +
hklPhiNorm[2, 0] ** 2)
t0 = ((2 * cos(alpha) * Hv[2, 0] - sin(Bin) * cos(alpha) * H2 +
cos(Bin) * sin(alpha) * H2 - 2 * cos(Bin) * sin(alpha)) /
(cos(Bin) * 2.0))
check(abs(t0) <= 1, "Cannot compute gamma: sin(gamma)>1")
gamma = asin(t0)
else:
raise RuntimeError(
"determineAlphaAndGammaInFiveCirclesModes() is not "
"appropriate for %s modes" % self._getMode().group)
return (alpha, gamma)
###
def _determineDelta(self, hklPhiNorm, alpha, gamma):
"""
(delta, twotheta) = _determineDelta(hklPhiNorm, alpha, gamma) --
computes delta for all modes. Also returns twotheta for sanity
checking. hklPhiNorm is a 3X1 matrix.
alpha, gamma & delta - in radians.
h k & l normalised to wavevector and in phi axis coordinates
"""
h = hklPhiNorm[0, 0]
k = hklPhiNorm[1, 0]
l = hklPhiNorm[2, 0]
# See Vlieg section 5 (with K=1)
cosdelta = ((1 + sin(gamma) * sin(alpha) - (h * h + k * k + l * l) / 2)
/ (cos(gamma) * cos(alpha)))
costwotheta = (cos(alpha) * cos(gamma) * bound(cosdelta) -
sin(alpha) * sin(gamma))
return (acos(bound(cosdelta)), acos(bound(costwotheta)))
def _determineSampleAnglesInFourAndFiveCircleModes(self, hklPhiNorm, alpha,
delta, gamma, Bin):
"""
(omega, chi, phi, psi)=determineNonZAxisSampleAngles(hklPhiNorm, alpha,
delta, gamma, sigma, tau) where hkl has been normalised by the
wavevector and is in the phi Axis coordinate frame. All angles in
radians. hklPhiNorm is a 3X1 matrix
"""
def equation49through59(psi):
# equation 49 R = (D^-1)*PI*D*Ro
PSI = createVliegsPsiTransformationMatrix(psi)
R = D.I * PSI * D * Ro
# eq 57: extract omega from R
if abs(R[0, 2]) < 1e-20:
omega = -sign(R[1, 2]) * sign(R[0, 2]) * pi / 2
else:
omega = -atan2(R[1, 2], R[0, 2])
# eq 58: extract chi from R
sinchi = sqrt(pow(R[0, 2], 2) + pow(R[1, 2], 2))
sinchi = bound(sinchi)
check(abs(sinchi) <= 1, 'could not compute chi')
# (there are two roots to this equation, but only the first is also
# a solution to R33=cos(chi))
chi = asin(sinchi)
# eq 59: extract phi from R
if abs(R[2, 0]) < 1e-20:
                phi = -sign(R[2, 1]) * pi / 2  # atan2(-R[2, 1], 0) limit
else:
phi = atan2(-R[2, 1], -R[2, 0])
return omega, chi, phi
def checkSolution(omega, chi, phi):
_, _, _, OMEGA, CHI, PHI = createVliegMatrices(
None, None, None, omega, chi, phi)
R = OMEGA * CHI * PHI
RtimesH_phi = R * H_phi
print ("R*H_phi=%s, Q_alpha=%s" %
(R * H_phi.tolist(), Q_alpha.tolist()))
return not differ(RtimesH_phi, Q_alpha, .0001)
# Using Vlieg section 7.2
        # Needed throughout:
[ALPHA, DELTA, GAMMA, _, _, _] = createVliegMatrices(
alpha, delta, gamma, None, None, None)
## Find Ro, one possible solution to equation 46: R*H_phi=Q_alpha
# Normalise hklPhiNorm (As it is currently normalised only to the
# wavevector)
normh = norm(hklPhiNorm)
        check(normh >= 1e-10, "reciprocal lattice vector too close to zero")
H_phi = hklPhiNorm * (1 / normh)
# Create Q_alpha from equation 47, (it comes normalised)
Q_alpha = ((DELTA * GAMMA) - ALPHA.I) * matrix([[0], [1], [0]])
Q_alpha = Q_alpha * (1 / norm(Q_alpha))
if self._getMode().name == '4cPhi':
### Use the fixed value of phi as the final constraint ###
phi = self._getParameter('phi') * TORAD
PHI = calcPHI(phi)
H_chi = PHI * H_phi
omega, chi = _findOmegaAndChiToRotateHchiIntoQalpha(H_chi, Q_alpha)
return (omega, chi, phi, None) # psi = None as not calculated
else:
### Use Bin as the final constraint ###
# Find a solution Ro to Ro*H_phi=Q_alpha
Ro = self._findMatrixToTransformAIntoB(H_phi, Q_alpha)
## equation 50: Find a solution D to D*Q=norm(Q)*[[1],[0],[0]])
D = self._findMatrixToTransformAIntoB(
Q_alpha, matrix([[1], [0], [0]]))
## Find psi and create PSI
# eq 54: compute u=D*Ro*S*[[0],[0],[1]], the surface normal in
# psi frame
[SIGMA, TAU] = createVliegsSurfaceTransformationMatrices(
self._getSigma() * TORAD, self._getTau() * TORAD)
S = TAU * SIGMA
[u1], [u2], [u3] = (D * Ro * S * matrix([[0], [0], [1]])).tolist()
# TODO: If u points along 100, then any psi is a solution. Choose 0
if not differ([u1, u2, u3], [1, 0, 0], 1e-9):
psi = 0
omega, chi, phi = equation49through59(psi)
else:
# equation 53: V=A*(D^-1)
V = ALPHA * D.I
v21 = V[1, 0]
v22 = V[1, 1]
v23 = V[1, 2]
# equation 55
a = v22 * u2 + v23 * u3
b = v22 * u3 - v23 * u2
c = -sin(Bin) - v21 * u1 # TODO: changed sign from paper
# equation 44
# Try first root:
def myatan2(y, x):
if abs(x) < 1e-20 and abs(y) < 1e-20:
return pi / 2
else:
return atan2(y, x)
psi = 2 * myatan2(-(b - sqrt(b * b + a * a - c * c)), -(a + c))
#psi = -acos(c/sqrt(a*a+b*b))+atan2(b,a)# -2*pi
omega, chi, phi = equation49through59(psi)
# if u points along z axis, the psi could have been either 0 or 180
if (not differ([u1, u2, u3], [0, 0, 1], 1e-9) and
abs(psi - pi) < 1e-10):
# Choose 0 to match that read up by angles-to-virtual-angles
psi = 0.
return (omega, chi, phi, psi)
def _anglesToPsi(self, pos, wavelength):
"""
pos assumed in radians. -180<= psi <= 180
"""
# Using Vlieg section 7.2
        # Needed throughout:
[ALPHA, DELTA, GAMMA, OMEGA, CHI, PHI] = createVliegMatrices(
pos.alpha, pos.delta, pos.gamma, pos.omega, pos.chi, pos.phi)
        # Solve equation 49 for psi, the rotation of a reference solution
        # about Q_alpha (equivalently H_phi)
# Find Ro, the reference solution to equation 46: R*H_phi=Q_alpha
# Create Q_alpha from equation 47, (it comes normalised)
Q_alpha = ((DELTA * GAMMA) - ALPHA.I) * matrix([[0], [1], [0]])
Q_alpha = Q_alpha * (1 / norm(Q_alpha))
        # Find H_phi
h, k, l = self._anglesToHkl(pos, wavelength)
H_phi = self._getUBMatrix() * matrix([[h], [k], [l]])
normh = norm(H_phi)
        check(normh >= 1e-10, "reciprocal lattice vector too close to zero")
H_phi = H_phi * (1 / normh)
# Find a solution Ro to Ro*H_phi=Q_alpha
        # This is the reference solution with zero azimuth (psi)
Ro = self._findMatrixToTransformAIntoB(H_phi, Q_alpha)
# equation 48:
R = OMEGA * CHI * PHI
## equation 50: Find a solution D to D*Q=norm(Q)*[[1],[0],[0]])
D = self._findMatrixToTransformAIntoB(Q_alpha, matrix([[1], [0], [0]]))
# solve equation 49 for psi
# D*R = PSI*D*Ro
# D*R*(D*Ro)^-1 = PSI
PSI = D * R * ((D * Ro).I)
# Find psi within PSI as defined in equation 51
PSI_23 = PSI[1, 2]
PSI_33 = PSI[2, 2]
psi = atan2(PSI_23, PSI_33)
#print "PSI: ", PSI.tolist()
return psi
def _findMatrixToTransformAIntoB(self, a, b):
"""
Finds a particular matrix Mo that transforms the unit vector a into the
unit vector b. Thats is it finds Mo Mo*a=b. a and b 3x1 matrixes and Mo
is a 3x3 matrix.
Throws an exception if this is not possible.
"""
        # Maths from the appendix of "Angle calculations
# for a 5-circle diffractometer used for surface X-ray diffraction",
# E. Vlieg, J.F. van der Veen, J.E. Macdonald and M. Miller, J. of
# Applied Cryst. 20 (1987) 330.
# - courtesy of Elias Vlieg again
# equation A2: compute angle xi between vectors a and b
cosxi = dot3(a, b)
try:
cosxi = bound(cosxi)
except ValueError:
raise Exception("Could not compute cos(xi), vectors a=%f and b=%f "
"must be of unit length" % (norm(a), norm(b)))
xi = acos(cosxi)
# Mo is identity matrix if xi zero (math below would blow up)
if abs(xi) < 1e-10:
return I
# equation A3: c=cross(a,b)/sin(xi)
c = cross3(a, b) * (1 / sin(xi))
# equation A4: find D matrix that transforms a into the frame
        # x = a; y = c x a; z = c.
a1 = a[0, 0]
a2 = a[1, 0]
a3 = a[2, 0]
c1 = c[0, 0]
c2 = c[1, 0]
c3 = c[2, 0]
D = matrix([[a1, a2, a3],
[c2 * a3 - c3 * a2, c3 * a1 - c1 * a3, c1 * a2 - c2 * a1],
[c1, c2, c3]])
# equation A5: create Xi to rotate by xi about z-axis
XI = matrix([[cos(xi), -sin(xi), 0],
[sin(xi), cos(xi), 0],
[0, 0, 1]])
# eq A6: compute Mo
return D.I * XI * D
def _findOmegaAndChiToRotateHchiIntoQalpha(h_chi, q_alpha):
"""
(omega, chi) = _findOmegaAndChiToRotateHchiIntoQalpha(H_chi, Q_alpha)
Solves for omega and chi in OMEGA*CHI*h_chi = q_alpha where h_chi and
q_alpha are 3x1 matrices with unit length. Omega and chi are returned in
radians.
Throws an exception if this is not possible.
"""
def solve(a, b, c):
"""
x1,x2 = solve(a , b, c)
solves for the two solutions to x in equations of the form
a*sin(x) + b*cos(x) = c
by using the trigonometric identity
        a*sin(x) + b*cos(x) = sqrt(a**2 + b**2) * sin(x + p)
where
p = atan(b/a) + {0 if a>=0
{pi if a<0
"""
if a == 0:
p = pi / 2 if b >= 0 else - pi / 2
else:
p = atan(b / a)
if a < 0:
p = p + pi
guts = c / sqrt(a ** 2 + b ** 2)
if guts < -1:
guts = -1
elif guts > 1:
guts = 1
left1 = asin(guts)
left2 = pi - left1
return (left1 - p, left2 - p)
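    # Worked example (editor's note): solve(1, 0, 1) seeks sin(x) = 1 with
    # p = atan(0/1) = 0, so both returned roots coincide at x = pi/2.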
def ne(a, b):
"""
shifts a and b in between -pi and pi and tests for near equality
"""
def shift(a):
if a > pi:
return a - 2 * pi
elif a <= -pi:
return a + 2 * pi
else:
return a
return abs(shift(a) - shift(b)) < .0000001
# 1. Compute some solutions
h_chi1 = h_chi[0, 0]
h_chi2 = h_chi[1, 0]
h_chi3 = h_chi[2, 0]
q_alpha1 = q_alpha[0, 0]
q_alpha2 = q_alpha[1, 0]
q_alpha3 = q_alpha[2, 0]
try:
# a) Solve for chi using Equation 3
chi1, chi2 = solve(-h_chi1, h_chi3, q_alpha3)
# b) Solve for omega Equation 1 and each chi
B = h_chi1 * cos(chi1) + h_chi3 * sin(chi1)
eq1omega11, eq1omega12 = solve(h_chi2, B, q_alpha1)
B = h_chi1 * cos(chi2) + h_chi3 * sin(chi2)
eq1omega21, eq1omega22 = solve(h_chi2, B, q_alpha1)
# c) Solve for omega Equation 2 and each chi
A = -h_chi1 * cos(chi1) - h_chi3 * sin(chi1)
eq2omega11, eq2omega12 = solve(A, h_chi2, q_alpha2)
A = -h_chi1 * cos(chi2) - h_chi3 * sin(chi2)
eq2omega21, eq2omega22 = solve(A, h_chi2, q_alpha2)
except ValueError, e:
raise ValueError(
str(e) + ":\nProblem in fixed-phi calculation for:\nh_chi: " +
str(h_chi.tolist()) + " q_alpha: " + str(q_alpha.tolist()))
# 2. Choose values of chi and omega that are solutions to equations 1 and 2
solutions = []
# a) Check the chi1 solutions
print "_findOmegaAndChiToRotateHchiIntoQalpha:"
if ne(eq1omega11, eq2omega11) or ne(eq1omega11, eq2omega12):
# print "1: eq1omega11, chi1 = ", eq1omega11, chi1
solutions.append((eq1omega11, chi1))
if ne(eq1omega12, eq2omega11) or ne(eq1omega12, eq2omega12):
# print "2: eq1omega12, chi1 = ", eq1omega12, chi1
solutions.append((eq1omega12, chi1))
# b) Check the chi2 solutions
if ne(eq1omega21, eq2omega21) or ne(eq1omega21, eq2omega22):
# print "3: eq1omega21, chi2 = ", eq1omega21, chi2
solutions.append((eq1omega21, chi2))
if ne(eq1omega22, eq2omega21) or ne(eq1omega22, eq2omega22):
# print "4: eq1omega22, chi2 = ", eq1omega22, chi2
solutions.append((eq1omega22, chi2))
# print solutions
# print "*"
if len(solutions) == 0:
e = "h_chi: " + str(h_chi.tolist())
e += " q_alpha: " + str(q_alpha.tolist())
e += ("\nchi1:%4f eq1omega11:%4f eq1omega12:%4f eq2omega11:%4f "
"eq2omega12:%4f" % (chi1 * TODEG, eq1omega11 * TODEG,
eq1omega12 * TODEG, eq2omega11 * TODEG, eq2omega12 * TODEG))
e += ("\nchi2:%4f eq1omega21:%4f eq1omega22:%4f eq2omega21:%4f "
"eq2omega22:%4f" % (chi2 * TODEG, eq1omega21 * TODEG,
eq1omega22 * TODEG, eq2omega21 * TODEG, eq2omega22 * TODEG))
raise Exception("Could not find simultaneous solution for this fixed "
"phi mode problem\n" + e)
if not PREFER_POSITIVE_CHI_SOLUTIONS:
return solutions[0]
positive_chi_solutions = [sol for sol in solutions if sol[1] > 0]
if len(positive_chi_solutions) == 0:
print "WARNING: A +ve chi solution was requested, but none were found."
print " Returning a -ve one. Try the mapper"
return solutions[0]
if len(positive_chi_solutions) > 1:
print ("INFO: Multiple +ve chi solutions were found [(omega, chi) ...]"
" = " + str(positive_chi_solutions))
print " Returning the first"
return positive_chi_solutions[0]
|
gpl-3.0
| -7,248,182,833,805,356,000
| 37.557261
| 79
| 0.552208
| false
| 3.42327
| false
| false
| false
|
vollov/py-parser
|
src/xmlp/coverage.py
|
1
|
2568
|
#!/usr/bin/python
from xml.dom.minidom import parse
import xml.dom.minidom, os
current_directory = os.path.dirname(os.path.abspath(__file__))
data_directory = os.path.join(current_directory, '../data')
file_path = os.path.join(data_directory, 'coverages.xml')
# Open XML document using minidom parser
DOMTree = xml.dom.minidom.parse(file_path)
collection = DOMTree.documentElement
items = collection.getElementsByTagName("GIOSXMLEntry")
for item in items:
type_code = item.getElementsByTagName('GIOSXMLCd')[0]
occ_ind = item.getElementsByTagName('OccasionalDriverInd')
if occ_ind:
description = item.getElementsByTagName('EnglishDesc')[0]
print "TypeCode: {0} OCC: {1} desc: {2}".format(type_code.childNodes[0].data, occ_ind[0].childNodes[0].data, description.childNodes[0].data)
#
# <collection shelf="New Arrivals">
# <movie title="Enemy Behind">
# <type>War, Thriller</type>
# <format>DVD</format>
# <year>2003</year>
# <rating>PG</rating>
# <stars>10</stars>
# <description>Talk about a US-Japan war</description>
# </movie>
# <movie title="Transformers">
# <type>Anime, Science Fiction</type>
# <format>DVD</format>
# <year>1989</year>
# <rating>R</rating>
# <stars>8</stars>
# <description>A schientific fiction</description>
# </movie>
# <movie title="Trigun">
# <type>Anime, Action</type>
# <format>DVD</format>
# <episodes>4</episodes>
# <rating>PG</rating>
# <stars>10</stars>
# <description>Vash the Stampede!</description>
# </movie>
# <movie title="Ishtar">
# <type>Comedy</type>
# <format>VHS</format>
# <rating>PG</rating>
# <stars>2</stars>
# <description>Viewable boredom</description>
# </movie>
# </collection>
# if collection.hasAttribute("shelf"):
# print "Root element : %s" % collection.getAttribute("shelf")
# Get all the movies in the collection
# movies = collection.getElementsByTagName("movie")
# Print detail of each movie.
# for movie in movies:
# print "*****Movie*****"
# if movie.hasAttribute("title"):
# print "Title: %s" % movie.getAttribute("title")
#
# type = movie.getElementsByTagName('type')[0]
# print "Type: %s" % type.childNodes[0].data
# format = movie.getElementsByTagName('format')[0]
# print "Format: %s" % format.childNodes[0].data
# rating = movie.getElementsByTagName('rating')[0]
# print "Rating: %s" % rating.childNodes[0].data
# description = movie.getElementsByTagName('description')[0]
# print "Description: %s" % description.childNodes[0].data
|
mit
| 2,440,359,789,388,073,000
| 30.716049
| 151
| 0.666667
| false
| 3.053508
| false
| false
| false
|
endrjuskr/studies
|
MRJP/LatteCompilerPython/src/lattepar.py
|
1
|
10557
|
__author__ = 'Andrzej Skrodzki - as292510'
from .LatteParsers.LatteTypes import *
from .LatteParsers.LatteExpressions import *
from .LatteParsers.LatteParameters import *
from .LatteParsers.LatteStatements import *
from .LatteParsers.LatteTopDefinitions import *
from .LatteExceptions import *
import ply.yacc as yacc
from tokrules import tokens
exception_list = []
precedence = (
('nonassoc', 'GE', 'GT', 'LE', 'LT', 'NE'),
('right', 'AND', 'OR'),
('nonassoc', 'EQ'),
('left', 'PLUS', 'MINUS'),
('left', 'PLUSPLUS', 'MINUSMINUS'),
('right', 'UNOT', 'UMINUS'),
)
# Program definition
def p_program(p):
'program : listtopdef'
p[0] = Program(p[1])
# List definitions
def p_list_expr(p):
'''listexpr :
| expr
| listexpr COMMA expr'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last expression
p[0] = [p[1]]
else:
# list of expressions
p[0] = p[1]
p[0].append(p[3])
def p_list_topdef(p):
'''listtopdef :
| topdef
| listtopdef topdef'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last function definition
p[0] = [p[1]]
else:
# list of function definitions
p[0] = p[1]
p[0].append(p[2])
def p_list_stmt(p):
'''liststmt : stmt
| liststmt stmt'''
if len(p) == 2:
# last statement
p[0] = [p[1]]
else:
# list of statements
p[0] = p[1]
p[0].append(p[2])
def p_list_fields(p):
'''listfields : field
| listfields field'''
if len(p) == 2:
# last statement
p[0] = [p[1]]
else:
# list of statements
p[0] = p[1]
p[0].append(p[2])
def p_list_item(p):
'''listitem : item
| listitem COMMA item'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last item
p[0] = [p[1]]
else:
# list of items
p[0] = p[1]
p[0].append(p[3])
def p_list_arg(p):
'''listarg :
| arg
| listarg COMMA arg'''
if len(p) == 1:
# empty list
p[0] = []
elif len(p) == 2:
# last argument
p[0] = [p[1]]
else:
        # list of arguments
p[0] = p[1]
p[0].append(p[3])
# Item productions
def p_item_noinit(p):
'item : ID'
p[0] = NoInitItem(p[1], p.lineno(1), p.lexpos(1))
def p_item_init(p):
'item : ID EQUALS expr'
p[0] = InitItem(p[1], p[3], p.lineno(1), p.lexpos(1))
# Argument definition
def p_arg(p):
'arg : type ID'
p[0] = Arg(p[1], p[2], p.lineno(2), p.lexpos(2))
def p_arg_o(p):
'arg : ID ID'
p[0] = Arg(Type(p[1]), p[2], p.lineno(2), p.lexpos(2))
def p_arg_oa(p):
'arg : ID LARRAY RARRAY ID'
p[0] = Arg(ArrayType(Type(p[1])), p[4], p.lineno(2), p.lexpos(2))
def p_field_s(p):
'field : type ID SEMI'
p[0] = Field(p[1], p[2], p.lineno(2), p.lexpos(2))
def p_field_o(p):
'field : ID ID SEMI'
p[0] = Field(Type(p[1]), p[2], p.lineno(2), p.lexpos(2))
def p_field_oa(p):
'field : ID LARRAY RARRAY ID SEMI'
p[0] = Field(ArrayType(Type(p[1])), p[4], p.lineno(2), p.lexpos(2))
def p_methoddef(p):
'field : type ID LPAREN listarg RPAREN block'
p[0] = FnDef(p[1], p[2], p[4], p[6], p.lineno(2))
def p_methoddef_o(p):
'field : ID ID LPAREN listarg RPAREN block'
p[0] = FnDef(Type(p[1]), p[2], p[4], p[6], p.lineno(2))
def p_methoddef_oa(p):
'field : ID LARRAY RARRAY ID LPAREN listarg RPAREN block'
p[0] = FnDef(ArrayType(Type(p[1])), p[4], p[6], p[8], p.lineno(2))
# Function definition
def p_class_extends(p):
'''ext :
| EXTENDS ID'''
if len(p) == 1:
p[0] = []
elif len(p) == 3:
p[0] = [p[2]]
def p_classdef(p):
'topdef : CLASS ID ext LBRACE listfields RBRACE'
p[0] = ClassDef(p[2], p[3], p[5], p.lineno(2))
def p_fndef(p):
'topdef : type ID LPAREN listarg RPAREN block'
p[0] = FnDef(p[1], p[2], p[4], p[6], p.lineno(2))
def p_fndef_o(p):
'topdef : ID ID LPAREN listarg RPAREN block'
p[0] = FnDef(Type(p[1]), p[2], p[4], p[6], p.lineno(2))
def p_fndef_oa(p):
'topdef : ID LARRAY RARRAY ID LPAREN listarg RPAREN block'
p[0] = FnDef(ArrayType(Type(p[1])), p[4], p[6], p[8], p.lineno(2))
def p_block(p):
'''block : LBRACE RBRACE
| LBRACE liststmt RBRACE'''
if len(p) == 3:
p[0] = Block([])
else:
p[0] = Block(p[2])
# Statement definitions
def p_statement_empty(p):
'stmt : SEMI'
p[0] = EmptyStmt(p.lineno(1), p.lexpos(1))
def p_statement_block(p):
'stmt : block'
p[0] = BStmt(p[1], p.lineno(1))
def p_statement_decl(p):
'stmt : type listitem SEMI'
p[0] = DeclStmt(p[1], p[2], p.lineno(3), p.lexpos(3))
def p_statement_decl_0(p):
'stmt : ID listitem SEMI'
p[0] = DeclStmt(Type(p[1]), p[2], p.lineno(1), p.lexpos(1))
def p_statement_decl_1(p):
'stmt : ID LARRAY RARRAY listitem SEMI'
p[0] = DeclStmt(ArrayType(Type(p[1])), p[4], p.lineno(1), p.lexpos(1))
def p_statement_var_ass(p):
'''stmt : expr6 EQUALS expr SEMI '''
p[0] = VarAssStmt(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_statement_incr(p):
'stmt : expr6 PLUSPLUS SEMI'
p[0] = IncrStmt(p[1], p.lineno(2), p.lexpos(2))
def p_statement_decr(p):
'stmt : expr6 MINUSMINUS SEMI'
p[0] = DecrStmt(p[1], p.lineno(2), p.lexpos(2))
def p_statement_ret(p):
'stmt : RETURN expr SEMI'
p[0] = RetStmt(p[2], p.lineno(1), p.lexpos(1))
def p_statement_vret(p):
'stmt : RETURN SEMI'
p[0] = VRetStmt(p.lineno(1), p.lexpos(1))
def p_statement_cond(p):
'stmt : IF LPAREN expr RPAREN stmt'
p[0] = CondStmt(p[3], p[5], p.lineno(1), p.lexpos(1))
def p_statement_condelse(p):
'stmt : IF LPAREN expr RPAREN stmt ELSE stmt'
p[0] = CondElseStmt(p[3], p[5], p[7], p.lineno(1), p.lexpos(1))
def p_statement_while(p):
'stmt : WHILE LPAREN expr RPAREN stmt'
p[0] = WhileStmt(p[3], p[5], p.lineno(1), p.lexpos(1))
def p_statement_sexp(p):
'stmt : expr SEMI'
p[0] = SExpStmt(p[1], p.lineno(2), p.lexpos(2))
def p_statement_for(p):
'stmt : FOR LPAREN type_s ID COL expr RPAREN stmt'
p[0] = ForStmt(p[4], p[3], p[6], p[8], p.lineno(1), p.lexpos(1))
def p_statement_for_2(p):
'stmt : FOR LPAREN ID ID COL expr RPAREN stmt'
p[0] = ForStmt(p[4], Type(p[3]), p[6], p[8], p.lineno(1), p.lexpos(1))
# Expression definitions
def p_expression_array_init(p):
'expr6 : NEW type_s LARRAY expr RARRAY'
p[0] = EArrayInit(p[2], p[4], p.lineno(1), p.lexpos(1))
def p_expression_array_init_2(p):
'expr6 : NEW ID LARRAY expr RARRAY'
p[0] = EArrayInit(Type(p[2]), p[4], p.lineno(1), p.lexpos(1))
def p_expression_object_init(p):
'expr6 : NEW ID'
p[0] = EObjectInit(Type(p[2]), p.lineno(1), p.lexpos(1))
def p_expression_var(p):
'expr6 : ID'
p[0] = EVar(p[1], p.lineno(1), p.lexpos(1))
def p_expression_field(p):
'expr6 : expr6 DOT ID'
p[0] = EObjectField(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_field_a(p):
'expr6 : expr6 DOT ID LARRAY expr RARRAY'
p[0] = EObjectFieldApp(p[1], p[3], p[5], p.lineno(2), p.lexpos(2))
def p_expression_array(p):
'expr6 : ID LARRAY expr RARRAY'
p[0] = EArrayApp(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_int(p):
'expr6 : NUMBER'
p[0] = ELitInt(p[1], p.lineno(1), p.lexpos(1))
def p_expression_null(p):
'''expr6 : LPAREN ID RPAREN NULL '''
p[0] = ELitNull(p[2], p.lineno(1), p.lexpos(1))
def p_expression_boolean(p):
'''expr6 : TRUE
| FALSE'''
p[0] = ELitBoolean(p[1], p.lineno(1), p.lexpos(1))
def p_expression_app(p):
'expr6 : ID LPAREN listexpr RPAREN'
p[0] = EApp(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_method_app(p):
'expr6 : expr6 DOT ID LPAREN listexpr RPAREN'
p[0] = EMethodApp(p[1], p[3], p[5], p.lineno(2), p.lexpos(2))
def p_expression_group(p):
'expr6 : LPAREN expr RPAREN'
p[0] = p[2]
def p_expression_string(p):
'expr6 : SENTENCE'
p[0] = EString(p[1], p.lineno(1), p.lexpos(1))
def p_expression_neg(p):
'expr5 : MINUS expr6 %prec UMINUS'
p[0] = ENeg(p[2], p.lineno(1), p.lexpos(1))
def p_expression_not_1(p):
'''expr5 : expr6'''
p[0] = p[1]
def p_expression_not_2(p):
'''expr5 : NOT expr6 %prec UNOT'''
p[0] = ENot(p[2], p.lineno(1), p.lexpos(1))
def p_expression_mul_1(p):
'''expr4 : expr5'''
p[0] = p[1]
def p_mulop(p):
'''mulop : TIMES
| DIVIDE
| MOD'''
p[0] = p[1]
def p_expression_mul_2(p):
'''expr4 : expr4 mulop expr5'''
p[0] = EMul(p[1], p[3], p[2], p[1].no_line, p[1].pos + 1)
def p_addop(p):
'''addop : PLUS
| MINUS'''
p[0] = p[1]
def p_expression_add_1(p):
'''expr3 : expr3 addop expr4'''
p[0] = EAdd(p[1], p[3], p[2], p[1].no_line, p[1].pos + 1)
def p_expression_add_3(p):
'''expr3 : expr4'''
p[0] = p[1]
def p_relop(p):
'''relop : LT
| LE
| GT
| GE
| EQ
| NE'''
p[0] = p[1]
def p_expression_rel_1(p):
'''expr2 : expr2 relop expr3'''
p[0] = ERel(p[1], p[3], p[2], p[1].no_line, p[1].pos + 1)
def p_expression_rel_2(p):
'''expr2 : expr3'''
p[0] = p[1]
def p_expression_and_1(p):
'''expr1 : expr2 AND expr1'''
p[0] = EAnd(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_and_2(p):
'''expr1 : expr2'''
p[0] = p[1]
def p_expression_or_1(p):
'''expr : expr1 OR expr'''
p[0] = EOr(p[1], p[3], p.lineno(2), p.lexpos(2))
def p_expression_or_2(p):
'''expr : expr1'''
p[0] = p[1]
# Type definition
def p_type_s(p):
'''type_s : INT
| STRING
| VOID
| BOOLEAN '''
p[0] = Type(p[1])
def p_type_1(p):
'''type : type_s'''
p[0] = p[1]
def p_type_a(p):
'''type : type_s LARRAY RARRAY'''
p[0] = ArrayType(p[1])
# Error definition
def p_error(p):
if p is None:
return
exception_list.append(SyntaxException("Wrong expression '" + str(p.value) + "'.", p.lineno, pos=p.lexpos))
tok = None
while 1:
tok = yacc.token()
if not tok:
break
if tok.type == 'SEMI':
tok = yacc.token()
yacc.errok()
return tok
def get_parser():
return yacc.yacc(write_tables=0, debug=0, outputdir="src")
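# --- Editor's usage sketch (hedged; not part of this module). A driver
# elsewhere in the package is assumed to pair the tokrules lexer with this
# parser and then inspect exception_list for collected syntax errors:
#
#     import ply.lex as lex
#     import tokrules
#     lexer = lex.lex(module=tokrules)
#     program = get_parser().parse(source_code, lexer=lexer)
#     if exception_list:
#         ...  # report the SyntaxExceptions gathered by p_error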
|
apache-2.0
| -9,036,227,321,942,147,000
| 19.906931
| 110
| 0.531401
| false
| 2.497516
| false
| false
| false
|
ninjawil/weather-station
|
scripts/setup.py
|
1
|
4064
|
#-------------------------------------------------------------------------------
#
# The MIT License (MIT)
#
# Copyright (c) 2015 William De Freitas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#-------------------------------------------------------------------------------
#!/usr/bin/env python
'''Sets up the environment to run the weather station.
Begins by checking that an RRD file exists and that the data sources are
correct. If no RRD file is found then a new one is created.
Initiates scripts via cronjobs.'''
#===============================================================================
# Import modules
#===============================================================================
# Standard Library
import os
import sys
import time
# Third party modules
# Application modules
import log
import settings as s
import rrd_tools
#===============================================================================
# MAIN
#===============================================================================
def main():
'''Entry point for script'''
script_name = os.path.basename(sys.argv[0])
#---------------------------------------------------------------------------
# Set up logger
#---------------------------------------------------------------------------
logger = log.setup('root', '{folder}/logs/{script}.log'.format(
folder= s.SYS_FOLDER,
script= script_name[:-3]))
logger.info('')
logger.info('--- Script {script} Started ---'.format(script= script_name))
#---------------------------------------------------------------------------
# SET UP RRD DATA AND TOOL
#---------------------------------------------------------------------------
rrd = rrd_tools.RrdFile('{fd1}{fd2}{fl}'.format(fd1= s.SYS_FOLDER,
fd2= s.DATA_FOLDER,
fl= s.RRDTOOL_RRD_FILE))
if not os.path.exists(s.RRDTOOL_RRD_FILE):
rrd.create_file(s.SENSOR_SET,
s.RRDTOOL_RRA,
s.UPDATE_RATE,
s.RRDTOOL_HEARTBEAT,
int(time.time() + s.UPDATE_RATE))
logger.info('RRD file not found. New file created')
elif sorted(rrd.ds_list()) != sorted(list(s.SENSOR_SET.keys())):
logger.error('Data sources in RRD file does not match set up.')
sys.exit()
else:
logger.info('RRD file found and checked OK')
#---------------------------------------------------------------------------
# SCRIPTS
#---------------------------------------------------------------------------
#Set up CRONTAB
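    # Editor's sketch (hedged): one way to register a periodic logging job
    # is python-crontab; the script path and schedule are placeholders.
    #
    #     from crontab import CronTab
    #     cron = CronTab(user=True)
    #     job = cron.new(command='python {0}/scripts/log_data.py'.format(
    #         s.SYS_FOLDER))
    #     job.minute.every(max(1, s.UPDATE_RATE // 60))
    #     cron.write()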
#===============================================================================
# BOILER PLATE
#===============================================================================
if __name__=='__main__':
sys.exit(main())
|
mit
| -6,399,027,101,030,141,000
| 36.62963
| 80
| 0.450295
| false
| 5.271077
| false
| false
| false
|
ameya30/IMaX_pole_data_scripts
|
imax_lp_max_/imax_rotate_Q_U.py
|
1
|
3065
|
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import minimize_scalar
from astropy.io import fits
def Q_eq(phi):
    # uMes and qMes are set at module scope before minimize_scalar calls this
sumU = 0
for wv in range(0, 5):
uSing = uMes[wv]
qSing = qMes[wv]
uNew = -1 * qSing * np.sin(phi) + uSing * np.cos(phi)
sumU += np.square(uNew)
return sumU
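# Editor's note: minimising Q_eq over phi finds, per pixel, the rotation of
# the (Q, U) pair that drives the summed squared U signal toward zero over
# the five wavelength points, concentrating linear polarisation in new Q.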
plot_figs = 0
# Load in array
input_list = glob.glob('../Data/mean_rem_output_*.fits')
for i in range (0, len(input_list)):
fullArr = fits.open(input_list[i])
fullArr = fullArr[0].data
save_name = '../Data/imax_lp_max_' + input_list[i].split('_')[3]
save_angs = '../Data/imax_roat_angle_Q_U_' + input_list[i].split('_')[3]
print save_name
print save_angs
fullDim = np.shape(fullArr)
angArr = np.empty(shape = (fullDim[2], fullDim[3]))
uNew = np.empty(shape = (fullDim[1], fullDim[2], fullDim[3]))
qNew = np.empty(shape = (fullDim[1], fullDim[2], fullDim[3]))
for x in range (0, fullDim[3]):
for y in range (0, fullDim[2]):
qMes = fullArr[1, :, y, x]
uMes = fullArr[2, :, y, x]
res = minimize_scalar(Q_eq, bounds=(0, np.pi), method='bounded')
angle = res['x']
angArr[y, x] = angle
for wv in range (0, 5):
uNew[wv, y, x] = -1 * fullArr[1, wv, y, x] * np.sin(angle) + fullArr[2, wv, y, x] * np.cos(angle)
qNew[wv, y, x] = fullArr[1, wv, y, x] * np.cos(angle) + fullArr[2, wv, y, x] * np.sin(angle)
hdu_ang = fits.PrimaryHDU(angArr)
hdu_max = fits.PrimaryHDU(qNew)
hdu_ang.writeto(save_angs)
hdu_max.writeto(save_name)
if plot_figs == 1:
fig, axes = plt.subplots(ncols = 3,
nrows = 1,
figsize = (12, 18))
fig.subplots_adjust(left = 0.04,
right = 0.97,
top = 0.99,
bottom = 0.05,
wspace = 0.08,
hspace = 0.15)
imAng = axes[0].imshow(angArr[:, :], cmap = 'gray', clim = (-0.05,0.05))
minpl = axes[1].imshow(qNew[1, :, :], cmap = 'gray', clim = (-0.05,0.05))
maxpl = axes[2].imshow(uNew[1, :, :], cmap = 'gray', clim = (-0.05,0.05))
for ax in axes:
ax.invert_yaxis()
plt.savefig('../Figures/imax_angle_arr.png', dpi = 500)
fig, axes = plt.subplots(ncols = 2,
nrows = 2,
figsize = (12, 6))
axes[1,1].plot(uNew[:, 102, 636])
axes[0,0].plot(fullArr[1,:, 102, 636])
axes[0,1].plot(fullArr[2,:, 102, 636])
axes[1,0].plot(qNew[:, 102, 636])
        axes[0,0].set_title('Measured Q')
        axes[0,1].set_title('Measured U')
axes[1,0].set_title('new Q' )
axes[1,1].set_title('new U')
plt.savefig('../Figures/imax_roation_line_change.png', dpi = 500)
|
mit
| -1,030,299,242,345,325,800
| 29.346535
| 114
| 0.501142
| false
| 2.896975
| false
| false
| false
|
aiorchestra/aiorchestra-openstack-plugin
|
openstack_plugin/tasks/compute.py
|
1
|
6731
|
# Author: Denys Makogon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aiorchestra.core import utils
from openstack_plugin.common import clients
from openstack_plugin.compute import instances
@utils.operation
async def create(node, inputs):
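    # Creates a Nova server for this TOSCA node: resolves flavor/image from
    # node capabilities and artifacts, collects injected files and userdata,
    # then stores the resulting server details in runtime properties.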
node.context.logger.info('[{0}] - Attempting to create '
'compute instance.'.format(node.name))
nova = clients.openstack.nova(node)
glance = clients.openstack.glance(node)
host_cap = node.get_capability('host')
if not host_cap:
raise Exception('[{0}] - Unable to get host.flavor '
'capability.'.format(node.name))
image_artifact = node.get_artifact_by_name('image_ref')
if not image_artifact:
raise Exception('[{0}] - Unable to get image '
'node artifact.'.format(node.name))
file_injection_artifacts = setup_injections_from_artifacts(
node.get_artifact_from_type(
'tosca.artifacts.openstack.compute.injection_file'))
userdata = None
userdata_artifact = node.get_artifact_by_name('userdata')
if userdata_artifact:
script_path = userdata_artifact.get('script')
with open(script_path, 'r') as u_d:
userdata = u_d.read()
# in case if file injection was done using dedicated node
files = node.runtime_properties.get('injections', {})
files.update(file_injection_artifacts)
flavor = host_cap['flavor']
image = image_artifact['id']
(compute_name, compute_id, config_drive, ssh_key, nics) = (
node.properties['compute_name'],
node.properties.get('compute_id'),
node.properties.get('config_drive'),
node.runtime_properties.get(
'ssh_keypair', {'name': None})['name'],
node.runtime_properties.get('nics', [])
)
identifier = compute_name if not compute_id else compute_id
instance = await instances.create(
node.context, nova, glance,
identifier, flavor, image,
ssh_keyname=ssh_key, nics=nics,
config_drive=config_drive,
use_existing=True if compute_id else False,
files=files,
userdata=userdata,
)
networks = [port['net-id'] for port in nics]
node.batch_update_runtime_properties(**{
'compute_id': instance.id,
'server': instance.__dict__,
'status': instance.status,
'networks': networks,
'ports': nics,
})
def setup_injections_from_artifacts(injection_artifacts):
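    # Build a {destination_path: file_content} mapping from injection-file
    # artifacts by reading each local source file.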
mapping = {}
for artifact in injection_artifacts:
source = artifact['source']
destination = artifact['destination']
with open(source, 'r') as s:
mapping[destination] = s.read()
return mapping
@utils.operation
async def setup_injection(node, inputs):
node.context.logger.info('[{0}] - Setting up file injection.'
.format(node.name))
local_file = node.properties['source']
remote_file_path = node.properties['destination']
with open(local_file, 'r') as injection:
local_file_content = injection.read()
node.update_runtime_properties(
'injection', {remote_file_path: local_file_content})
@utils.operation
async def inject_file(source, target, inputs):
source.context.logger.info('[{0} -----> {1}] - Injecting file to '
'compute instance.'
.format(target.name, source.name))
files = source.runtime_properties.get('injections', {})
files.update(target.runtime_properties['injection'])
source.update_runtime_properties('injections', files)
@utils.operation
async def eject_file(source, target, inputs):
source.context.logger.info('[{0} --X--> {1}] - Ejecting file from '
'compute instance.'
.format(target.name, source.name))
if 'injections' in source.runtime_properties:
del source.runtime_properties['injections']
@utils.operation
async def start(node, inputs):
task_retries = inputs.get('task_retries', 10)
task_retry_interval = inputs.get('task_retry_interval', 10)
nova = clients.openstack.nova(node)
use_existing = True if node.properties.get('compute_id') else False
name_or_id = node.runtime_properties['compute_id']
node.context.logger.info('[{0}] - Attempting to start '
'compute instance.'.format(node.name))
await instances.start(
node.context, nova, name_or_id,
use_existing=use_existing,
task_retries=task_retries,
task_retry_interval=task_retry_interval,
)
@utils.operation
async def delete(node, inputs):
node.context.logger.info('[{0}] - Attempting to delete compute '
'instance.'.format(node.name))
task_retries = inputs.get('task_retries', 10)
task_retry_interval = inputs.get('task_retry_interval', 10)
use_existing = True if node.properties.get('compute_id') else False
name_or_id = node.runtime_properties['compute_id']
nova = clients.openstack.nova(node)
await instances.delete(node.context, nova, name_or_id,
use_existing=use_existing,
task_retry_interval=task_retry_interval,
task_retries=task_retries)
    # create() stored the instance id under 'compute_id', not 'id'
    for attr in ['compute_id', 'server', 'status',
                 'networks', 'ports']:
if attr in node.runtime_properties:
del node.runtime_properties[attr]
@utils.operation
async def stop(node, inputs):
node.context.logger.info('[{0}] - Attempting to stop compute '
'instance.'.format(node.name))
task_retries = inputs.get('task_retries', 10)
task_retry_interval = inputs.get('task_retry_interval', 10)
nova = clients.openstack.nova(node)
use_existing = True if node.properties.get('compute_id') else False
name_or_id = node.runtime_properties['compute_id']
await instances.stop(node.context, nova, name_or_id,
use_existing=use_existing,
task_retries=task_retries,
task_retry_interval=task_retry_interval)
|
apache-2.0
| 5,980,241,100,788,522,000
| 36.814607
| 78
| 0.626504
| false
| 3.924781
| false
| false
| false
|
kancom/cca
|
grabber/views.py
|
1
|
1536
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, ButtonHolder, Submit
from crispy_forms.bootstrap import Field, InlineRadios, TabHolder, Tab
from django import forms
from . import models
from . import grabber
class NameForm(forms.ModelForm):
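    """Single-field form (the source site URL) rendered with crispy-forms."""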
class Meta:
fields = ('url',)
model = models.Source_site
def __init__(self, *args, **kwargs):
super(NameForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'url',
ButtonHolder(
Submit('start', 'Start', css_class='btn-primary')
)
)
def index(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = NameForm(request.POST)
# check whether it's valid:
if form.is_valid():
# process the data in form.cleaned_data as required
# ...
# redirect to a new URL:
url=form.cleaned_data['url']
grabber.grab(url)
return HttpResponseRedirect('/admin/')
# if a GET (or any other method) we'll create a blank form
else:
        # first() tolerates an empty table (unbound form) where get() would
        # raise DoesNotExist, and also tolerates multiple rows
        sources = models.Source_site.objects.first()
        form = NameForm(instance=sources)
return render(request, 'index.html', {'form': form})
|
apache-2.0
| -3,808,581,804,322,514,000
| 32.391304
| 76
| 0.617839
| false
| 4.254848
| false
| false
| false
|
ldionne/nstl-lang
|
nstl/sema/scope.py
|
1
|
2717
|
"""Interface to store lexical scope related information
used during semantic analysis."""
import sys
from itertools import chain
from ..helpers import orderedset
class Scope(object):
"""Parent class for all scopes.
Implements generic lexical scoping operations.
_parent The parent scope of this scope, or None for outermost scope.
_entity The entity associated with this scope, or None. For example,
the entity of a namespace scope is the namespace itself.
_decls An ordered set keeping track of all declarations in this scope.
"""
def __init__(self, parent=None, entity=None):
        if not (isinstance(parent, Scope) or parent is None):
            raise TypeError(
                "invalid type {} for parent scope. must be Scope instance or None."
                .format(type(parent)))
self._parent = parent
self._entity = entity
self._decls = orderedset.OrderedSet()
def __contains__(self, decl):
"""Return whether a declaration was declared in this scope."""
return decl in self._decls
def __iter__(self):
"""Iterate over all the declarations made in this scope.
Iteration is done over the declarations in the order they were added.
"""
for decl in self._decls:
yield decl
def add(self, decl):
"""Add a declaration in this scope."""
self._decls.add(decl)
def is_outermost(self):
"""Return whether this scope is an outermost scope."""
return self._parent is None
@property
def parent(self):
"""Return the direct parent of this scope, or None when outermost."""
return self._parent
def parents(self):
"""Iterate over the parents of this scope.
Iteration is done in lexical order, so innermost parents
are visited first.
"""
        if self.is_outermost():
            return  # PEP 479: use return, not raise StopIteration, to end a generator
yield self._parent
for parent in self._parent.parents():
yield parent
def show(self, buf=sys.stdout, decls=False):
"""Write a formatted description of a scope and its parents to a buffer.
If decls is True, the declarations contained in each scope are shown.
"""
lead = ''
for scope in reversed(list(chain([self], self.parents()))):
buf.write(lead + "scope owned by {} :\n".format(self._entity))
if decls:
buf.write(lead + str(self._decls) + "\n")
lead = lead + ' ' * 2
if __name__ == "__main__":
pass
|
bsd-3-clause
| -5,247,343,988,452,406,000
| 31.73494
| 80
| 0.577475
| false
| 4.800353
| false
| false
| false
|
DOTOCA/plugin.video.netflixbmc
|
default.py
|
1
|
62469
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import re
import json
import time
import shutil
import threading
import subprocess
import xbmc
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
from resources.lib import chrome_cookies
trace_on = False
addon = xbmcaddon.Addon()
if addon.getSetting("sslEnable") == "true":
try:
# Add support for newer SSL connections in requests
# Ensure OpenSSL is installed with system package manager on linux
import resources
sys.path.append(os.path.dirname(resources.lib.__file__))
import resources.lib.pyOpenSSL
import OpenSSL
# https://urllib3.readthedocs.org/en/latest/security.html#openssl-pyopenssl
import requests.packages.urllib3.contrib.pyopenssl
requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
verify_ssl = True
except Exception as ex:
import traceback
print traceback.format_exc()
print "ERROR importing OpenSSL handler"
verify_ssl = False
import requests
import HTMLParser
import urllib
import socket
if addon.getSetting("sslEnable") == "false":
verify_ssl = False
print "SSL is Disabled"
#suppress warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
try:
import cPickle as pickle
except ImportError:
import pickle
socket.setdefaulttimeout(40)
pluginhandle = int(sys.argv[1])
while (addon.getSetting("username") == "" or addon.getSetting("password") == ""):
addon.openSettings()
htmlParser = HTMLParser.HTMLParser()
addonID = addon.getAddonInfo('id')
osWin = xbmc.getCondVisibility('system.platform.windows')
osLinux = xbmc.getCondVisibility('system.platform.linux')
osOSX = xbmc.getCondVisibility('system.platform.osx')
addonDir = xbmc.translatePath(addon.getAddonInfo('path'))
defaultFanart = os.path.join(addonDir ,'fanart.png')
addonUserDataFolder = xbmc.translatePath("special://profile/addon_data/"+addonID)
icon = xbmc.translatePath('special://home/addons/'+addonID+'/icon.png')
utilityPath = xbmc.translatePath('special://home/addons/'+addonID+'/resources/NetfliXBMC_Utility.exe')
sendKeysPath = xbmc.translatePath('special://home/addons/'+addonID+'/resources/NetfliXBMC_SendKeys.exe')
fakeVidPath = xbmc.translatePath('special://home/addons/'+addonID+'/resources/fakeVid.mp4')
downloadScript = xbmc.translatePath('special://home/addons/'+addonID+'/download.py')
browserScript = xbmc.translatePath('special://home/addons/'+addonID+'/browser.sh')
searchHistoryFolder = os.path.join(addonUserDataFolder, "history")
cacheFolder = os.path.join(addonUserDataFolder, "cache")
cacheFolderCoversTMDB = os.path.join(cacheFolder, "covers")
cacheFolderFanartTMDB = os.path.join(cacheFolder, "fanart")
libraryFolder = xbmc.translatePath(addon.getSetting("libraryPath"))
libraryFolderMovies = os.path.join(libraryFolder, "Movies")
libraryFolderTV = os.path.join(libraryFolder, "TV")
cookieFile = xbmc.translatePath("special://profile/addon_data/"+addonID+"/cookies")
sessionFile = xbmc.translatePath("special://profile/addon_data/"+addonID+"/session")
chromeUserDataFolder = os.path.join(addonUserDataFolder, "chrome-user-data")
dontUseKiosk = addon.getSetting("dontUseKiosk") == "true"
browseTvShows = addon.getSetting("browseTvShows") == "true"
singleProfile = addon.getSetting("singleProfile") == "true"
isKidsProfile = addon.getSetting('isKidsProfile') == 'true'
showProfiles = addon.getSetting("showProfiles") == "true"
forceView = addon.getSetting("forceView") == "true"
useUtility = addon.getSetting("useUtility") == "true"
useChromeProfile = addon.getSetting("useChromeProfile") == "true"
remoteControl = addon.getSetting("remoteControl") == "true"
updateDB = addon.getSetting("updateDB") == "true"
useTMDb = addon.getSetting("useTMDb") == "true"
username = addon.getSetting("username")
password = addon.getSetting("password")
viewIdVideos = addon.getSetting("viewIdVideos")
viewIdEpisodes = addon.getSetting("viewIdEpisodesNew")
viewIdActivity = addon.getSetting("viewIdActivity")
winBrowser = int(addon.getSetting("winBrowserNew"))
language = addon.getSetting("language")
auth = addon.getSetting("auth")
authMyList = addon.getSetting("authMyList")
linuxUseShellScript = addon.getSetting("linuxUseShellScript") == "true"
debug = addon.getSetting("debug") == "true"
country = addon.getSetting("country")
if len(country)==0 and len(language.split("-"))>1:
country = language.split("-")[1]
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
import ssl
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
def init_poolmanager(self, connections, maxsize, block=False):
ssl_version = addon.getSetting("sslSetting")
ssl_version = None if ssl_version == 'Auto' else ssl_version
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl_version)
urlMain = "https://www.netflix.com"
session = None
def newSession():
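    # fresh requests session: custom SSL adapter plus a desktop Chrome UA so
    # Netflix serves the full website markup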
s = requests.Session()
s.mount('https://', SSLAdapter())
s.headers.update({
'User-Agent': 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.6 Safari/537.36',
})
return s
session = newSession()
def unescape(s):
return htmlParser.unescape(s)
def load(url, post = None):
debug("URL: " + url)
r = ""
try:
if post:
r = session.post(url, data=post, verify=verify_ssl).text
else:
r = session.get(url, verify=verify_ssl).text
except AttributeError:
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC Error: Cookies have been deleted. Please try again.,10000,'+icon+')')
newSession()
saveState()
if post:
r = session.post(url, data=post, verify=verify_ssl).text
else:
r = session.get(url, verify=verify_ssl).text
return r.encode('utf-8')
def saveState():
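    # persist the pickled session to disk atomically (write temp file, then rename)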
tempfile = sessionFile+".tmp"
if xbmcvfs.exists(tempfile):
xbmcvfs.delete(tempfile)
ser = pickle.dumps(session)
fh = xbmcvfs.File(tempfile, 'wb')
fh.write(ser)
fh.close()
if xbmcvfs.exists(sessionFile):
xbmcvfs.delete(sessionFile)
xbmcvfs.rename(tempfile, sessionFile)
# Load cached data
if not os.path.isdir(addonUserDataFolder):
os.mkdir(addonUserDataFolder)
if not os.path.isdir(cacheFolder):
os.mkdir(cacheFolder)
if not os.path.isdir(cacheFolderCoversTMDB):
os.mkdir(cacheFolderCoversTMDB)
if not os.path.isdir(cacheFolderFanartTMDB):
os.mkdir(cacheFolderFanartTMDB)
if not os.path.isdir(libraryFolder):
xbmcvfs.mkdir(libraryFolder)
if not os.path.isdir(libraryFolderMovies):
xbmcvfs.mkdir(libraryFolderMovies)
if not os.path.isdir(libraryFolderTV):
xbmcvfs.mkdir(libraryFolderTV)
if os.path.exists(sessionFile):
fh = xbmcvfs.File(sessionFile, 'rb')
content = fh.read()
fh.close()
session = pickle.loads(content)
if not addon.getSetting("html5MessageShown"):
dialog = xbmcgui.Dialog()
    ok = dialog.ok('IMPORTANT!', 'NetfliXBMC >=1.3.0 only supports the new Netflix HTML5 User Interface! The only browsers working with HTML5 DRM playback for now are Chrome>=37 (Win/OSX/Linux) and IExplorer>=11 (Win8.1 only). Make sure you have the latest version installed and check your Netflix settings. Using Silverlight may still partially work, but it\'s not supported anymore. The HTML5 Player is also much faster, supports 1080p and gives you smoother playback (especially on Linux). See forum.xbmc.org for more info...')
addon.setSetting("html5MessageShown", "true")
def index():
if login():
addDir(translation(30011), "", 'main', "", "movie")
addDir(translation(30012), "", 'main', "", "tv")
addDir(translation(30143), "", 'wiHome', "", "both")
if not singleProfile:
profileName = addon.getSetting("profileName")
addDir(translation(30113) + ' - [COLOR blue]' + profileName + '[/COLOR]', "", 'profileDisplayUpdate', 'DefaultAddonService.png', type, contextEnable=False)
xbmcplugin.endOfDirectory(pluginhandle)
def profileDisplayUpdate():
menuPath = xbmc.getInfoLabel('Container.FolderPath')
if not showProfiles:
addon.setSetting("profile", None)
saveState()
xbmc.executebuiltin('Container.Update('+menuPath+')')
def main(type):
addDir(translation(30002), urlMain+"/MyList?leid=595&link=seeall", 'listVideos', "", type)
addDir(translation(30010), "", 'listViewingActivity', "", type)
addDir(translation(30003), urlMain+"/WiRecentAdditionsGallery?nRR=releaseDate&nRT=all&pn=1&np=1&actionMethod=json", 'listVideos', "", type)
if type=="tv":
addDir(translation(30005), urlMain+"/WiGenre?agid=83", 'listVideos', "", type)
addDir(translation(30007), "", 'listTvGenres', "", type)
else:
addDir(translation(30007), "WiGenre", 'listGenres', "", type)
addDir(translation(30008), "", 'search', "", type)
xbmcplugin.endOfDirectory(pluginhandle)
def wiHome(type):
content = load(urlMain+"/WiHome")
match1 = re.compile('<div class="mrow(.+?)"><div class="hd clearfix"><h3> (.+?)</h3></div><div class="bd clearfix"><div class="slider triangleBtns " id="(.+?)"', re.DOTALL).findall(content)
match2 = re.compile('class="hd clearfix"><h3><a href="(.+?)">(.+?)<', re.DOTALL).findall(content)
for temp, title, sliderID in match1:
if not "hide-completely" in temp:
title = re.sub('<.(.+?)</.>', '', title)
addDir(title.strip(), sliderID, 'listSliderVideos', "", type)
for url, title in match2:
if "WiAltGenre" in url or "WiSimilarsByViewType" in url or "WiRecentAdditionsGallery" in url:
addDir(title.strip(), url, 'listVideos', "", type)
xbmcplugin.endOfDirectory(pluginhandle)
def listVideos(url, type):
pDialog = xbmcgui.DialogProgress()
pDialog.create('NetfliXBMC', translation(30142)+"...")
pDialog.update( 0, translation(30142)+"...")
xbmcplugin.setContent(pluginhandle, "movies")
content = load(url)
#content = load(url) # Terrible... currently first call doesn't have the content, it requires two calls....
if not 'id="page-LOGIN"' in content:
if singleProfile and 'id="page-ProfilesGate"' in content:
forceChooseProfile()
else:
if '<div id="queue"' in content:
content = content[content.find('<div id="queue"'):]
content = content.replace("\\t","").replace("\\n", "").replace("\\", "")
match = None
if not match: match = re.compile('<span id="dbs(.+?)_.+?alt=".+?"', re.DOTALL).findall(content)
if not match: match = re.compile('<span class="title.*?"><a id="b(.+?)_', re.DOTALL).findall(content)
#if not match: match = re.compile('<a href="http://dvd.netflix.com/WiPlayer\?movieid=(.+?)&', re.DOTALL).findall(content)
#if not match: match = re.compile('<a class="playHover" href=".+?WiPlayer\?movieid=(.+?)&', re.DOTALL).findall(content)
if not match: match = re.compile('"boxart":".+?","titleId":(.+?),', re.DOTALL).findall(content)
if not match: match = re.compile('WiPlayer\?movieid=([0-9]+?)&', re.DOTALL).findall(content)
i = 1
for videoID in match:
pDialog.update(i*100/len(match), translation(30142)+"...")
listVideo(videoID, "", "", False, False, type)
i+=1
match1 = re.compile('&pn=(.+?)&', re.DOTALL).findall(url)
match2 = re.compile('&from=(.+?)&', re.DOTALL).findall(url)
matchApiRoot = re.compile('"API_ROOT":"(.+?)"', re.DOTALL).findall(content)
matchApiBase = re.compile('"API_BASE_URL":"(.+?)"', re.DOTALL).findall(content)
matchIdentifier = re.compile('"BUILD_IDENTIFIER":"(.+?)"', re.DOTALL).findall(content)
if "agid=" in url and matchApiRoot and matchApiBase and matchIdentifier:
genreID = url[url.find("agid=")+5:]
addDir(translation(30001), matchApiRoot[0]+matchApiBase[0]+"/"+matchIdentifier[0]+"/wigenre?genreId="+genreID+"&full=false&from=51&to=100&_retry=0", 'listVideos', "", type)
elif match1:
currentPage = match1[0]
nextPage = str(int(currentPage)+1)
addDir(translation(30001), url.replace("&pn="+currentPage+"&", "&pn="+nextPage+"&"), 'listVideos', "", type)
elif match2:
currentFrom = match2[0]
nextFrom = str(int(currentFrom)+50)
currentTo = str(int(currentFrom)+49)
nextTo = str(int(currentFrom)+99)
addDir(translation(30001), url.replace("&from="+currentFrom+"&", "&from="+nextFrom+"&").replace("&to="+currentTo+"&", "&to="+nextTo+"&"), 'listVideos', "", type)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdVideos+')')
xbmcplugin.endOfDirectory(pluginhandle)
else:
deleteCookies()
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30127))+',15000,'+icon+')')
def listSliderVideos(sliderID, type):
pDialog = xbmcgui.DialogProgress()
pDialog.create('NetfliXBMC', translation(30142)+"...")
pDialog.update( 0, translation(30142)+"...")
xbmcplugin.setContent(pluginhandle, "movies")
content = load(urlMain+"/WiHome")
if not 'id="page-LOGIN"' in content:
if singleProfile and 'id="page-ProfilesGate"' in content:
forceChooseProfile()
else:
content = content.replace("\\t","").replace("\\n", "").replace("\\", "")
contentMain = content
content = content[content.find('id="'+sliderID+'"'):]
content = content[:content.find('class="ft"')]
match = re.compile('<span id="dbs(.+?)_', re.DOTALL).findall(content)
i = 1
for videoID in match:
listVideo(videoID, "", "", False, False, type)
i+=1
spl = contentMain.split('"remainderHTML":')
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('"rowId":')]
if '"domId":"'+sliderID+'"' in entry:
match = re.compile('<span id="dbs(.+?)_', re.DOTALL).findall(entry)
i = 1
for videoID in match:
pDialog.update(i*100/(len(match)+10), translation(30142)+"...")
listVideo(videoID, "", "", False, False, type)
i+=1
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdVideos+')')
xbmcplugin.endOfDirectory(pluginhandle)
else:
deleteCookies()
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30127))+',15000,'+icon+')')
def listSearchVideos(url, type):
pDialog = xbmcgui.DialogProgress()
pDialog.create('NetfliXBMC', translation(30142)+"...")
pDialog.update( 0, translation(30142)+"...")
xbmcplugin.setContent(pluginhandle, "movies")
content = load(url)
content = json.loads(content)
i = 1
if "galleryVideos" in content:
for item in content["galleryVideos"]["items"]:
pDialog.update(i*100/len(content["galleryVideos"]["items"]), translation(30142)+"...")
listVideo(str(item["id"]), "", "", False, False, type)
i+=1
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdVideos+')')
xbmcplugin.endOfDirectory(pluginhandle)
else:
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30146))+',5000,'+icon+')')
def clean_filename(n, chars=None):
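    # strip characters that are illegal in filenames on Windows and most filesystems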
if isinstance(n, str):
return (''.join(c for c in unicode(n, "utf-8") if c not in '/\\:?"*|<>')).strip(chars)
elif isinstance(n, unicode):
return (''.join(c for c in n if c not in '/\\:?"*|<>')).strip(chars)
def listVideo(videoID, title, thumbUrl, tvshowIsEpisode, hideMovies, type):
videoDetails = getVideoInfo(videoID)
match = re.compile('<span class="title.*?>(.+?)<', re.DOTALL).findall(videoDetails)
if not title:
title = match[0].strip()
year = ""
match = re.compile('<span class="year.*?>(.+?)<', re.DOTALL).findall(videoDetails)
if match:
year = match[0]
if not thumbUrl:
match = re.compile('src="(.+?)"', re.DOTALL).findall(videoDetails)
thumbUrl = match[0].replace("/webp/","/images/").replace(".webp",".jpg")
match = re.compile('<span class="mpaaRating.*?>(.+?)<', re.DOTALL).findall(videoDetails)
mpaa = ""
if match:
mpaa = match[0].strip()
match = re.compile('<span class="duration.*?>(.+?)<', re.DOTALL).findall(videoDetails)
duration = ""
if match:
duration = match[0].lower()
if duration.split(' ')[-1] in ["minutes", "minutos", "minuter", "minutter", "minuuttia", "minuten"]:
videoType = "movie"
videoTypeTemp = videoType
duration = duration.split(" ")[0]
else:
videoTypeTemp = "tv"
if tvshowIsEpisode:
videoType = "episode"
year = ""
else:
videoType = "tvshow"
duration = ""
if useTMDb:
yearTemp = year
titleTemp = title
if " - " in titleTemp:
titleTemp = titleTemp[titleTemp.find(" - ")+3:]
if "-" in yearTemp:
yearTemp = yearTemp.split("-")[0]
filename = clean_filename(videoID)+".jpg"
filenameNone = clean_filename(videoID)+".none"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
coverFileNone = os.path.join(cacheFolderCoversTMDB, filenameNone)
if not os.path.exists(coverFile) and not os.path.exists(coverFileNone):
debug("Downloading Cover art. videoType:"+videoTypeTemp+", videoID:" + videoID + ", title:"+titleTemp+", year:"+yearTemp)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(videoTypeTemp)+', '+urllib.quote_plus(videoID)+', '+urllib.quote_plus(titleTemp)+', '+urllib.quote_plus(yearTemp)+')')
match = re.compile('src=".+?">.*?<.*?>(.+?)<', re.DOTALL).findall(videoDetails)
desc = ""
if match:
descTemp = match[0].decode("utf-8", 'ignore')
        # replace escape-sequence leftovers ('u2013', 'u2026') with the real
        # unicode characters (observed on the Norwegian locale pages)
descTemp = descTemp.replace('u2013', u'\u2013').replace('u2026', u'\u2026')
desc = htmlParser.unescape(descTemp)
match = re.compile('Director:</dt><dd>(.+?)<', re.DOTALL).findall(videoDetails)
director = ""
if match:
director = match[0].strip()
match = re.compile('<span class="genre.*?>(.+?)<', re.DOTALL).findall(videoDetails)
genre = ""
if match:
genre = match[0]
match = re.compile('<span class="rating">(.+?)<', re.DOTALL).findall(videoDetails)
rating = ""
if match:
rating = match[0]
title = htmlParser.unescape(title.decode("utf-8"))
nextMode = "playVideoMain"
if browseTvShows and videoType == "tvshow":
nextMode = "listSeasons"
added = False
if "/MyList" in url and videoTypeTemp==type:
addVideoDirR(title, videoID, nextMode, thumbUrl, videoType, desc, duration, year, mpaa, director, genre, rating)
added = True
elif videoType == "movie" and hideMovies:
pass
elif videoTypeTemp==type or type=="both":
addVideoDir(title, videoID, nextMode, thumbUrl, videoType, desc, duration, year, mpaa, director, genre, rating)
added = True
return added
def listGenres(type, videoType):
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
if isKidsProfile:
type = 'KidsAltGenre'
content = load(urlMain+"/WiHome")
match = re.compile('/'+type+'\\?agid=(.+?)">(.+?)<', re.DOTALL).findall(content)
    # A number of categories (especially in the Kids genres) have duplicate entries and a lot of whitespace; create a stripped unique set
unique_match = set((k[0].strip(), k[1].strip()) for k in match)
for genreID, title in unique_match:
if not genreID=="83":
if isKidsProfile:
addDir(title, urlMain+"/"+type+"?agid="+genreID+"&pn=1&np=1&actionMethod=json", 'listVideos', "", videoType)
else:
addDir(title, urlMain+"/"+type+"?agid="+genreID, 'listVideos', "", videoType)
xbmcplugin.endOfDirectory(pluginhandle)
def listTvGenres(videoType):
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
content = load(urlMain+"/WiGenre?agid=83")
content = content[content.find('id="subGenres_menu"'):]
content = content[:content.find('</div>')]
match = re.compile('<li ><a href=".+?/WiGenre\\?agid=(.+?)&.+?"><span>(.+?)<', re.DOTALL).findall(content)
for genreID, title in match:
addDir(title, urlMain+"/WiGenre?agid="+genreID, 'listVideos', "", videoType)
xbmcplugin.endOfDirectory(pluginhandle)
def listSeasons(seriesName, seriesID, thumb):
content = getSeriesInfo(seriesID)
content = json.loads(content)
seasons = []
for item in content["episodes"]:
if item[0]["season"] not in seasons:
seasons.append(item[0]["season"])
for season in seasons:
addSeasonDir("Season "+str(season), str(season), 'listEpisodes', thumb, seriesName, seriesID)
xbmcplugin.endOfDirectory(pluginhandle)
def listEpisodes(seriesID, season):
xbmcplugin.setContent(pluginhandle, "episodes")
content = getSeriesInfo(seriesID)
content = json.loads(content)
for test in content["episodes"]:
for item in test:
episodeSeason = str(item["season"])
if episodeSeason == season:
episodeID = str(item["episodeId"])
episodeNr = str(item["episode"])
episodeTitle = (episodeNr + ". " + item["title"]).encode('utf-8')
duration = item["runtime"]
bookmarkPosition = item["bookmarkPosition"]
playcount=0
                    if duration > 0 and float(bookmarkPosition)/float(duration) >= 0.9:
playcount=1
desc = item["synopsis"].encode('utf-8')
try:
thumb = item["stills"][0]["url"]
except:
thumb = ""
addEpisodeDir(episodeTitle, episodeID, 'playVideoMain', thumb, desc, str(duration), season, episodeNr, seriesID, playcount)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdEpisodes+')')
xbmcplugin.endOfDirectory(pluginhandle)
def listViewingActivity(type):
pDialog = xbmcgui.DialogProgress()
pDialog.create('NetfliXBMC', translation(30142)+"...")
pDialog.update( 0, translation(30142)+"...")
xbmcplugin.setContent(pluginhandle, "movies")
content = load(urlMain+"/WiViewingActivity")
count = 0
videoIDs = []
spl = re.compile('(<li .*?data-series=.*?</li>)', re.DOTALL).findall(content)
#spl = content.split('')
for i in range(1, len(spl), 1):
entry = spl[i]
pDialog.update((count+1)*100/len(spl), translation(30142)+"...")
matchId = re.compile('data-movieid="(.*?)"', re.DOTALL).findall(entry)
if matchId:
videoID = matchId[0]
match = re.compile('class="col date nowrap">(.+?)<', re.DOTALL).findall(entry)
date = match[0]
matchTitle1 = re.compile('class="seriestitle">(.+?)</a>', re.DOTALL).findall(entry)
matchTitle2 = re.compile('class="col title">.+?>(.+?)<', re.DOTALL).findall(entry)
if matchTitle1:
title = htmlParser.unescape(matchTitle1[0].decode("utf-8")).replace("</span>", "").encode("utf-8")
elif matchTitle2:
title = matchTitle2[0]
else:
title = ""
title = date+" - "+title
if videoID not in videoIDs:
videoIDs.append(videoID)
                # due to limitations in the netflix api, there is no way to get the seriesId of an
                # episode, so the 4th param is set to True to treat tv episodes the same as movies.
added = listVideo(videoID, title, "", True, False, type)
if added:
count += 1
if count == 40:
break
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdActivity+')')
xbmcplugin.endOfDirectory(pluginhandle)
def getVideoInfo(videoID):
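    # per-title metadata (Netflix "BOB" JSON), cached on disk indefinitely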
cacheFile = os.path.join(cacheFolder, videoID+".cache")
content = ""
if os.path.exists(cacheFile):
fh = xbmcvfs.File(cacheFile, 'r')
content = fh.read()
fh.close()
if not content:
content = load(urlMain+"/JSON/BOB?movieid="+videoID)
fh = xbmcvfs.File(cacheFile, 'w')
fh.write(content)
fh.close()
return content.replace("\\t","").replace("\\n", "").replace("\\", "")
def getSeriesInfo(seriesID):
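    # episode metadata from the odp endpoint, cached on disk for 5 minutes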
cacheFile = os.path.join(cacheFolder, seriesID+"_episodes.cache")
content = ""
if os.path.exists(cacheFile) and (time.time()-os.path.getmtime(cacheFile) < 60*5):
fh = xbmcvfs.File(cacheFile, 'r')
content = fh.read()
fh.close()
if not content:
url = "http://api-global.netflix.com/desktop/odp/episodes?languages="+language+"&forceEpisodes=true&routing=redirect&video="+seriesID+"&country="+country
content = load(url)
fh = xbmcvfs.File(cacheFile, 'w')
fh.write(content)
fh.close()
# if netflix throws exception they may still return content after the exception
index = content.find('{"title":')
if index != -1:
content = content[index:]
return content
def addMyListToLibrary():
if not singleProfile:
token = ""
if addon.getSetting("profile"):
token = addon.getSetting("profile")
load("https://www.netflix.com/SwitchProfile?tkn="+token)
content = load(urlMain+"/MyList?leid=595&link=seeall")
if not 'id="page-LOGIN"' in content:
if singleProfile and 'id="page-ProfilesGate"' in content:
forceChooseProfile()
else:
if '<div id="queue"' in content:
content = content[content.find('<div id="queue"'):]
content = content.replace("\\t","").replace("\\n", "").replace("\\", "")
match1 = re.compile('<span id="dbs(.+?)_.+?alt=".+?"', re.DOTALL).findall(content)
match2 = re.compile('<span class="title.*?"><a id="b(.+?)_', re.DOTALL).findall(content)
match3 = re.compile('<a href="http://dvd.netflix.com/WiPlayer\?movieid=(.+?)&', re.DOTALL).findall(content)
match4 = re.compile('<a class="playHover" href=".+?WiPlayer\?movieid=(.+?)&', re.DOTALL).findall(content)
match5 = re.compile('"boxart":".+?","titleId":(.+?),', re.DOTALL).findall(content)
if match1:
match = match1
elif match2:
match = match2
elif match3:
match = match3
elif match4:
match = match4
elif match5:
match = match5
for videoID in match:
videoDetails = getVideoInfo(videoID)
match = re.compile('<span class="title ".*?>(.+?)<\/span>', re.DOTALL).findall(videoDetails)
title = match[0].strip()
title = htmlParser.unescape(title.decode("utf-8"))
match = re.compile('<span class="year".*?>(.+?)<\/span>', re.DOTALL).findall(videoDetails)
year = ""
if match:
year = match[0]
match = re.compile('<span class="duration.*?".*?>(.+?)<\/span>', re.DOTALL).findall(videoDetails)
duration = ""
if match:
duration = match[0].lower()
if "minutes" in duration:
try:
if year:
title = title+" ("+year+")"
addMovieToLibrary(videoID, title, False)
except:
pass
else:
try:
addSeriesToLibrary(videoID, title, "", False)
except:
pass
if updateDB:
xbmc.executebuiltin('UpdateLibrary(video)')
def playVideo(id):
listitem = xbmcgui.ListItem(path=fakeVidPath)
xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
playVideoMain(id)
xbmc.PlayList(xbmc.PLAYLIST_VIDEO).clear()
def playVideoMain(id):
xbmc.Player().stop()
if singleProfile:
url = urlMain+"/WiPlayer?movieid="+id
else:
token = ""
if addon.getSetting("profile"):
token = addon.getSetting("profile")
url = "https://www.netflix.com/SwitchProfile?tkn="+token+"&nextpage="+urllib.quote_plus(urlMain+"/WiPlayer?movieid="+id)
if osOSX:
launchChrome(url)
#xbmc.executebuiltin("RunPlugin(plugin://plugin.program.chrome.launcher/?url="+urllib.quote_plus(url)+"&mode=showSite&kiosk="+kiosk+")")
try:
xbmc.sleep(5000)
subprocess.Popen('cliclick c:500,500', shell=True)
subprocess.Popen('cliclick kp:arrow-up', shell=True)
xbmc.sleep(5000)
subprocess.Popen('cliclick c:500,500', shell=True)
subprocess.Popen('cliclick kp:arrow-up', shell=True)
xbmc.sleep(5000)
subprocess.Popen('cliclick c:500,500', shell=True)
subprocess.Popen('cliclick kp:arrow-up', shell=True)
except:
pass
elif osLinux:
if linuxUseShellScript:
xbmc.executebuiltin('LIRC.Stop')
call = '"'+browserScript+'" "'+url+'"';
debug("Browser Call: " + call)
subprocess.call(call, shell=True)
xbmc.executebuiltin('LIRC.Start')
else:
launchChrome(url)
#xbmc.executebuiltin("RunPlugin(plugin://plugin.program.chrome.launcher/?url="+urllib.quote_plus(url)+"&mode=showSite&kiosk="+kiosk+")")
try:
xbmc.sleep(5000)
subprocess.Popen('xdotool mousemove 9999 9999', shell=True)
xbmc.sleep(5000)
subprocess.Popen('xdotool mousemove 9999 9999', shell=True)
xbmc.sleep(5000)
subprocess.Popen('xdotool mousemove 9999 9999', shell=True)
except:
pass
elif osWin:
if winBrowser == 1:
path = 'C:\\Program Files\\Internet Explorer\\iexplore.exe'
path64 = 'C:\\Program Files (x86)\\Internet Explorer\\iexplore.exe'
if os.path.exists(path):
subprocess.Popen('"'+path+'" -k "'+url+'"', shell=False)
elif os.path.exists(path64):
subprocess.Popen('"'+path64+'" -k "'+url+'"', shell=False)
else:
launchChrome(url)
#xbmc.executebuiltin("RunPlugin(plugin://plugin.program.chrome.launcher/?url="+urllib.quote_plus(url)+"&mode=showSite&kiosk="+kiosk+")")
if useUtility:
subprocess.Popen('"'+utilityPath+'"', shell=False)
myWindow = window('window.xml', addon.getAddonInfo('path'), 'default',)
myWindow.doModal()
myWindow.stopWakeupThread() # insurance, in case self.close() wasn't the method by which the window was closed
def launchChrome(url):
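    # hand the URL to the chrome.launcher addon, optionally in kiosk mode and
    # with a dedicated Chrome profile (seeded from the bundled zip, with the
    # current session cookies injected)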
kiosk = "yes"
if dontUseKiosk:
kiosk = "no"
profileFolder = ""
if useChromeProfile:
if not os.path.exists(chromeUserDataFolder):
import zipfile
zip = os.path.join(addonDir, "resources", "chrome-user-data.zip")
with open(zip, "rb") as zf:
z = zipfile.ZipFile(zf)
z.extractall(addonUserDataFolder)
profileFolder = "&profileFolder="+urllib.quote_plus(chromeUserDataFolder)
# Inject cookies
chrome_cookies.inject_cookies_into_chrome(session, os.path.join(chromeUserDataFolder, "Default", "Cookies"))
xbmc.executebuiltin("RunPlugin(plugin://plugin.program.chrome.launcher/?url="+urllib.quote_plus(url)+"&mode=showSite&kiosk="+kiosk+profileFolder+")")
def configureUtility():
if osWin:
subprocess.Popen('"'+utilityPath+'"'+' config=yes', shell=False)
def chromePluginOptions():
url = "chrome-extension://najegmllpphoobggcngjhcpknknljhkj/html/options.html"
launchChrome(url)
def deleteCookies():
if os.path.exists(cookieFile):
os.remove(cookieFile)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,Cookies have been deleted!,5000,'+icon+')')
if os.path.exists(sessionFile):
os.remove(sessionFile)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,Session cookies have been deleted!,5000,'+icon+')')
def deleteCache():
if os.path.exists(cacheFolder):
try:
shutil.rmtree(cacheFolder)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,Cache has been deleted!,5000,'+icon+')')
except:
pass
def deleteChromeUserDataFolder():
if os.path.exists(chromeUserDataFolder):
try:
shutil.rmtree(chromeUserDataFolder)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,Chrome UserData has been deleted!,5000,'+icon+')')
except:
pass
def resetAddon():
dialog = xbmcgui.Dialog()
if dialog.yesno("NetfliXBMC:", "Really reset the addon?"):
if os.path.exists(addonUserDataFolder):
try:
shutil.rmtree(addonUserDataFolder)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,Addon has been reset!,5000,'+icon+')')
except:
pass
def search(type):
keyboard = xbmc.Keyboard('', translation(30008))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
search_string = keyboard.getText().replace(" ", "+")
listSearchVideos("http://api-global.netflix.com/desktop/search/instantsearch?esn=www&term="+search_string+"&locale="+language+"&country="+country+"&authURL="+auth+"&_retry=0&routing=redirect", type)
def addToQueue(id):
if authMyList:
encodedAuth = urllib.urlencode({'authURL': authMyList})
load(urlMain+"/AddToQueue?movieid="+id+"&qtype=INSTANT&"+encodedAuth)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30144))+',3000,'+icon+')')
else:
debug("Attempted to addToQueue without valid authMyList")
def removeFromQueue(id):
if authMyList:
encodedAuth = urllib.urlencode({'authURL': authMyList})
load(urlMain+"/QueueDelete?"+encodedAuth+"&qtype=ED&movieid="+id)
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30145))+',3000,'+icon+')')
xbmc.executebuiltin("Container.Refresh")
else:
debug("Attempted to removeFromQueue without valid authMyList")
def displayLoginProgress(progressWindow, value, message):
progressWindow.update( value, "", message, "" )
if progressWindow.iscanceled():
return False
else:
return True
def login():
#setup login progress display
loginProgress = xbmcgui.DialogProgress()
loginProgress.create('NETFLIXBMC', str(translation(30216)) + '...')
displayLoginProgress(loginProgress, 25, str(translation(30217)))
session.cookies.clear()
content = load(urlMain+"/Login")
match = re.compile('"LOCALE":"(.+?)"', re.DOTALL|re.IGNORECASE).findall(content)
if match and not addon.getSetting("language"):
addon.setSetting("language", match[0])
if not "Sorry, Netflix is not available in your country yet." in content and not "Sorry, Netflix hasn't come to this part of the world yet" in content:
match = re.compile('id="signout".+?authURL=(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting("auth", match[0])
if 'id="page-LOGIN"' in content:
match = re.compile('name="authURL" value="(.+?)"', re.DOTALL).findall(content)
authUrl = match[0]
addon.setSetting("auth", authUrl)
#postdata = "authURL="+urllib.quote_plus(authUrl)+"&email="+urllib.quote_plus(username)+"&password="+urllib.quote_plus(password)+"&RememberMe=on"
postdata ={ "authURL":authUrl,
"email":username,
"password":password,
"RememberMe":"on"
}
#content = load("https://signup.netflix.com/Login", "authURL="+urllib.quote_plus(authUrl)+"&email="+urllib.quote_plus(username)+"&password="+urllib.quote_plus(password)+"&RememberMe=on")
displayLoginProgress(loginProgress, 50, str(translation(30218)))
content = load("https://signup.netflix.com/Login", postdata)
if 'id="page-LOGIN"' in content:
# Login Failed
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30127))+',15000,'+icon+')')
return False
match = re.compile('"LOCALE":"(.+?)"', re.DOTALL|re.IGNORECASE).findall(content)
if match and not addon.getSetting("language"):
addon.setSetting("language", match[0])
match = re.compile('"COUNTRY":"(.+?)"', re.DOTALL|re.IGNORECASE).findall(content)
if match:
# always overwrite the country code, to cater for switching regions
debug("Setting Country: " + match[0])
addon.setSetting("country", match[0])
saveState()
displayLoginProgress(loginProgress, 75, str(translation(30219)))
if not addon.getSetting("profile") and not singleProfile:
chooseProfile()
elif not singleProfile and showProfiles:
chooseProfile()
elif not singleProfile and not showProfiles:
loadProfile()
else:
getMyListChangeAuthorisation()
if loginProgress:
if not displayLoginProgress(loginProgress, 100, str(translation(30220))):
return False
xbmc.sleep(500)
loginProgress.close()
return True
else:
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30126))+',10000,'+icon+')')
if loginProgress:
loginProgress.close()
return False
def debug(message):
    # this function shadows the module-level `debug` boolean defined above,
    # so re-read the setting here to honour the user's debug preference
    if addon.getSetting("debug") == "true":
        print message
def loadProfile():
savedProfile = addon.getSetting("profile")
if savedProfile:
load("https://api-global.netflix.com/desktop/account/profiles/switch?switchProfileGuid="+savedProfile)
saveState()
else:
debug("LoadProfile: No stored profile found")
getMyListChangeAuthorisation()
def chooseProfile():
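    # scrape the profiles page for (name, token, experience) tuples; Netflix
    # has shipped several different markups, hence the three regex attempts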
content = load("https://www.netflix.com/ProfilesGate?nextpage=http%3A%2F%2Fwww.netflix.com%2FDefault")
matchType = 0
match = re.compile('"profileName":"(.+?)".+?token":"(.+?)"', re.DOTALL).findall(content)
if len(match):
matchType = 1
if not len(match):
match = re.compile('"firstName":"(.+?)".+?guid":"(.+?)".+?experience":"(.+?)"', re.DOTALL).findall(content)
if len(match):
matchType = 1
if not len(match):
match = re.compile('"experience":"(.+?)".+?guid":"(.+?)".+?profileName":"(.+?)"', re.DOTALL).findall(content)
if len(match):
matchType = 2
profiles = []
# remove any duplicated profile data found during page scrape
match = [item for count, item in enumerate(match) if item not in match[:count]]
if matchType == 1:
for p, t, e in match:
profile = {'name': unescape(p), 'token': t, 'isKids': e=='jfk'}
profiles.append(profile)
elif matchType == 2:
for e, t, p in match:
profile = {'name': unescape(p), 'token': t, 'isKids': e=='jfk'}
profiles.append(profile)
if matchType > 0:
dialog = xbmcgui.Dialog()
nr = dialog.select(translation(30113), [profile['name'] for profile in profiles])
if nr >= 0:
selectedProfile = profiles[nr]
else:
selectedProfile = profiles[0]
load("https://api-global.netflix.com/desktop/account/profiles/switch?switchProfileGuid="+selectedProfile['token'])
addon.setSetting("profile", selectedProfile['token'])
addon.setSetting("isKidsProfile", 'true' if selectedProfile['isKids'] else 'false')
addon.setSetting("profileName", selectedProfile['name'])
saveState()
getMyListChangeAuthorisation()
else:
debug("Netflixbmc::chooseProfile: No profiles were found")
def getMyListChangeAuthorisation():
content = load(urlMain+"/WiHome")
match = re.compile('"xsrf":"(.+?)"', re.DOTALL).findall(content)
if match:
authMyList = match[0]
addon.setSetting("authMyList", match[0])
def forceChooseProfile():
addon.setSetting("singleProfile", "false")
xbmc.executebuiltin('XBMC.Notification(NetfliXBMC:,'+str(translation(30111))+',5000,'+icon+')')
chooseProfile()
def addMovieToLibrary(movieID, title, singleUpdate=True):
movieFolderName = clean_filename(title+".strm", ' .').strip(' .')
dirAndFilename = os.path.join(libraryFolderMovies, movieFolderName)
fh = xbmcvfs.File(dirAndFilename, 'w')
fh.write("plugin://plugin.video.netflixbmc/?mode=playVideo&url="+movieID)
fh.close()
if updateDB and singleUpdate:
xbmc.executebuiltin('UpdateLibrary(video)')
def addSeriesToLibrary(seriesID, seriesTitle, season, singleUpdate=True):
seriesFolderName = clean_filename(seriesTitle, ' .')
seriesDir = os.path.join(libraryFolderTV, seriesFolderName)
if not os.path.isdir(seriesDir):
xbmcvfs.mkdir(seriesDir)
content = getSeriesInfo(seriesID)
content = json.loads(content)
for test in content["episodes"]:
for item in test:
episodeSeason = str(item["season"])
seasonCheck = True
if season:
seasonCheck = episodeSeason == season
if seasonCheck:
seasonDir = os.path.join(seriesDir, "Season "+episodeSeason)
if not os.path.isdir(seasonDir):
xbmcvfs.mkdir(seasonDir)
episodeID = str(item["episodeId"])
episodeNr = str(item["episode"])
episodeTitle = item["title"].encode('utf-8')
if len(episodeNr) == 1:
episodeNr = "0"+episodeNr
seasonNr = episodeSeason
if len(seasonNr) == 1:
seasonNr = "0"+seasonNr
filename = "S"+seasonNr+"E"+episodeNr+" - "+episodeTitle+".strm"
filename = clean_filename(filename, ' .')
fh = xbmcvfs.File(os.path.join(seasonDir, filename), 'w')
fh.write("plugin://plugin.video.netflixbmc/?mode=playVideo&url="+episodeID)
fh.close()
if updateDB and singleUpdate:
xbmc.executebuiltin('UpdateLibrary(video)')
def playTrailer(title):
try:
content = load("http://gdata.youtube.com/feeds/api/videos?vq="+title.strip().replace(" ", "+")+"+trailer&racy=include&orderby=relevance")
match = re.compile('<id>http://gdata.youtube.com/feeds/api/videos/(.+?)</id>', re.DOTALL).findall(content.split('<entry>')[2])
xbmc.Player().play("plugin://plugin.video.youtube/play/?video_id=" + match[0])
except:
pass
def translation(id):
return addon.getLocalizedString(id).encode('utf-8')
def parameters_string_to_dict(parameters):
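    # parse the plugin query string (sys.argv[2], e.g. "?mode=main&type=tv")
    # into a plain dict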
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
def addDir(name, url, mode, iconimage, type="", contextEnable=True):
name = htmlParser.unescape(name)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&type="+str(type)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name})
entries = []
if "/MyList" in url:
entries.append((translation(30122), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addMyListToLibrary)',))
liz.setProperty("fanart_image", defaultFanart)
if contextEnable:
liz.addContextMenuItems(entries)
else:
emptyEntries = []
liz.addContextMenuItems(emptyEntries, replaceItems=True)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addVideoDir(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
if duration:
duration = str(int(duration) * 60)
name = name.encode("utf-8")
filename = clean_filename(url)+".jpg"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
if os.path.exists(coverFile):
iconimage = coverFile
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": float(rating)})
if os.path.exists(fanartFile):
liz.setProperty("fanart_image", fanartFile)
elif os.path.exists(coverFile):
liz.setProperty("fanart_image", coverFile)
entries = []
if videoType == "tvshow":
if browseTvShows:
entries.append((translation(30121), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=playVideoMain&url='+urllib.quote_plus(url)+'&thumb='+urllib.quote_plus(iconimage)+')',))
else:
entries.append((translation(30118), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=listSeasons&url='+urllib.quote_plus(url)+'&thumb='+urllib.quote_plus(iconimage)+')',))
if videoType != "episode":
entries.append((translation(30134), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=playTrailer&url='+urllib.quote_plus(name)+')',))
entries.append((translation(30114), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addToQueue&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30140), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=listVideos&url='+urllib.quote_plus(urlMain+"/WiMovie/"+url)+'&type=movie)',))
entries.append((translation(30141), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=listVideos&url='+urllib.quote_plus(urlMain+"/WiMovie/"+url)+'&type=tv)',))
if videoType == "tvshow":
entries.append((translation(30122), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addSeriesToLibrary&url=&name='+urllib.quote_plus(name.strip())+'&seriesID='+urllib.quote_plus(url)+')',))
elif videoType == "movie":
entries.append((translation(30122), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addMovieToLibrary&url='+urllib.quote_plus(url)+'&name='+urllib.quote_plus(name.strip()+' ('+year+')')+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addVideoDirR(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
if duration:
duration = str(int(duration) * 60)
name = name.encode("utf-8")
filename = clean_filename(url)+".jpg"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
if os.path.exists(coverFile):
iconimage = coverFile
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": float(rating)})
if os.path.exists(fanartFile):
liz.setProperty("fanart_image", fanartFile)
elif os.path.exists(coverFile):
liz.setProperty("fanart_image", coverFile)
entries = []
if videoType == "tvshow":
if browseTvShows:
entries.append((translation(30121), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=playVideoMain&url='+urllib.quote_plus(url)+'&thumb='+urllib.quote_plus(iconimage)+')',))
else:
entries.append((translation(30118), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=listSeasons&url='+urllib.quote_plus(url)+'&thumb='+urllib.quote_plus(iconimage)+')',))
entries.append((translation(30134), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=playTrailer&url='+urllib.quote_plus(name)+')',))
entries.append((translation(30115), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=removeFromQueue&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30140), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=listVideos&url='+urllib.quote_plus(urlMain+"/WiMovie/"+url)+'&type=movie)',))
entries.append((translation(30141), 'Container.Update(plugin://plugin.video.netflixbmc/?mode=listVideos&url='+urllib.quote_plus(urlMain+"/WiMovie/"+url)+'&type=tv)',))
if videoType == "tvshow":
entries.append((translation(30122), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addSeriesToLibrary&url=&name='+str(name.strip())+'&seriesID='+str(url)+')',))
elif videoType == "movie":
entries.append((translation(30122), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addMovieToLibrary&url='+urllib.quote_plus(url)+'&name='+str(name.strip()+' ('+year+')')+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addSeasonDir(name, url, mode, iconimage, seriesName, seriesID):
filename = clean_filename(seriesID)+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&seriesID="+urllib.quote_plus(seriesID)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name})
if os.path.exists(fanartFile):
liz.setProperty("fanart_image", fanartFile)
elif os.path.exists(coverFile):
liz.setProperty("fanart_image", coverFile)
entries = []
entries.append((translation(30122), 'RunPlugin(plugin://plugin.video.netflixbmc/?mode=addSeriesToLibrary&url='+urllib.quote_plus(url)+'&name='+str(seriesName.strip())+'&seriesID='+str(seriesID)+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addEpisodeDir(name, url, mode, iconimage, desc="", duration="", season="", episodeNr="", seriesID="", playcount=""):
if duration:
duration = str(int(duration) * 60)
filename = clean_filename(seriesID)+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "season": season, "episode": episodeNr, "playcount": playcount})
if os.path.exists(fanartFile):
liz.setProperty("fanart_image", fanartFile)
elif os.path.exists(coverFile):
liz.setProperty("fanart_image", coverFile)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
class window(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self, *args, **kwargs)
self._stopEvent = threading.Event()
self._wakeUpThread = threading.Thread(target=self._wakeUpThreadProc)
self._wakeUpThread.start()
def _wakeUpThreadProc(self):
while not self._stopEvent.is_set():
            debug("Netflixbmc: Sending wakeup to main UI to avoid idle/DPMS...")
xbmc.executebuiltin("playercontrol(wakeup)")
# bit of a hack above: wakeup is actually not a valid playercontrol argument,
# but there's no error printed if the argument isn't found and any playercontrol
# causes the DPMS/idle timeout to reset itself
self._stopEvent.wait(60)
        debug("Netflixbmc: wakeup thread finishing.")
def stopWakeupThread(self):
        debug("Netflixbmc: stopping wakeup thread")
self._stopEvent.set()
self._wakeUpThread.join()
def close(self):
        debug("Netflixbmc: closing dummy window")
self.stopWakeupThread()
xbmcgui.WindowXMLDialog.close(self)
def onAction(self, action):
ACTION_SELECT_ITEM = 7
ACTION_PARENT_DIR = 9
ACTION_PREVIOUS_MENU = 10
ACTION_PAUSE = 12
ACTION_STOP = 13
ACTION_SHOW_INFO = 11
ACTION_SHOW_GUI = 18
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_UP = 3
ACTION_MOVE_DOWN = 4
ACTION_PLAYER_PLAY = 79
ACTION_VOLUME_UP = 88
ACTION_VOLUME_DOWN = 89
ACTION_MUTE = 91
ACTION_CONTEXT_MENU = 117
ACTION_BUILT_IN_FUNCTION = 122
KEY_BUTTON_BACK = 275
if not remoteControl and action != ACTION_BUILT_IN_FUNCTION:
# if we're not passing remote control actions, any non-autogenerated
# remote action that reaches here is a signal to close this dummy
# window as Chrome is gone
if debug:
print "Netflixbmc: Closing dummy window after action %d" % (action.getId())
self.close()
return
if osWin:
proc = subprocess.Popen('WMIC PROCESS get Caption', shell=True, stdout=subprocess.PIPE)
procAll = ""
for line in proc.stdout:
procAll+=line
if "chrome.exe" in procAll:
if action in [ACTION_SHOW_INFO, ACTION_SHOW_GUI, ACTION_STOP, ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_BUTTON_BACK]:
subprocess.Popen('"'+sendKeysPath+'"'+' sendKey=Close', shell=False)
self.close()
elif action==ACTION_SELECT_ITEM:
subprocess.Popen('"'+sendKeysPath+'"'+' sendKey=PlayPause', shell=False)
elif action==ACTION_MOVE_LEFT:
subprocess.Popen('"'+sendKeysPath+'"'+' sendKey=SeekLeft', shell=False)
elif action==ACTION_MOVE_RIGHT:
subprocess.Popen('"'+sendKeysPath+'"'+' sendKey=SeekRight', shell=False)
elif action==ACTION_MOVE_UP:
subprocess.Popen('"'+sendKeysPath+'"'+' sendKey=VolumeUp', shell=False)
elif action==ACTION_MOVE_DOWN:
subprocess.Popen('"'+sendKeysPath+'"'+' sendKey=VolumeDown', shell=False)
else:
self.close()
elif osLinux:
doClose = False
key=None
if action in [ACTION_SHOW_GUI, ACTION_STOP, ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_BUTTON_BACK]:
key="control+shift+q"
doClose=True
elif action in [ ACTION_SELECT_ITEM, ACTION_PLAYER_PLAY, ACTION_PAUSE ]:
key="space"
elif action==ACTION_MOVE_LEFT:
key="Left"
elif action==ACTION_MOVE_RIGHT:
key="Right"
elif action==ACTION_SHOW_INFO:
key="question"
elif action==ACTION_VOLUME_UP:
key="Up"
elif action==ACTION_VOLUME_DOWN:
key="Down"
elif action==ACTION_MUTE:
key="M"
elif action==ACTION_CONTEXT_MENU:
key="ctrl+alt+shift+d"
elif debug:
print "Netflixbmc: unmapped key action=%d" % (action.getId())
if key is not None:
p = subprocess.Popen('xdotool search --onlyvisible --class "google-chrome|Chromium" key %s' % key, shell=True)
p.wait()
# 0 for success, 127 if xdotool not found in PATH. Return code is 1 if window not found (indicating should close).
if not p.returncode in [0,127] or doClose:
self.close()
if debug:
print "Netflixbmc: remote action=%d key=%s xdotool result=%d" % (action.getId(), key, p.returncode)
elif osOSX:
proc = subprocess.Popen('/bin/ps ax', shell=True, stdout=subprocess.PIPE)
procAll = ""
for line in proc.stdout:
procAll+=line
if "chrome" in procAll:
if action in [ACTION_SHOW_INFO, ACTION_SHOW_GUI, ACTION_STOP, ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_BUTTON_BACK]:
subprocess.Popen('cliclick kd:cmd t:q ku:cmd', shell=True)
self.close()
elif action==ACTION_SELECT_ITEM:
subprocess.Popen('cliclick t:p', shell=True)
elif action==ACTION_MOVE_LEFT:
subprocess.Popen('cliclick kp:arrow-left', shell=True)
elif action==ACTION_MOVE_RIGHT:
subprocess.Popen('cliclick kp:arrow-right', shell=True)
elif action==ACTION_MOVE_UP:
subprocess.Popen('cliclick kp:arrow-up', shell=True)
elif action==ACTION_MOVE_DOWN:
subprocess.Popen('cliclick kp:arrow-down', shell=True)
else:
self.close()
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
thumb = urllib.unquote_plus(params.get('thumb', ''))
name = urllib.unquote_plus(params.get('name', ''))
season = urllib.unquote_plus(params.get('season', ''))
seriesID = urllib.unquote_plus(params.get('seriesID', ''))
type = urllib.unquote_plus(params.get('type', ''))
if mode == 'main':
main(type)
elif mode == 'wiHome':
wiHome(type)
elif mode == 'listVideos':
listVideos(url, type)
elif mode == 'listSliderVideos':
listSliderVideos(url, type)
elif mode == 'listSearchVideos':
listSearchVideos(url, type)
elif mode == 'addToQueue':
addToQueue(url)
elif mode == 'removeFromQueue':
removeFromQueue(url)
elif mode == 'playVideo':
playVideo(url)
elif mode == 'playVideoMain':
playVideoMain(url)
elif mode == 'search':
search(type)
elif mode == 'login':
login()
elif mode == 'chooseProfile':
chooseProfile()
elif mode == 'listGenres':
listGenres(url, type)
elif mode == 'listTvGenres':
listTvGenres(type)
elif mode == 'listViewingActivity':
listViewingActivity(type)
elif mode == 'listSeasons':
listSeasons(name, url, thumb)
elif mode == 'listEpisodes':
listEpisodes(seriesID, url)
elif mode == 'configureUtility':
configureUtility()
elif mode == 'chromePluginOptions':
chromePluginOptions()
elif mode == 'deleteCookies':
deleteCookies()
elif mode == 'deleteCache':
deleteCache()
elif mode == 'deleteChromeUserData':
deleteChromeUserDataFolder()
elif mode == 'resetAddon':
resetAddon()
elif mode == 'playTrailer':
playTrailer(url)
elif mode == 'addMyListToLibrary':
addMyListToLibrary()
elif mode == 'addMovieToLibrary':
addMovieToLibrary(url, name)
elif mode == 'addSeriesToLibrary':
addSeriesToLibrary(seriesID, name, url)
elif mode == 'profileDisplayUpdate':
profileDisplayUpdate()
else:
index()
if trace_on:
pydevd.stoptrace()
|
gpl-2.0
| 8,642,612,235,448,023,000
| 44.307635
| 530
| 0.60971
| false
| 3.75347
| false
| false
| false
|
jhseu/tensorflow
|
tensorflow/python/ops/summary_ops_v2.py
|
1
|
46626
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations to emit summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import getpass
import os
import re
import threading
import time
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import profiler as _profiler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_summary_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Name for graph collection of summary writer init ops, which is only exposed
# as a legacy API for tf.contrib.summary in TF 1.x.
_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"
_EXPERIMENT_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,256}$")
_RUN_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,512}$")
_USER_NAME_PATTERNS = re.compile(r"^[a-z]([-a-z0-9]{0,29}[a-z0-9])?$", re.I)
class _SummaryState(threading.local):
def __init__(self):
super(_SummaryState, self).__init__()
self.is_recording = None
# TODO(slebedev): why a separate flag for DS and is it on by default?
self.is_recording_distribution_strategy = True
self.writer = None
self.step = None
_summary_state = _SummaryState()
def _should_record_summaries_internal(default_state):
"""Returns boolean Tensor if summaries should/shouldn't be recorded.
Now the summary condition is decided by logical "and" of below conditions:
First, summary writer must be set. Given this constraint is met,
ctx.summary_recording and ctx.summary_recording_distribution_strategy.
The former one is usually set by user, and the latter one is controlled
by DistributionStrategy (tf.distribute.ReplicaContext).
Args:
default_state: can be True or False. The default summary behavior when
summary writer is set and the user does not specify
ctx.summary_recording and ctx.summary_recording_distribution_strategy
is True.
"""
if _summary_state.writer is None:
return constant_op.constant(False)
resolve = lambda x: x() if callable(x) else x
cond_distributed = resolve(_summary_state.is_recording_distribution_strategy)
cond = resolve(_summary_state.is_recording)
if cond is None:
cond = default_state
return math_ops.logical_and(cond_distributed, cond)
def _should_record_summaries_v2():
"""Returns boolean Tensor which is true if summaries should be recorded.
If no recording status has been set, this defaults to True, unlike the public
should_record_summaries().
"""
return _should_record_summaries_internal(default_state=True)
def should_record_summaries():
"""Returns boolean Tensor which is true if summaries should be recorded."""
return _should_record_summaries_internal(default_state=False)
@tf_export("summary.record_if", v1=[])
@tf_contextlib.contextmanager
def record_if(condition):
"""Sets summary recording on or off per the provided boolean value.
  The provided value can be a python boolean, a scalar boolean Tensor, or
  a callable providing such a value; if a callable is passed it will be
invoked on-demand to determine whether summary writing will occur.
Args:
condition: can be True, False, a bool Tensor, or a callable providing such.
Yields:
Returns a context manager that sets this value on enter and restores the
previous value on exit.
"""
old = _summary_state.is_recording
try:
_summary_state.is_recording = condition
yield
finally:
_summary_state.is_recording = old
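# A minimal usage sketch for record_if() (illustrative only; `writer`, `loss`
# and `step` below are hypothetical names):
#
#   with writer.as_default():
#     with record_if(lambda: step % 100 == 0):
#       write("loss", loss, step=step)  # recorded only every 100th step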
# TODO(apassos) consider how to handle local step here.
def record_summaries_every_n_global_steps(n, global_step=None):
"""Sets the should_record_summaries Tensor to true if global_step % n == 0."""
if global_step is None:
global_step = training_util.get_or_create_global_step()
with ops.device("cpu:0"):
should = lambda: math_ops.equal(global_step % n, 0)
if not context.executing_eagerly():
should = should()
return record_if(should)
def always_record_summaries():
"""Sets the should_record_summaries Tensor to always true."""
return record_if(True)
def never_record_summaries():
"""Sets the should_record_summaries Tensor to always false."""
return record_if(False)
@tf_export("summary.experimental.get_step", v1=[])
def get_step():
"""Returns the default summary step for the current thread.
Returns:
The step set by `tf.summary.experimental.set_step()` if one has been set,
otherwise None.
"""
return _summary_state.step
@tf_export("summary.experimental.set_step", v1=[])
def set_step(step):
"""Sets the default summary step for the current thread.
For convenience, this function sets a default value for the `step` parameter
used in summary-writing functions elsewhere in the API so that it need not
be explicitly passed in every such invocation. The value can be a constant
or a variable, and can be retrieved via `tf.summary.experimental.get_step()`.
Note: when using this with @tf.functions, the step value will be captured at
the time the function is traced, so changes to the step outside the function
will not be reflected inside the function unless using a `tf.Variable` step.
Args:
step: An `int64`-castable default step value, or None to unset.
"""
_summary_state.step = step
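# A minimal sketch of the default-step mechanism (illustrative only; the
# names below are hypothetical):
#
#   step_var = tf.Variable(0, dtype=tf.int64)
#   set_step(step_var)        # summary calls below may now omit `step=`
#   write("loss", 0.5)        # uses get_step(), i.e. step_var
#   step_var.assign_add(1)    # a tf.Variable step stays visible inside
#                             # previously traced tf.functions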
@tf_export("summary.SummaryWriter", v1=[])
@six.add_metaclass(abc.ABCMeta)
class SummaryWriter(object):
"""Interface representing a stateful summary writer object."""
@abc.abstractmethod
def set_as_default(self):
"""Enables this summary writer for the current thread."""
raise NotImplementedError()
@abc.abstractmethod
@tf_contextlib.contextmanager
def as_default(self):
"""Returns a context manager that enables summary writing."""
raise NotImplementedError()
def init(self):
"""Initializes the summary writer."""
raise NotImplementedError()
def flush(self):
"""Flushes any buffered data."""
raise NotImplementedError()
def close(self):
"""Flushes and closes the summary writer."""
raise NotImplementedError()
class ResourceSummaryWriter(SummaryWriter):
"""Implementation of SummaryWriter using a SummaryWriterInterface resource."""
def __init__(self,
shared_name,
init_op_fn,
name=None,
v2=False,
metadata=None):
self._resource = gen_summary_ops.summary_writer(
shared_name=shared_name, name=name)
# TODO(nickfelt): cache other constructed ops in graph mode
self._init_op_fn = init_op_fn
self._init_op = init_op_fn(self._resource)
self._v2 = v2
self._metadata = {} if metadata is None else metadata
self._closed = False
if context.executing_eagerly():
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device="cpu:0")
else:
ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op)
def set_as_default(self):
"""Enables this summary writer for the current thread."""
if self._v2 and context.executing_eagerly() and self._closed:
raise RuntimeError("SummaryWriter is already closed")
_summary_state.writer = self
@tf_contextlib.contextmanager
def as_default(self):
"""Returns a context manager that enables summary writing."""
if self._v2 and context.executing_eagerly() and self._closed:
raise RuntimeError("SummaryWriter is already closed")
old = _summary_state.writer
try:
_summary_state.writer = self
yield self
# Flushes the summary writer in eager mode or in graph functions, but
# not in legacy graph mode (you're on your own there).
self.flush()
finally:
_summary_state.writer = old
def init(self):
"""Initializes the summary writer."""
if self._v2:
if context.executing_eagerly() and self._closed:
raise RuntimeError("SummaryWriter is already closed")
return self._init_op
# Legacy behavior allows re-initializing the resource.
return self._init_op_fn(self._resource)
def flush(self):
"""Flushes any buffered data."""
if self._v2 and context.executing_eagerly() and self._closed:
return
return _flush_fn(writer=self)
def close(self):
"""Flushes and closes the summary writer."""
if self._v2 and context.executing_eagerly() and self._closed:
return
try:
with ops.control_dependencies([self.flush()]):
with ops.device("cpu:0"):
return gen_summary_ops.close_summary_writer(self._resource)
finally:
if self._v2 and context.executing_eagerly():
self._closed = True
class NoopSummaryWriter(SummaryWriter):
"""A summary writer that does nothing, for create_noop_writer()."""
def set_as_default(self):
pass
@tf_contextlib.contextmanager
def as_default(self):
yield
def init(self):
pass
def flush(self):
pass
def close(self):
pass
@tf_export(v1=["summary.initialize"])
def initialize(
graph=None, # pylint: disable=redefined-outer-name
session=None):
"""Initializes summary writing for graph execution mode.
This operation is a no-op when executing eagerly.
This helper method provides a higher-level alternative to using
`tf.contrib.summary.summary_writer_initializer_op` and
`tf.contrib.summary.graph`.
Most users will also want to call `tf.compat.v1.train.create_global_step`
which can happen before or after this function is called.
Args:
graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
This function will not write the default graph by default. When
writing to an event log file, the associated step will be zero.
session: So this method can call `tf.Session.run`. This defaults
to `tf.compat.v1.get_default_session`.
Raises:
RuntimeError: If the current thread has no default
`tf.contrib.summary.SummaryWriter`.
ValueError: If session wasn't passed and no default session.
"""
if context.executing_eagerly():
return
if _summary_state.writer is None:
raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
if session is None:
session = ops.get_default_session()
if session is None:
raise ValueError("session must be passed if no default session exists")
session.run(summary_writer_initializer_op())
if graph is not None:
data = _serialize_graph(graph)
x = array_ops.placeholder(dtypes.string)
session.run(_graph(x, 0), feed_dict={x: data})
@tf_export("summary.create_file_writer", v1=[])
def create_file_writer_v2(logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None):
"""Creates a summary file writer for the given log directory.
Args:
logdir: a string specifying the directory in which to write an event file.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: a name for the op that creates the writer.
Returns:
A SummaryWriter object.
"""
if logdir is None:
raise ValueError("logdir cannot be None")
inside_function = ops.inside_function()
with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
# Run init inside an init_scope() to hoist it out of tf.functions.
with ops.init_scope():
if context.executing_eagerly():
_check_create_file_writer_args(
inside_function,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix)
logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant(".v2")
# Prepend the PID and a process-local UID to the filename suffix to avoid
# filename collisions within the machine (the filename already contains
# the hostname to avoid cross-machine collisions).
unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
filename_suffix = unique_prefix + filename_suffix
# Use a unique shared_name to prevent resource sharing.
if context.executing_eagerly():
shared_name = context.shared_name()
else:
shared_name = ops.name_from_scope_name(scope) # pylint: disable=protected-access
return ResourceSummaryWriter(
shared_name=shared_name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_file_writer,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix),
name=name,
v2=True,
metadata={"logdir": logdir})
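# A minimal usage sketch for the v2 file writer (illustrative only; the log
# directory below is hypothetical). This function is exposed publicly as
# tf.summary.create_file_writer:
#
#   writer = create_file_writer_v2("/tmp/logs")
#   with writer.as_default():
#     write("accuracy", 0.9, step=0)
#   # as_default() flushes the writer automatically on exit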
def create_file_writer(logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None):
"""Creates a summary file writer in the current context under the given name.
Args:
logdir: a string, or None. If a string, creates a summary file writer
which writes to the directory named by the string. If None, returns
a mock object which acts like a summary writer but does nothing,
useful to use as a context manager.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: Shared name for this SummaryWriter resource stored to default
Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a
summary writer resource with this shared name already exists, the returned
SummaryWriter wraps that resource and the other arguments have no effect.
Returns:
Either a summary writer or an empty object which can be used as a
summary writer.
"""
if logdir is None:
return NoopSummaryWriter()
logdir = str(logdir)
with ops.device("cpu:0"):
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant(".v2")
if name is None:
name = "logdir:" + logdir
return ResourceSummaryWriter(
shared_name=name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_file_writer,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix))
def create_db_writer(db_uri,
experiment_name=None,
run_name=None,
user_name=None,
name=None):
"""Creates a summary database writer in the current context.
This can be used to write tensors from the execution graph directly
to a database. Only SQLite is supported right now. This function
will create the schema if it doesn't exist. Entries in the Users,
Experiments, and Runs tables will be created automatically if they
don't already exist.
Args:
db_uri: For example "file:/tmp/foo.sqlite".
experiment_name: Defaults to YYYY-MM-DD in local time if None.
Empty string means the Run will not be associated with an
Experiment. Can't contain ASCII control characters or <>. Case
sensitive.
run_name: Defaults to HH:MM:SS in local time if None. Empty string
means a Tag will not be associated with any Run. Can't contain
ASCII control characters or <>. Case sensitive.
user_name: Defaults to system username if None. Empty means the
Experiment will not be associated with a User. Must be valid as
both a DNS label and Linux username.
name: Shared name for this SummaryWriter resource stored to default
`tf.Graph`.
Returns:
A `tf.summary.SummaryWriter` instance.
"""
with ops.device("cpu:0"):
if experiment_name is None:
experiment_name = time.strftime("%Y-%m-%d", time.localtime(time.time()))
if run_name is None:
run_name = time.strftime("%H:%M:%S", time.localtime(time.time()))
if user_name is None:
user_name = getpass.getuser()
experiment_name = _cleanse_string(
"experiment_name", _EXPERIMENT_NAME_PATTERNS, experiment_name)
run_name = _cleanse_string("run_name", _RUN_NAME_PATTERNS, run_name)
user_name = _cleanse_string("user_name", _USER_NAME_PATTERNS, user_name)
return ResourceSummaryWriter(
shared_name=name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_db_writer,
db_uri=db_uri,
experiment_name=experiment_name,
run_name=run_name,
user_name=user_name))
@tf_export("summary.create_noop_writer", v1=[])
def create_noop_writer():
"""Returns a summary writer that does nothing.
This is useful as a placeholder in code that expects a context manager.
"""
return NoopSummaryWriter()
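# A minimal sketch (illustrative only): a no-op writer lets code that expects
# a writer context run unchanged when logging is not wanted.
#
#   with create_noop_writer().as_default():
#     write("ignored", 1.0, step=0)  # returns False; nothing is written
#                                    # (assuming no other default writer)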
def _cleanse_string(name, pattern, value):
if isinstance(value, six.string_types) and pattern.search(value) is None:
raise ValueError("%s (%s) must match %s" % (name, value, pattern.pattern))
return ops.convert_to_tensor(value, dtypes.string)
def _nothing():
"""Convenient else branch for when summaries do not record."""
return constant_op.constant(False)
@tf_export(v1=["summary.all_v2_summary_ops"])
def all_v2_summary_ops():
"""Returns all V2-style summary ops defined in the current default graph.
This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except
for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but
does *not* include TF 1.x tf.summary ops.
Returns:
List of summary ops, or None if called under eager execution.
"""
if context.executing_eagerly():
return None
return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
def summary_writer_initializer_op():
"""Graph-mode only. Returns the list of ops to create all summary writers.
Returns:
The initializer ops.
Raises:
RuntimeError: If in Eager mode.
"""
if context.executing_eagerly():
raise RuntimeError(
"tf.contrib.summary.summary_writer_initializer_op is only "
"supported in graph mode.")
return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]")
@tf_export("summary.experimental.summary_scope", v1=[])
@tf_contextlib.contextmanager
def summary_scope(name, default_name="summary", values=None):
"""Experimental context manager for use when defining a custom summary op.
This behaves similarly to `tf.name_scope`, except that it returns a generated
summary tag in addition to the scope name. The tag is structurally similar to
the scope name - derived from the user-provided name, prefixed with enclosing
name scopes if any - but we relax the constraint that it be uniquified, as
well as the character set limitation (so the user-provided name can contain
characters not legal for scope names; in the scope name these are removed).
This makes the summary tag more predictable and consistent for the user.
For example, to define a new summary op called `my_op`:
```python
def my_op(name, my_value, step):
with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope):
my_value = tf.convert_to_tensor(my_value)
return tf.summary.write(tag, my_value, step=step)
```
Args:
name: string name for the summary.
default_name: Optional; if provided, used as default name of the summary.
values: Optional; passed as `values` parameter to name_scope.
Yields:
A tuple `(tag, scope)` as described above.
"""
name = name or default_name
current_scope = ops.get_name_scope()
tag = current_scope + "/" + name if current_scope else name
# Strip illegal characters from the scope name, and if that leaves nothing,
# use None instead so we pick up the default name.
name = _INVALID_SCOPE_CHARACTERS.sub("", name) or None
with ops.name_scope(name, default_name, values, skip_on_eager=False) as scope:
yield tag, scope
@tf_export("summary.write", v1=[])
def write(tag, tensor, step=None, metadata=None, name=None):
"""Writes a generic summary to the default SummaryWriter if one exists.
This exists primarily to support the definition of type-specific summary ops
like scalar() and image(), and is not intended for direct use unless defining
a new type-specific summary op.
Args:
tag: string tag used to identify the summary (e.g. in TensorBoard), usually
generated with `tf.summary.summary_scope`
tensor: the Tensor holding the summary data to write or a callable that
returns this Tensor. If a callable is passed, it will only be called when
a default SummaryWriter exists and the recording condition specified by
`record_if()` is met.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
metadata: Optional SummaryMetadata, as a proto or serialized bytes
name: Optional string name for this op.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with ops.name_scope(name, "write_summary") as scope:
if _summary_state.writer is None:
return constant_op.constant(False)
if step is None:
step = get_step()
if step is None:
raise ValueError("No step set via 'step' argument or "
"tf.summary.experimental.set_step()")
if metadata is None:
serialized_metadata = b""
elif hasattr(metadata, "SerializeToString"):
serialized_metadata = metadata.SerializeToString()
else:
serialized_metadata = metadata
def record():
"""Record the actual summary and return True."""
# Note the identity to move the tensor to the CPU.
with ops.device("cpu:0"):
summary_tensor = tensor() if callable(tensor) else array_ops.identity(
tensor)
write_summary_op = gen_summary_ops.write_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
step,
summary_tensor,
tag,
serialized_metadata,
name=scope)
with ops.control_dependencies([write_summary_op]):
return constant_op.constant(True)
op = smart_cond.smart_cond(
_should_record_summaries_v2(), record, _nothing, name="summary_cond")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
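# A minimal sketch of write() with a callable tensor (illustrative only;
# `expensive_reduction` is a hypothetical helper). Passing a callable defers
# computing the value until the recording condition is actually met:
#
#   write("stats", lambda: expensive_reduction(), step=0)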
@tf_export("summary.experimental.write_raw_pb", v1=[])
def write_raw_pb(tensor, step=None, name=None):
"""Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.
Experimental: this exists to support the usage of V1-style manual summary
writing (via the construction of a `tf.compat.v1.Summary` protocol buffer)
with the V2 summary writing API.
Args:
tensor: the string Tensor holding one or more serialized `Summary` protobufs
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
name: Optional string name for this op.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with ops.name_scope(name, "write_raw_pb") as scope:
if _summary_state.writer is None:
return constant_op.constant(False)
if step is None:
step = get_step()
if step is None:
raise ValueError("No step set via 'step' argument or "
"tf.summary.experimental.set_step()")
def record():
"""Record the actual summary and return True."""
# Note the identity to move the tensor to the CPU.
with ops.device("cpu:0"):
raw_summary_op = gen_summary_ops.write_raw_proto_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
step,
array_ops.identity(tensor),
name=scope)
with ops.control_dependencies([raw_summary_op]):
return constant_op.constant(True)
with ops.device("cpu:0"):
op = smart_cond.smart_cond(
_should_record_summaries_v2(), record, _nothing, name="summary_cond")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
def summary_writer_function(name, tensor, function, family=None):
"""Helper function to write summaries.
Args:
name: name of the summary
tensor: main tensor to form the summary
function: function taking a tag and a scope which writes the summary
family: optional, the summary's family
Returns:
The result of writing the summary.
"""
name_scope = ops.get_name_scope()
if name_scope:
# Add a slash to allow reentering the name scope.
name_scope += "/"
def record():
with ops.name_scope(name_scope), summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
with ops.control_dependencies([function(tag, scope)]):
return constant_op.constant(True)
if _summary_state.writer is None:
return control_flow_ops.no_op()
with ops.device("cpu:0"):
op = smart_cond.smart_cond(
should_record_summaries(), record, _nothing, name="")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
def generic(name, tensor, metadata=None, family=None, step=None):
"""Writes a tensor summary if possible."""
def function(tag, scope):
if metadata is None:
serialized_metadata = constant_op.constant("")
elif hasattr(metadata, "SerializeToString"):
serialized_metadata = constant_op.constant(metadata.SerializeToString())
else:
serialized_metadata = metadata
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
array_ops.identity(tensor),
tag,
serialized_metadata,
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def scalar(name, tensor, family=None, step=None):
"""Writes a scalar summary if possible.
Unlike `tf.contrib.summary.generic` this op may change the dtype
depending on the writer, for both practical and efficiency concerns.
Args:
name: An arbitrary name for this summary.
tensor: A `tf.Tensor` Must be one of the following types:
`float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
`int8`, `uint16`, `half`, `uint32`, `uint64`.
family: Optional, the summary's family.
step: The `int64` monotonic step variable, which defaults
to `tf.compat.v1.train.get_global_step`.
Returns:
The created `tf.Operation` or a `tf.no_op` if summary writing has
not been enabled for this context.
"""
def function(tag, scope):
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_scalar_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def histogram(name, tensor, family=None, step=None):
"""Writes a histogram summary if possible."""
def function(tag, scope):
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_histogram_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
"""Writes an image summary if possible."""
def function(tag, scope):
bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
if bad_color is None else bad_color)
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_image_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
bad_color_,
max_images,
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
"""Writes an audio summary if possible."""
def function(tag, scope):
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_audio_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
sample_rate=sample_rate,
max_outputs=max_outputs,
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def graph(param, step=None, name=None):
"""Writes a TensorFlow graph to the summary interface.
The graph summary is, strictly speaking, not a summary. Conditions
like `tf.summary.should_record_summaries` do not apply. Only
a single graph can be associated with a particular run. If multiple
graphs are written, then only the last one will be considered by
TensorBoard.
When not using eager execution mode, the user should consider passing
the `graph` parameter to `tf.compat.v1.summary.initialize` instead of
  calling this function. Otherwise special care needs to be taken, since
  the graph being recorded may also be the graph that executes this op.
Args:
param: A `tf.Tensor` containing a serialized graph proto. When
eager execution is enabled, this function will automatically
coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types.
step: The global step variable. This doesn't have useful semantics
for graph summaries, but is used anyway, due to the structure of
event log files. This defaults to the global step.
name: A name for the operation (optional).
Returns:
The created `tf.Operation` or a `tf.no_op` if summary writing has
not been enabled for this context.
Raises:
TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
"""
if not context.executing_eagerly() and not isinstance(param, ops.Tensor):
raise TypeError("graph() needs a tf.Tensor (e.g. tf.placeholder) in graph "
"mode, but was: %s" % type(param))
writer = _summary_state.writer
if writer is None:
return control_flow_ops.no_op()
with ops.device("cpu:0"):
if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)
else:
tensor = array_ops.identity(param)
return gen_summary_ops.write_graph_summary(
writer._resource, _choose_step(step), tensor, name=name) # pylint: disable=protected-access
_graph = graph # for functions with a graph parameter
def import_event(tensor, name=None):
"""Writes a `tf.compat.v1.Event` binary proto.
This can be used to import existing event logs into a new summary writer sink.
Please note that this is lower level than the other summary functions and
will ignore the `tf.summary.should_record_summaries` setting.
Args:
tensor: A `tf.Tensor` of type `string` containing a serialized
`tf.compat.v1.Event` proto.
name: A name for the operation (optional).
Returns:
The created `tf.Operation`.
"""
return gen_summary_ops.import_event(
_summary_state.writer._resource, tensor, name=name) # pylint: disable=protected-access
@tf_export("summary.flush", v1=[])
def flush(writer=None, name=None):
"""Forces summary writer to send any buffered data to storage.
This operation blocks until that finishes.
Args:
writer: The `tf.summary.SummaryWriter` resource to flush.
      If this parameter is None, the thread-default writer is used; if no
      default writer exists either, a `tf.no_op` is returned.
name: A name for the operation (optional).
Returns:
The created `tf.Operation`.
"""
if writer is None:
writer = _summary_state.writer
if writer is None:
return control_flow_ops.no_op()
if isinstance(writer, ResourceSummaryWriter):
resource = writer._resource # pylint: disable=protected-access
else:
# Assume we were passed a raw resource tensor.
resource = writer
with ops.device("cpu:0"):
return gen_summary_ops.flush_summary_writer(resource, name=name)
_flush_fn = flush # for within SummaryWriter.flush()
def eval_dir(model_dir, name=None):
"""Construct a logdir for an eval summary writer."""
return os.path.join(model_dir, "eval" if not name else "eval_" + name)
@deprecation.deprecated(date=None,
instructions="Renamed to create_file_writer().")
def create_summary_file_writer(*args, **kwargs):
"""Please use `tf.contrib.summary.create_file_writer`."""
logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
"to create_file_writer")
return create_file_writer(*args, **kwargs)
def _serialize_graph(arbitrary_graph):
if isinstance(arbitrary_graph, ops.Graph):
return arbitrary_graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return arbitrary_graph.SerializeToString()
def _choose_step(step):
if step is None:
return training_util.get_or_create_global_step()
if not isinstance(step, ops.Tensor):
return ops.convert_to_tensor(step, dtypes.int64)
return step
def _check_create_file_writer_args(inside_function, **kwargs):
"""Helper to check the validity of arguments to a create_file_writer() call.
Args:
inside_function: whether the create_file_writer() call is in a tf.function
**kwargs: the arguments to check, as kwargs to give them names.
Raises:
ValueError: if the arguments are graph tensors.
"""
for arg_name, arg in kwargs.items():
if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tensor(arg):
if inside_function:
raise ValueError(
"Invalid graph Tensor argument \"%s=%s\" to create_file_writer() "
"inside an @tf.function. The create call will be lifted into the "
"outer eager execution context, so it cannot consume graph tensors "
"defined inside the function body." % (arg_name, arg))
else:
raise ValueError(
"Invalid graph Tensor argument \"%s=%s\" to eagerly executed "
"create_file_writer()." % (arg_name, arg))
def run_metadata(name, data, step=None):
"""Writes entire RunMetadata summary.
A RunMetadata can contain DeviceStats, partition graphs, and function graphs.
Please refer to the proto for definition of each field.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A RunMetadata proto to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = summary_pb2.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_run_metadata"
# version number = 1
summary_metadata.plugin_data.content = b"1"
with summary_scope(name,
"graph_run_metadata_summary",
[data, step]) as (tag, _):
with ops.device("cpu:0"):
tensor = constant_op.constant(data.SerializeToString(),
dtype=dtypes.string)
return write(
tag=tag,
tensor=tensor,
step=step,
metadata=summary_metadata)
def run_metadata_graphs(name, data, step=None):
"""Writes graphs from a RunMetadata summary.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A RunMetadata proto to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = summary_pb2.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_run_metadata_graph"
# version number = 1
summary_metadata.plugin_data.content = b"1"
data = config_pb2.RunMetadata(
function_graphs=data.function_graphs,
partition_graphs=data.partition_graphs)
with summary_scope(name,
"graph_run_metadata_graph_summary",
[data, step]) as (tag, _):
with ops.device("cpu:0"):
tensor = constant_op.constant(data.SerializeToString(),
dtype=dtypes.string)
return write(
tag=tag,
tensor=tensor,
step=step,
metadata=summary_metadata)
def keras_model(name, data, step=None):
"""Writes a Keras model as JSON to as a Summary.
Writing the Keras model configuration allows the TensorBoard graph plugin to
render a conceptual graph, as opposed to graph of ops. In case the model fails
to serialze as JSON, it ignores and returns False.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A Keras Model to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or False if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = summary_pb2.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_keras_model"
# version number = 1
summary_metadata.plugin_data.content = b"1"
try:
json_string = data.to_json()
except Exception as exc: # pylint: disable=broad-except
    # An exception should not break the model code.
logging.warn("Model failed to serialize as JSON. Ignoring... %s" % exc)
return False
with summary_scope(name, "graph_keras_model", [data, step]) as (tag, _):
with ops.device("cpu:0"):
tensor = constant_op.constant(json_string, dtype=dtypes.string)
return write(
tag=tag,
tensor=tensor,
step=step,
metadata=summary_metadata)
_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler"))
_current_trace_context_lock = threading.Lock()
_current_trace_context = None
@tf_export("summary.trace_on", v1=[])
def trace_on(graph=True, profiler=False): # pylint: disable=redefined-outer-name
"""Starts a trace to record computation graphs and profiling information.
Must be invoked in eager mode.
  When enabled, the TensorFlow runtime will collect information that can later
  be exported and consumed by TensorBoard. The trace is activated across the
  entire TensorFlow runtime and affects all threads of execution.
To stop the trace and export the collected information, use
`tf.summary.trace_export`. To stop the trace without exporting, use
`tf.summary.trace_off`.
Args:
graph: If True, enables collection of executed graphs. It includes ones from
tf.function invocation and ones from the legacy graph mode. The default
is True.
profiler: If True, enables the advanced profiler. Enabling profiler
implicitly enables the graph collection. The profiler may incur a high
memory overhead. The default is False.
"""
if ops.inside_function():
logging.warn("Cannot enable trace inside a tf.function.")
return
if not context.context().executing_eagerly():
logging.warn("Must enable trace in eager mode.")
return
global _current_trace_context
with _current_trace_context_lock:
if _current_trace_context:
logging.warn("Trace already enabled")
return
if graph and not profiler:
context.context().enable_graph_collection()
if profiler:
context.context().enable_run_metadata()
_profiler.start()
_current_trace_context = _TraceContext(graph=graph, profiler=profiler)
@tf_export("summary.trace_export", v1=[])
def trace_export(name, step=None, profiler_outdir=None):
"""Stops and exports the active trace as a Summary and/or profile file.
Stops the trace and exports all metadata collected during the trace to the
default SummaryWriter, if one has been set.
Args:
name: A name for the summary to be written.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
profiler_outdir: Output directory for profiler. This is only used when the
profiler was enabled when the trace was started. In that case, if there is
a logdir-based default SummaryWriter, this defaults to the same directory,
but otherwise the argument must be passed.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
global _current_trace_context
if ops.inside_function():
logging.warn("Cannot export trace inside a tf.function.")
return
if not context.context().executing_eagerly():
logging.warn("Can only export trace while executing eagerly.")
return
with _current_trace_context_lock:
if _current_trace_context is None:
raise ValueError("Must enable trace before export.")
graph, profiler = _current_trace_context # pylint: disable=redefined-outer-name
if profiler_outdir is None \
and isinstance(_summary_state.writer, ResourceSummaryWriter):
logdir = _summary_state.writer._metadata.get("logdir") # pylint: disable=protected-access
if logdir is not None:
profiler_outdir = logdir
if profiler and profiler_outdir is None:
raise ValueError("Must set profiler_outdir or "
"enable summary writer with logdir.")
run_meta = context.context().export_run_metadata()
if graph and not profiler:
run_metadata_graphs(name, run_meta, step)
else:
run_metadata(name, run_meta, step)
if profiler:
_profiler.save(profiler_outdir, _profiler.stop())
trace_off()
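# A minimal tracing sketch (illustrative only; `traced_fn` and the log
# directory are hypothetical):
#
#   trace_on(graph=True, profiler=False)
#   traced_fn(tf.constant(1.0))               # run the tf.function to trace
#   with create_file_writer_v2("/tmp/logs").as_default():
#     trace_export("traced_fn_graph", step=0) # stops the trace and writes it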
@tf_export("summary.trace_off", v1=[])
def trace_off():
"""Stops the current trace and discards any collected information."""
global _current_trace_context
with _current_trace_context_lock:
_current_trace_context = None
# Disabling run_metadata disables graph collection as well.
context.context().disable_run_metadata()
# profiler only has start and stop. One needs to stop in order to export
# and stopping when it is not running will raise an error.
try:
_profiler.stop()
except _profiler.ProfilerNotRunningError:
pass
|
apache-2.0
| 2,818,230,846,669,279,700
| 35.626866
| 104
| 0.690816
| false
| 3.996743
| false
| false
| false
|
forkbong/qutebrowser
|
qutebrowser/config/websettings.py
|
1
|
9471
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Bridge from QWeb(Engine)Settings to our own settings."""
import re
import argparse
import functools
import dataclasses
from typing import Any, Callable, Dict, Optional
from PyQt5.QtCore import QUrl, pyqtSlot, qVersion
from PyQt5.QtGui import QFont
import qutebrowser
from qutebrowser.config import config
from qutebrowser.utils import usertypes, urlmatch, qtutils, utils
from qutebrowser.misc import objects, debugcachestats
UNSET = object()
@dataclasses.dataclass
class UserAgent:
"""A parsed user agent."""
os_info: str
webkit_version: str
upstream_browser_key: str
upstream_browser_version: str
qt_key: str
@classmethod
def parse(cls, ua: str) -> 'UserAgent':
"""Parse a user agent string into its components."""
comment_matches = re.finditer(r'\(([^)]*)\)', ua)
os_info = list(comment_matches)[0].group(1)
version_matches = re.finditer(r'(\S+)/(\S+)', ua)
versions = {}
for match in version_matches:
versions[match.group(1)] = match.group(2)
webkit_version = versions['AppleWebKit']
if 'Chrome' in versions:
upstream_browser_key = 'Chrome'
qt_key = 'QtWebEngine'
elif 'Version' in versions:
upstream_browser_key = 'Version'
qt_key = 'Qt'
else:
raise ValueError("Invalid upstream browser key: {}".format(ua))
upstream_browser_version = versions[upstream_browser_key]
return cls(os_info=os_info,
webkit_version=webkit_version,
upstream_browser_key=upstream_browser_key,
upstream_browser_version=upstream_browser_version,
qt_key=qt_key)
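# A minimal parsing sketch (illustrative only; the UA string is a made-up
# example):
#
#   ua = UserAgent.parse(
#       "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
#       "(KHTML, like Gecko) QtWebEngine/5.15.2 Chrome/83.0.4103.122 "
#       "Safari/537.36")
#   assert ua.os_info == "X11; Linux x86_64"
#   assert ua.upstream_browser_key == "Chrome"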
class AttributeInfo:
"""Info about a settings attribute."""
def __init__(self, *attributes: Any, converter: Callable = None) -> None:
self.attributes = attributes
if converter is None:
self.converter = lambda val: val
else:
self.converter = converter
class AbstractSettings:
"""Abstract base class for settings set via QWeb(Engine)Settings."""
_ATTRIBUTES: Dict[str, AttributeInfo] = {}
_FONT_SIZES: Dict[str, Any] = {}
_FONT_FAMILIES: Dict[str, Any] = {}
_FONT_TO_QFONT: Dict[Any, QFont.StyleHint] = {}
def __init__(self, settings: Any) -> None:
self._settings = settings
def _assert_not_unset(self, value: Any) -> None:
assert value is not usertypes.UNSET
def set_attribute(self, name: str, value: Any) -> None:
"""Set the given QWebSettings/QWebEngineSettings attribute.
If the value is usertypes.UNSET, the value is reset instead.
"""
info = self._ATTRIBUTES[name]
for attribute in info.attributes:
if value is usertypes.UNSET:
self._settings.resetAttribute(attribute)
else:
self._settings.setAttribute(attribute, info.converter(value))
def test_attribute(self, name: str) -> bool:
"""Get the value for the given attribute.
If the setting resolves to a list of attributes, only the first
attribute is tested.
"""
info = self._ATTRIBUTES[name]
return self._settings.testAttribute(info.attributes[0])
def set_font_size(self, name: str, value: int) -> None:
"""Set the given QWebSettings/QWebEngineSettings font size."""
self._assert_not_unset(value)
family = self._FONT_SIZES[name]
self._settings.setFontSize(family, value)
def set_font_family(self, name: str, value: Optional[str]) -> None:
"""Set the given QWebSettings/QWebEngineSettings font family.
With None (the default), QFont is used to get the default font for the
family.
"""
self._assert_not_unset(value)
family = self._FONT_FAMILIES[name]
if value is None:
font = QFont()
font.setStyleHint(self._FONT_TO_QFONT[family])
value = font.defaultFamily()
self._settings.setFontFamily(family, value)
def set_default_text_encoding(self, encoding: str) -> None:
"""Set the default text encoding to use."""
self._assert_not_unset(encoding)
self._settings.setDefaultTextEncoding(encoding)
def _update_setting(self, setting: str, value: Any) -> bool:
"""Update the given setting/value.
Unknown settings are ignored.
Return:
True if there was a change, False otherwise.
"""
if setting in self._ATTRIBUTES:
self.set_attribute(setting, value)
elif setting in self._FONT_SIZES:
self.set_font_size(setting, value)
elif setting in self._FONT_FAMILIES:
self.set_font_family(setting, value)
elif setting == 'content.default_encoding':
self.set_default_text_encoding(value)
return False
def update_setting(self, setting: str) -> None:
"""Update the given setting."""
value = config.instance.get(setting)
self._update_setting(setting, value)
def update_for_url(self, url: QUrl) -> None:
"""Update settings customized for the given tab."""
qtutils.ensure_valid(url)
for values in config.instance:
if not values.opt.supports_pattern:
continue
value = values.get_for_url(url, fallback=False)
self._update_setting(values.opt.name, value)
def init_settings(self) -> None:
"""Set all supported settings correctly."""
for setting in (list(self._ATTRIBUTES) + list(self._FONT_SIZES) +
list(self._FONT_FAMILIES)):
self.update_setting(setting)
@debugcachestats.register(name='user agent cache')
@functools.lru_cache()
def _format_user_agent(template: str, backend: usertypes.Backend) -> str:
if backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginesettings
parsed = webenginesettings.parsed_user_agent
else:
from qutebrowser.browser.webkit import webkitsettings
parsed = webkitsettings.parsed_user_agent
assert parsed is not None
return template.format(
os_info=parsed.os_info,
webkit_version=parsed.webkit_version,
qt_key=parsed.qt_key,
qt_version=qVersion(),
upstream_browser_key=parsed.upstream_browser_key,
upstream_browser_version=parsed.upstream_browser_version,
qutebrowser_version=qutebrowser.__version__,
)
def user_agent(url: QUrl = None) -> str:
"""Get the user agent for the given URL, or the global one if URL is None.
Note that the given URL should always be valid.
"""
template = config.instance.get('content.headers.user_agent', url=url)
return _format_user_agent(template=template, backend=objects.backend)
def init(args: argparse.Namespace) -> None:
"""Initialize all QWeb(Engine)Settings."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginesettings
webenginesettings.init()
elif objects.backend == usertypes.Backend.QtWebKit:
from qutebrowser.browser.webkit import webkitsettings
webkitsettings.init()
else:
raise utils.Unreachable(objects.backend)
# Make sure special URLs always get JS support
for pattern in ['chrome://*/*', 'qute://*/*']:
config.instance.set_obj('content.javascript.enabled', True,
pattern=urlmatch.UrlPattern(pattern),
hide_userconfig=True)
def clear_private_data() -> None:
"""Clear cookies, cache and related data for private browsing sessions."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginesettings
webenginesettings.init_private_profile()
elif objects.backend == usertypes.Backend.QtWebKit:
from qutebrowser.browser.webkit import cookies
assert cookies.ram_cookie_jar is not None
cookies.ram_cookie_jar.setAllCookies([])
else:
raise utils.Unreachable(objects.backend)
@pyqtSlot()
def shutdown() -> None:
"""Shut down QWeb(Engine)Settings."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginesettings
webenginesettings.shutdown()
elif objects.backend == usertypes.Backend.QtWebKit:
from qutebrowser.browser.webkit import webkitsettings
webkitsettings.shutdown()
else:
raise utils.Unreachable(objects.backend)
|
gpl-3.0
| 3,179,883,137,833,848,300
| 34.339552
| 78
| 0.652201
| false
| 4.068299
| true
| false
| false
|
TimBizeps/BachelorAP
|
V103_Biegung elastischer Stäbe/Auswertung.py
|
1
|
7054
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
from scipy.optimize import curve_fit
def auswertung(material, querschnitt, einspannung, x, D, d, L, M):
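    """Evaluate bending measurements for an elastic rod.
    Fits the measured deflection D against the theoretical shape function
    u(x) for the given clamping ('einseitig' = clamped on one side,
    'beidseitig' = supported on both sides) and cross-section, then derives
    Young's modulus E from the fitted slope and prints the results.
    """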
if einspannung == "einseitig":
u = L*x**2 - x**3/3
g = const.g
F = M*g
# d = np.mean(k)
# Δd = np.sqrt(1/(len(k)*(len(k)-1))*sum((d-k)**2))
if querschnitt == "kreisfoermig":
I = np.pi/64*d**4
# ΔI = np.pi/16*d**3*Δd
if querschnitt == "quadratisch":
I = d**4/12
# ΔI = 1/3*d**3*Δd
def f(x, m, b):
return m*x + b
params, cov = curve_fit(f, u, D)
m = params[0]
b = params[1]
Δm = np.sqrt(cov[0][0])
Δb = np.sqrt(cov[1][1])
E = F/(2*I*m)
# ΔE = np.sqrt((F/(2*I**2*m)*ΔI)**2+(F/(2*I*m**2)*Δm)**2)
ΔE = np.sqrt((F/(2*I*m**2)*Δm)**2)
t = np.linspace(u.min(), u.max(), 1000)
        plt.plot(u, 1000*D, 'rx', label='Measured values')
        plt.plot(t, 1000*f(t, m, b), 'k-', label='Regression line')
plt.xlim(u.min(), u.max())
plt.xlabel(r"$(Lx^2 - \frac{x^3}{3})/\mathrm{m}^3$")
plt.ylabel(r"$D/\mathrm{mm}$")
plt.legend(loc='best')
plt.tight_layout()
plt.savefig("build/plot_{}_{}_{}.pdf".format(material, querschnitt, einspannung))
plt.close()
print(
"""
------------------------------------------------------------------------
Material: {}
        Cross-section: {}
        Clamping: {}
        Diameter d: {} ± {:.5f} mm
        Length L: {} cm
        Mass M: {} kg
        Second moment of area I: {} ± {} mm^4
        Young's modulus E: {} ± {} N/m^2
        Slope m: {} ± {}
        Intercept b: {} ± {}
------------------------------------------------------------------------
""".format(material, querschnitt, einspannung, d*1e3, 0, L*1e2, M, I*1e12, 0, E*1e0, ΔE*1e0, m, Δm, b, Δb))
if einspannung == "beidseitig":
x1, x2 = np.array_split(x, 2)
D1, D2 = np.array_split(D, 2)
u1 = 3*L**2*x1 - 4*x1**3
u2 = 4*x2**3 - 12*L*x2**2 + 9*L**2*x2 - L**3
g = const.g
F = M*g
# d = np.mean(k)
# Δd = np.sqrt(1/(len(k)*(len(k)-1))*sum((d-k)**2))
if querschnitt == "kreisfoermig":
I = np.pi/64*d**4
# ΔI = np.pi/16*d**3*Δd
if querschnitt == "quadratisch":
I = d**4/12
# ΔI = 1/3*d**3*Δd
def f(x, m, b):
return m*x + b
params1, cov1 = curve_fit(f, u1, D1)
params2, cov2 = curve_fit(f, u2, D2)
m1 = params1[0]
m2 = params2[0]
b1 = params1[1]
b2 = params2[1]
Δm1 = np.sqrt(cov1[0][0])
Δm2 = np.sqrt(cov2[0][0])
Δb1 = np.sqrt(cov1[1][1])
Δb2 = np.sqrt(cov2[1][1])
E1 = F/(48*I*m1)
E2 = F/(48*I*m2)
# ΔE1 = np.sqrt((F/(48*I**2*m1)*ΔI)**2+(F/(48*I*m1**2)*Δm1)**2)
ΔE1 = np.sqrt((F/(48*I*m1**2)*Δm1)**2)
# ΔE2 = np.sqrt((F/(48*I**2*m2)*ΔI)**2+(F/(48*I*m2**2)*Δm2)**2)
ΔE2 = np.sqrt((F/(48*I*m2**2)*Δm2)**2)
E = (E1+E2)/2
ΔE = np.sqrt(ΔE1**2+ΔE2**2)/2
t = np.linspace(u1.min(), u1.max(), 1000)
        plt.plot(u1, 1000*D1, 'rx', label='Measured data')
        plt.plot(t, 1000*f(t, m1, b1), 'k-', label='Regression line')
plt.xlim(u1.min(), u1.max())
plt.xlabel(r"$(3L^2x - 4x^3)/\mathrm{m}^3$")
plt.ylabel(r"$D/\mathrm{mm}$")
plt.legend(loc='best')
plt.tight_layout()
plt.savefig("build/plot_{}_{}_{}_1.pdf".format(material, querschnitt, einspannung))
plt.close()
t = np.linspace(u2.min(), u2.max(), 1000)
        plt.plot(u2, 1000*D2, 'rx', label='Measured data')
        plt.plot(t, 1000*f(t, m2, b2), 'k-', label='Regression line')
plt.xlim(u2.min(), u2.max())
plt.xlabel(r"$(4x^3 - 12Lx^2 + 9L^2x - L^3)/\mathrm{m}^3$")
plt.ylabel(r"$D/\mathrm{mm}$")
plt.legend(loc='best')
plt.tight_layout()
plt.savefig("build/plot_{}_{}_{}_2.pdf".format(material, querschnitt, einspannung))
plt.close()
        print("""
        ------------------------------------------------------------------------
        Material: {}
        Cross-section: {}
        Clamping: {}
        Diameter d: {} ± {} mm
        Length L: {} cm
        Mass M: {} kg
        Area moment of inertia I: {} ± {} mm^4
        Young's modulus E1: {} ± {} N/m^2
        Young's modulus E2: {} ± {} N/m^2
        Young's modulus E: {} ± {} N/m^2
        Slope m1: {} ± {}
        Slope m2: {} ± {}
        Intercept b1: {} ± {}
        Intercept b2: {} ± {}
        ------------------------------------------------------------------------
        """.format(material, querschnitt, einspannung, d*1e3, 0, L*1e2, M, I*1e12, 0, E1*1e0, ΔE1*1e0, E2*1e0, ΔE2*1e0, E*1e0, ΔE*1e0, m1, Δm1, m2, Δm2, b1, Δb1, b2, Δb2))
'''
############################################################################
# Test with measurements from Philipp Leser
# Aluminium, square cross-section, clamped on both sides
# Read data
x, D = np.loadtxt("data/daten_aluminium_quadratisch_beidseitig.txt", unpack=True)
d = np.loadtxt("data/daten_aluminium_quadratisch_durchmesser.txt", unpack=True)
L = 55.3 #[cm]
M = 4.6944 #[kg]
# Analysis
d *= 1e-3
L *= 1e-2
x *= 1e-2
D *= 1e-6
auswertung("Aluminium", "quadratisch", "beidseitig", x, D, d, L, M)
############################################################################
'''
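# Background for the fits in auswertung() above (added note, derived from the
# expressions used in the code): for a rod clamped on one side, beam theory
# gives D(x) = F/(2EI) * (L*x**2 - x**3/3), so regressing D against
# u = L*x**2 - x**3/3 yields the slope m = F/(2EI) and hence E = F/(2*I*m).
# For two-sided clamping the deflection is fitted in two halves,
# D = F/(48EI) * (3*L**2*x - 4*x**3) and its mirrored counterpart, so each
# half gives E = F/(48*I*m) and the script averages the two estimates.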
# Brass, square cross-section, clamped on one side
# Read data
x, D = np.loadtxt("Messing.txt", unpack=True)
d = 10 #[mm]
L = 40.70 #[cm]
M = 2.3606 #[kg]
# Analysis
d *= 1e-3
L *= 1e-2
x *= 1e-2
D *= 1e-6
auswertung("Messing", "quadratisch", "einseitig", x, D, d, L, M)
# Aluminium, circular cross-section, clamped on one side
# Read data
x, D = np.loadtxt("alurund.txt", unpack=True)
d = 10 #[mm]
L = 34.8 #[cm]
M = 1.1926 #[kg]
# Analysis
d *= 1e-3
L *= 1e-2
x *= 1e-2
D *= 1e-6
auswertung("Aluminium", "kreisfoermig", "einseitig", x, D, d, L, M)
# Aluminium, square cross-section, clamped on both sides
# Read data
x, D = np.loadtxt("alueckig.txt", unpack=True)
d = 10 #[mm]
L = 55.3 #[cm]
M = 0 #[kg]
# Analysis
d *= 1e-3
L *= 1e-2
x *= 1e-2
D *= 1e-6
auswertung("Aluminium", "quadratisch", "beidseitig", x, D, d, L, M)
x, D = np.loadtxt("alueckig2.txt", unpack=True)
d = 10 #[mm]
L = 55.3 #[cm]
M = 3.5312 #[kg]
# Analysis
d *= 1e-3
L *= 1e-2
x *= 1e-2
D *= 1e-6
auswertung("Aluminium", "quadratisch", "beidseitig", x, D, d, L, M)
|
gpl-3.0
| -5,941,356,961,601,821,000
| 31.635514
| 171
| 0.463918
| false
| 2.404959
| false
| false
| false
|
shacknetisp/fourthevaz
|
modules/default/chatbot/wordai.py
|
1
|
4216
|
# -*- coding: utf-8 -*-
from random import choice
import copy
import random
import string
import pprint
import pickle
class wordai:
"""Word AI"""
def load(self):
"""Load the file."""
try:
self.dict_file = open(self.dbfile, 'rb')
self.data_dict = pickle.load(self.dict_file)
self.dict_file.close()
except:
pass
def save(self):
"""Save the file"""
output = open(self.dbfile, 'wb')
pickle.dump(self.data_dict, output)
output.close()
def addchoice(self, a):
self.choices.append(a)
def ms(self, r):
exclude = set(string.punctuation)
r = ''.join(ch for ch in r if ch not in exclude)
inp = r.lower().split()
if len(inp):
if not ';start' in self.data_dict:
self.data_dict[';start'] = list()
if not ';end' in self.data_dict:
self.data_dict[';end'] = list()
if not inp[0] in self.data_dict[';start'] or True:
self.data_dict[';start'].append(inp[0])
if not inp[-1] in self.data_dict[';end'] or True:
self.data_dict[';end'].append(inp[-1])
for i in range(len(inp)):
if not inp[i] in self.data_dict:
self.data_dict[inp[i]] = list()
try:
if not inp[i + 1] in self.data_dict[inp[i]] or True:
self.data_dict[inp[i]].append(inp[i + 1])
except IndexError:
pass
ret = ''
try:
self.choices = list()
for ch in range(4):
try:
self.addchoice(choice(inp))
except:
pass
try:
self.addchoice(inp[0])
except:
pass
for ch in range(random.randrange(8, 16)):
try:
self.addchoice(choice(self.data_dict[';start']))
except:
pass
try:
self.addchoice(choice(self.data_dict[inp[0]]))
except:
pass
first = choice(self.choices)
ret += first + ' '
nextword = first
for numwords in range(100):
if nextword in self.data_dict:
if nextword in self.data_dict[';end'] \
and (int(random.randrange(0, 100)) < 5 + numwords
+ self.data_dict[';end'].count(nextword)
/ len(self.data_dict[';end']) * 1000
or len(self.data_dict[nextword]) == 0):
break
cnext = choice(self.data_dict[nextword])
ret += cnext + ' '
nextword = cnext
else:
break
except IndexError:
pass
except KeyError:
pass
try:
return str(str(ret[0]).upper() + ret[1:]).strip() + '.'
except IndexError:
return '?'
def process(self, mp):
"""Process <mp> and return a reply."""
out = self.ms(mp)
self.save()
return out
def replace(self, w, n):
"""Replace <w> with <n> in the dictionary."""
if n != w:
self.data_dict[n] = self.data_dict[w]
del self.data_dict[w]
for k in self.data_dict:
for (index, item) in enumerate(self.data_dict[k]):
if item == w:
self.data_dict[k][index] = n
self.save()
def getdictstring(self):
"""Return the pprinted dictionary."""
data_dict_tmp = copy.deepcopy(self.data_dict)
if ';record' in data_dict_tmp:
del data_dict_tmp[';record']
return pprint.pformat(data_dict_tmp)
def getwords(self):
"""Get the number of words."""
data_dict_tmp = copy.deepcopy(self.data_dict)
if ';record' in data_dict_tmp:
del data_dict_tmp[';record']
return len(data_dict_tmp) - 2
    def __init__(self, dbf):
        self.dbfile = dbf
        self.choices = []
        # Initialize an empty dictionary so process() works even before
        # load() has been called or when the pickle file does not exist yet.
        self.data_dict = {}
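# Minimal usage sketch (added for illustration; 'wordai.db' is a hypothetical
# pickle path that save() creates on first use, so the lines stay commented):
#
#   ai = wordai('wordai.db')
#   ai.load()                                   # no-op if the file is missing
#   print(ai.process('the cat sat on the mat'))
#   print(ai.getwords(), 'distinct words learned')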
|
mit
| 5,600,262,000,338,761,000
| 31.183206
| 73
| 0.46371
| false
| 3.988647
| false
| false
| false
|
lukecampbell/compliance-checker
|
compliance_checker/cf/appendix_d.py
|
1
|
4004
|
#!/usr/bin/env python
'''
Appendix D compliance support for CF 1.6 and CF 1.7
The definitions given here allow an application to compute dimensional
coordinate values from the dimensionless ones and associated variables. The
formulas are expressed for a gridpoint (n,k,j,i) where i and j are the
horizontal indices, k is the vertical index and n is the time index. A
coordinate variable is associated with its definition by the value of the
standard_name attribute. The terms in the definition are associated with file
variables by the formula_terms attribute. The formula_terms attribute takes a
string value, the string being comprised of blank-separated elements of the form
"term: variable", where term is a keyword that represents one of the terms in
the definition, and variable is the name of the variable in a netCDF file that
contains the values for that term. The order of elements is not significant.
The gridpoint indices are not formally part of the definitions, but are included
to illustrate the indices that might be present in the file variables. For
example, a vertical coordinate whose definition contains a time index is not
necessarily time dependent in all netCDF files. Also, the definitions are given
in general forms that may be simplified by omitting certain terms. A term that
is omitted from the formula_terms attribute should be assumed to be zero.
'''
# Contains the standard name followed by a 2-tuple:
# (the set of expected formula terms, set of computed_standard_name(s)). Most
# vertical coordinates only have one computed_standard_name, but some have
# multiple acceptable values.
ocean_computed_standard_names = {
'altitude', 'height_above_geopotential_datum',
'height_above_reference_ellipsoid', 'height_above_mean_sea_level'
}
dimless_vertical_coordinates_1_6 = { # only for CF-1.6
"atmosphere_ln_pressure_coordinate" : ({'p0', 'lev'}, {'air_pressure'}),
"atmosphere_sigma_coordinate" : ({'sigma', 'ps', 'ptop'}, {'air_pressure'}),
"atmosphere_hybrid_sigma_pressure_coordinate": (({'a', 'b', 'ps'}, {'ap', 'b', 'ps'}), {'air_pressure'}),
"atmosphere_hybrid_height_coordinate" : ({'a', 'b', 'orog'}, {'altitude', 'height_above_geopotential_datum'}),
"atmosphere_sleve_coordinate" : ({'a', 'b1', 'b2', 'ztop', 'zsurf1', 'zsurf2'}, {'altitude', 'height_above_geopotential_datum'}),
"ocean_sigma_coordinate" : ({'sigma', 'eta', 'depth'}, ocean_computed_standard_names),
"ocean_s_coordinate" : ({'s', 'eta', 'depth', 'a', 'b', 'depth_c'}, ocean_computed_standard_names),
"ocean_sigma_z_coordinate" : ({'sigma', 'eta', 'depth', 'depth_c', 'nsigma', 'zlev'}, ocean_computed_standard_names),
"ocean_double_sigma_coordinate" : ({'sigma', 'depth', 'z1', 'z2', 'a', 'href', 'k_c'}, ocean_computed_standard_names)
}
dimless_vertical_coordinates_1_7 = dimless_vertical_coordinates_1_6.copy() # shallow copy
dimless_vertical_coordinates_1_7.update({ # extends 1.6
"ocean_s_coordinate_g1": ({'s', 'C', 'eta', 'depth', 'depth_c'}, ocean_computed_standard_names),
"ocean_s_coordinate_g2": ({'s', 'C', 'eta', 'depth', 'depth_c'}, ocean_computed_standard_names)
})
def no_missing_terms(formula_name, term_set, dimless_vertical_coordinates):
"""
Returns true if the set is not missing terms corresponding to the
entries in Appendix D, False otherwise. The set of terms should be exactly
equal, and not contain more or less terms than expected.
"""
reqd_terms = dimless_vertical_coordinates[formula_name][0]
def has_all_terms(reqd_termset):
return len(reqd_termset ^ term_set) == 0
if isinstance(reqd_terms, set):
return has_all_terms(reqd_terms)
    # If it's not a set, it's some other iterable holding multiple possible
    # term-set definitions, e.g. 'a'/'ap' are interchangeable in
    # atmosphere_hybrid_sigma_pressure_coordinate.
else:
return any(has_all_terms(req) for req in reqd_terms)
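# Hedged usage sketch (added, not part of the checker): validating the
# formula_terms of an ocean_sigma_coordinate against the CF-1.6 table above.
if __name__ == '__main__':
    terms = {'sigma', 'eta', 'depth'}
    # Exact match of the required term set -> True
    print(no_missing_terms('ocean_sigma_coordinate', terms,
                           dimless_vertical_coordinates_1_6))
    # A missing (or extra) term -> False
    print(no_missing_terms('ocean_sigma_coordinate', terms - {'depth'},
                           dimless_vertical_coordinates_1_6))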
|
apache-2.0
| 2,743,449,355,784,953,000
| 59.666667
| 148
| 0.692807
| false
| 3.64
| false
| false
| false
|
DemianWright/io_scene_blb
|
const.py
|
1
|
6023
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Various constants used in multiple modules.
@author: Demian Wright
"""
from decimal import Decimal
from enum import Enum, IntEnum
from math import pi
# The BLB file extension.
BLB_EXT = ".blb"
# The log file extension.
LOG_EXT = ".log"
# One log indent level.
LOG_INDENT = " "
# Generic.
X = 0
Y = 1
Z = 2
# Humans have wibbly-wobbly hands.
HUMAN_BRICK_GRID_ERROR = Decimal("0.1")
# The defined height of a Blockland plate at 100% scale.
DEFAULT_PLATE_HEIGHT = Decimal("0.4")
# Blockland does not accept bricks that are wide/deeper than 64 bricks or taller than 256 plates.
MAX_BRICK_HORIZONTAL_PLATES = 64
MAX_BRICK_VERTICAL_PLATES = 256
# Blockland supports up to 10 collision cuboids per BLB.
MAX_BRICK_COLLISION_CUBOIDS = 10
class BLBQuadSection(IntEnum):
"""The quad sections in the correct order for writing to a BLB file. Indexed from 0 to 6."""
TOP = 0
BOTTOM = 1
NORTH = 2
EAST = 3
SOUTH = 4
WEST = 5
OMNI = 6
class BrickTexture(Enum):
"""Valid brick texture names in alphabetical order."""
BOTTOMEDGE = 0
BOTTOMLOOP = 1
PRINT = 2
RAMP = 3
SIDE = 4
TOP = 5
def __str__(self):
"""Returns the name of the enum value in uppercase characters."""
return self.name
@classmethod
def as_list(cls):
"""Returns the names of the members of this enum as a list of uppercase strings."""
return [member.name for member in BrickTexture]
# BLB file strings.
BLB_BRICK_TYPE_SPECIAL = "SPECIAL"
BLB_SECTION_SEPARATOR = "---------------- {} QUADS ----------------"
BLB_HEADER_COVERAGE = "COVERAGE:"
BLB_PREFIX_TEXTURE = "TEX:"
BLB_HEADER_POSITION = "POSITION:"
BLB_HEADER_UV = "UV COORDS:"
BLB_HEADER_COLORS = "COLORS:"
BLB_HEADER_NORMALS = "NORMALS:"
# The default coverage value = no coverage. (Number of plates that need to cover a brick side to hide it.)
# The maximum area a brick's side can cover is 64 * 256 = 16384 plates.
DEFAULT_COVERAGE = 99999
# Brick grid symbols.
GRID_INSIDE = "x" # Disallow building inside brick.
GRID_OUTSIDE = "-" # Allow building in empty space.
GRID_UP = "u" # Allow placing bricks above this plate.
GRID_DOWN = "d" # Allow placing bricks below this plate.
GRID_BOTH = "b" # Allow placing bricks above and below this plate.
# Blender has 20 layers.
BLENDER_MAX_LAYER_IDX = 19
# Maximum number of decimal places to write to file.
MAX_FP_DECIMALS_TO_WRITE = 16
# The width and height of the default brick textures in pixels.
BRICK_TEXTURE_RESOLUTION = 512
# The UV coordinates are a single point in the middle of the image = no uv coordinates.
# The middle of the image is used instead of (0,0) due to the way Blockland brick textures are designed.
DEFAULT_UV_COORDINATES = ((0.5, 0.5),) * 4
# Often used Decimal values.
DECIMAL_ONE = Decimal("1.0")
DECIMAL_HALF = Decimal("0.5")
# Useful angles in radians.
RAD_45_DEG = pi * 0.25
RAD_135_DEG = pi - RAD_45_DEG
RAD_225_DEG = pi + RAD_45_DEG
RAD_315_DEG = pi + RAD_135_DEG
TWO_PI = 2.0 * pi
class Axis3D(Enum):
"""An enum with values representing each axis in three-dimensional space, indexed as follows:
0: POS_X
1: NEG_X
2: POS_Y
3: NEG_Y
4: POS_Z
5: NEG_Z
"""
POS_X = 0
NEG_X = 1
POS_Y = 2
NEG_Y = 3
POS_Z = 4
NEG_Z = 5
def index(self):
"""Determines the index of this three-dimensional axis.
Returns:
The index 0, 1, or 2 for the axes X, Y, and Z respectively.
"""
if self is Axis3D.POS_X or self is Axis3D.NEG_X:
return X
elif self is Axis3D.POS_Y or self is Axis3D.NEG_Y:
return Y
else:
return Z
@classmethod
def from_property_name(cls, axis_name):
"""Parses the 3D axis from the specified string.
Args:
axis_name (string): The name of the axis in the same format as the axis_blb_forward Blender property.
Returns:
An Axis3D value corresponding to the specified axis name.
"""
if axis_name == "POSITIVE_X":
return Axis3D.POS_X
elif axis_name == "NEGATIVE_X":
return Axis3D.NEG_X
elif axis_name == "POSITIVE_Y":
return Axis3D.POS_Y
elif axis_name == "NEGATIVE_Y":
return Axis3D.NEG_Y
elif axis_name == "POSITIVE_Z":
return Axis3D.POS_Z
else: # axis_name == "NEGATIVE_Z":
return Axis3D.NEG_Z
def is_positive(self):
"""Determines if this three-dimensional axis is positive or negative.
Returns:
True if this value represents a positive axis.
"""
return self is Axis3D.POS_X or self is Axis3D.POS_Y or self is Axis3D.POS_Z
class AxisPlane3D(Enum):
"""An enum with values representing each axis-aligned plane in three-dimensional space, indexed as follows:
0: XY-plane
1: XZ-plane
2: YZ-plane
"""
XY = 0
XZ = 1
YZ = 2
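# Illustrative sanity checks (added sketch, not part of the exporter):
if __name__ == "__main__":
    assert BrickTexture.as_list() == [
        "BOTTOMEDGE", "BOTTOMLOOP", "PRINT", "RAMP", "SIDE", "TOP"]
    assert Axis3D.from_property_name("NEGATIVE_Y") is Axis3D.NEG_Y
    # NEG_Y maps to the Y index (1) and is not a positive direction.
    assert Axis3D.NEG_Y.index() == Y and not Axis3D.NEG_Y.is_positive()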
|
gpl-2.0
| 3,007,720,878,029,224,000
| 27.965174
| 113
| 0.619957
| false
| 3.338692
| false
| false
| false
|
twhiteaker/pynwm
|
src/pynwm/hydroshare/hs_latest.py
|
1
|
2315
|
#!/usr/bin/python2
"""Identifies the latest National Water Model files in HydroShare."""
from hs_list import list_sims, list_dates
def _find_complete_sim(sims):
for key in reversed(sims):
sim = sims[key]
if sim['is_complete']:
return (key, sim)
return (None, None)
def find_latest_simulation(product):
"""Identifies files for the most recent complete simulation.
    As files arrive at HydroShare from NOAA, a folder for the forecast
    date is created even though not all files may have arrived from
    NOAA yet. This function checks that all files for the simulation are
present before returning details of that simulation.
Each simulation is represented as a dictionary describing product
type, simulation date, and whether all expected files are present,
and it also includes a list of filenames, e.g.
{'product': 'long_range_mem1',
'date': '20170401t06-00',
'is_complete': True,
'files': ['nwm...f006.conus.nc', 'nwm...f012.conus.nc', ...],
'links': ['http...', ...]}
Args:
product: String product name, e.g., 'short_range'.
Returns:
An ordered dictionary of simulation dictionaries, indexed by
product and date, e.g., 'long_range_mem1_20170401t06-00', or
empty dictionary if no complete simulations found.
"""
sims = {}
if product == 'analysis_assim':
# Warning: This may change with NWM v1.1 since assim has 3 files, not one
all_sims = list_sims(product)
key, sim = _find_complete_sim(all_sims)
if key:
sims[key] = sim
else:
dates = reversed(list_dates(product))
for date in dates:
date_sims = list_sims(product, date)
if product == 'long_range' and len(date_sims) == 16:
is_complete = True
for key, sim in date_sims.iteritems():
if not sim['is_complete']:
is_complete = False
break
if is_complete:
sims = date_sims
break
elif product != 'long_range':
key, sim = _find_complete_sim(date_sims)
if key:
sims[key] = sim
break
return sims
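# Hedged usage sketch (added; it needs network access to HydroShare through
# hs_list, so it is left commented out):
#
#   sims = find_latest_simulation('short_range')
#   for key, sim in sims.iteritems():
#       print('%s: %d files, complete: %s'
#             % (key, len(sim['files']), sim['is_complete']))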
|
mit
| 5,805,429,525,390,713,000
| 34.075758
| 81
| 0.580562
| false
| 3.977663
| false
| false
| false
|
z01nl1o02/tests
|
mxnet/cn/utils.py
|
1
|
15685
|
from math import exp
from mxnet import gluon
from mxnet import autograd
from mxnet import nd
from mxnet import image
from mxnet.gluon import nn
import mxnet as mx
import numpy as np
from time import time
import matplotlib.pyplot as plt
import matplotlib as mpl
import random
import pdb
class DataLoader(object):
    """Similar to gluon.data.DataLoader, but potentially faster.
    The main difference is that this data loader tries to read several
    examples at a time. The limits are 1) all examples in the dataset must
    have the same shape, and 2) the data transformer needs to process
    multiple examples at a time.
    """
def __init__(self, dataset, batch_size, shuffle, transform=None):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.transform = transform
def __iter__(self):
data = self.dataset[:]
X = data[0]
y = nd.array(data[1])
n = X.shape[0]
if self.shuffle:
idx = np.arange(n)
np.random.shuffle(idx)
X = nd.array(X.asnumpy()[idx])
y = nd.array(y.asnumpy()[idx])
for i in range(n//self.batch_size):
if self.transform is not None:
yield self.transform(X[i*self.batch_size:(i+1)*self.batch_size],
y[i*self.batch_size:(i+1)*self.batch_size])
else:
yield (X[i*self.batch_size:(i+1)*self.batch_size],
y[i*self.batch_size:(i+1)*self.batch_size])
def __len__(self):
return len(self.dataset)//self.batch_size
def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """Download the Fashion-MNIST dataset and load it into memory."""
def transform_mnist(data, label):
# Transform a batch of examples.
if resize:
n = data.shape[0]
new_data = nd.zeros((n, resize, resize, data.shape[3]))
for i in range(n):
new_data[i] = image.imresize(data[i], resize, resize)
data = new_data
# change data from batch x height x width x channel to batch x channel x height x width
return nd.transpose(data.astype('float32'), (0,3,1,2))/255, label.astype('float32')
mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
# Transform later to avoid memory explosion.
train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
return (train_data, test_data)
def try_gpu():
"""If GPU is available, return mx.gpu(0); else return mx.cpu()"""
try:
ctx = mx.gpu()
_ = nd.array([0], ctx=ctx)
except:
ctx = mx.cpu()
return ctx
def try_all_gpus():
"""Return all available GPUs, or [mx.gpu()] if there is no GPU"""
ctx_list = []
try:
for i in range(16):
ctx = mx.gpu(i)
_ = nd.array([0], ctx=ctx)
ctx_list.append(ctx)
except:
pass
if not ctx_list:
ctx_list = [mx.cpu()]
return ctx_list
def SGD(params, lr):
for param in params:
param[:] = param - lr * param.grad
def accuracy(output, label):
return nd.mean(output.argmax(axis=1)==label).asscalar()
def _get_batch(batch, ctx):
"""return data and label on ctx"""
if isinstance(batch, mx.io.DataBatch):
data = batch.data[0]
label = batch.label[0]
else:
data, label = batch
return (gluon.utils.split_and_load(data, ctx),
gluon.utils.split_and_load(label, ctx),
data.shape[0])
def evaluate_accuracy(data_iterator, net, ctx=[mx.cpu()]):
if isinstance(ctx, mx.Context):
ctx = [ctx]
acc = nd.array([0])
n = 0.
if isinstance(data_iterator, mx.io.MXDataIter) or isinstance(data_iterator,mx.image.ImageIter):
data_iterator.reset()
for batch in data_iterator:
data, label, batch_size = _get_batch(batch, ctx)
for X, y in zip(data, label):
y = y.astype('float32')
acc += nd.sum(net(X).argmax(axis=1)==y).copyto(mx.cpu())
n += y.size
acc.wait_to_read() # don't push too many operators into backend
return acc.asscalar() / n
def train(train_data, test_data, net, loss, trainer, ctx, num_epochs, print_batches=None):
"""Train a network"""
print("Start training on ", ctx)
if isinstance(ctx, mx.Context):
ctx = [ctx]
for epoch in range(num_epochs):
train_loss, train_acc, n, m = 0.0, 0.0, 0.0, 0.0
if isinstance(train_data, mx.io.MXDataIter) or isinstance(train_data,mx.image.ImageIter):
train_data.reset()
start = time()
#i = 0
for i, batch in enumerate(train_data):
#pdb.set_trace()
#for batch,label in train_data:
data, label, batch_size = _get_batch(batch, ctx)
#batch_size = batch.shape[0]
losses = []
with autograd.record():
outputs = [net(X) for X in data]
losses = [loss(yhat, y) for yhat, y in zip(outputs, label)]
for l in losses:
l.backward()
train_acc += sum([(yhat.argmax(axis=1)==y).sum().asscalar()
for yhat, y in zip(outputs, label)])
train_loss += sum([l.sum().asscalar() for l in losses])
trainer.step(batch_size)
n += batch_size
m += sum([y.size for y in label])
if print_batches and (i+1) % print_batches == 0:
print("Batch %d. Loss: %f, Train acc %f" % (
n, train_loss/n, train_acc/m
))
test_acc = evaluate_accuracy(test_data, net, ctx)
print("Epoch %d. Loss: %.3f, Train acc %.2f, Test acc %.2f, Time %.1f sec" % (
epoch, train_loss/n, train_acc/m, test_acc, time() - start
))
print("done")
class Residual(nn.HybridBlock):
def __init__(self, channels, same_shape=True, **kwargs):
super(Residual, self).__init__(**kwargs)
self.same_shape = same_shape
with self.name_scope():
strides = 1 if same_shape else 2
self.conv1 = nn.Conv2D(channels, kernel_size=3, padding=1,
strides=strides)
self.bn1 = nn.BatchNorm()
self.conv2 = nn.Conv2D(channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm()
if not same_shape:
self.conv3 = nn.Conv2D(channels, kernel_size=1,
strides=strides)
def hybrid_forward(self, F, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if not self.same_shape:
x = self.conv3(x)
return F.relu(out + x)
def resnet18(num_classes):
net = nn.HybridSequential()
with net.name_scope():
net.add(
nn.BatchNorm(),
nn.Conv2D(64, kernel_size=3, strides=1),
nn.MaxPool2D(pool_size=3, strides=2),
Residual(64),
Residual(64),
Residual(128, same_shape=False),
Residual(128),
Residual(256, same_shape=False),
Residual(256),
nn.GlobalAvgPool2D(),
nn.Dense(num_classes)
)
return net
def show_images(imgs, nrows, ncols, figsize=None):
"""plot a list of images"""
if not figsize:
figsize = (ncols, nrows)
_, figs = plt.subplots(nrows, ncols, figsize=figsize)
for i in range(nrows):
for j in range(ncols):
figs[i][j].imshow(imgs[i*ncols+j].asnumpy())
figs[i][j].axes.get_xaxis().set_visible(False)
figs[i][j].axes.get_yaxis().set_visible(False)
plt.show()
def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a random order from sequential data."""
# Subtract 1 because label indices are corresponding input indices + 1.
num_examples = (len(corpus_indices) - 1) // num_steps
epoch_size = num_examples // batch_size
# Randomize samples.
example_indices = list(range(num_examples))
random.shuffle(example_indices)
def _data(pos):
return corpus_indices[pos: pos + num_steps]
for i in range(epoch_size):
# Read batch_size random samples each time.
i = i * batch_size
batch_indices = example_indices[i: i + batch_size]
data = nd.array(
[_data(j * num_steps) for j in batch_indices], ctx=ctx)
label = nd.array(
[_data(j * num_steps + 1) for j in batch_indices], ctx=ctx)
yield data, label
def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a consecutive order from sequential data."""
corpus_indices = nd.array(corpus_indices, ctx=ctx)
data_len = len(corpus_indices)
batch_len = data_len // batch_size
indices = corpus_indices[0: batch_size * batch_len].reshape((
batch_size, batch_len))
# Subtract 1 because label indices are corresponding input indices + 1.
epoch_size = (batch_len - 1) // num_steps
for i in range(epoch_size):
i = i * num_steps
data = indices[:, i: i + num_steps]
label = indices[:, i + 1: i + num_steps + 1]
yield data, label
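# Quick illustration (added sketch; it needs mxnet, so it is left as a
# comment). With corpus_indices = range(13), batch_size 2 and num_steps 3,
# each batch holds consecutive slices and the labels are the inputs shifted
# by one position:
#
#   for X, Y in data_iter_consecutive(list(range(13)), 2, 3):
#       print(X.asnumpy(), Y.asnumpy())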
def grad_clipping(params, clipping_norm, ctx):
"""Gradient clipping."""
if clipping_norm is not None:
norm = nd.array([0.0], ctx)
for p in params:
norm += nd.sum(p.grad ** 2)
norm = nd.sqrt(norm).asscalar()
if norm > clipping_norm:
for p in params:
p.grad[:] *= clipping_norm / norm
def predict_rnn(rnn, prefix, num_chars, params, hidden_dim, ctx, idx_to_char,
char_to_idx, get_inputs, is_lstm=False):
"""Predict the next chars given the prefix."""
prefix = prefix.lower()
state_h = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
output = [char_to_idx[prefix[0]]]
for i in range(num_chars + len(prefix)):
X = nd.array([output[-1]], ctx=ctx)
if is_lstm:
Y, state_h, state_c = rnn(get_inputs(X), state_h, state_c, *params)
else:
Y, state_h = rnn(get_inputs(X), state_h, *params)
if i < len(prefix)-1:
next_input = char_to_idx[prefix[i+1]]
else:
next_input = int(Y[0].argmax(axis=1).asscalar())
output.append(next_input)
return ''.join([idx_to_char[i] for i in output])
def train_and_predict_rnn(rnn, is_random_iter, epochs, num_steps, hidden_dim,
learning_rate, clipping_norm, batch_size,
pred_period, pred_len, seqs, get_params, get_inputs,
ctx, corpus_indices, idx_to_char, char_to_idx,
is_lstm=False):
"""Train an RNN model and predict the next item in the sequence."""
if is_random_iter:
data_iter = data_iter_random
else:
data_iter = data_iter_consecutive
params = get_params()
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
for e in range(1, epochs + 1):
# If consecutive sampling is used, in the same epoch, the hidden state
# is initialized only at the beginning of the epoch.
if not is_random_iter:
state_h = nd.zeros(shape=(batch_size, hidden_dim), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(batch_size, hidden_dim), ctx=ctx)
train_loss, num_examples = 0, 0
for data, label in data_iter(corpus_indices, batch_size, num_steps,
ctx):
# If random sampling is used, the hidden state has to be
# initialized for each mini-batch.
if is_random_iter:
state_h = nd.zeros(shape=(batch_size, hidden_dim), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(batch_size, hidden_dim), ctx=ctx)
with autograd.record():
# outputs shape: (batch_size, vocab_size)
if is_lstm:
outputs, state_h, state_c = rnn(get_inputs(data), state_h,
state_c, *params)
else:
outputs, state_h = rnn(get_inputs(data), state_h, *params)
# Let t_ib_j be the j-th element of the mini-batch at time i.
# label shape: (batch_size * num_steps)
# label = [t_0b_0, t_0b_1, ..., t_1b_0, t_1b_1, ..., ].
label = label.T.reshape((-1,))
# Concatenate outputs:
# shape: (batch_size * num_steps, vocab_size).
outputs = nd.concat(*outputs, dim=0)
# Now outputs and label are aligned.
loss = softmax_cross_entropy(outputs, label)
loss.backward()
grad_clipping(params, clipping_norm, ctx)
SGD(params, learning_rate)
train_loss += nd.sum(loss).asscalar()
num_examples += loss.size
if e % pred_period == 0:
print("Epoch %d. Training perplexity %f" % (e,
exp(train_loss/num_examples)))
for seq in seqs:
print(' - ', predict_rnn(rnn, seq, pred_len, params,
hidden_dim, ctx, idx_to_char, char_to_idx, get_inputs,
is_lstm))
print()
def set_fig_size(mpl, figsize=(3.5, 2.5)):
"""set output image size for matplotlib """
mpl.rcParams['figure.figsize'] = figsize
def data_iter(batch_size, num_examples, X, y):
    """Iterate over the dataset in shuffled mini-batches."""
idx = list(range(num_examples))
random.shuffle(idx)
for i in range(0, num_examples, batch_size):
j = nd.array(idx[i: min(i + batch_size, num_examples)])
yield X.take(j), y.take(j)
def linreg(X, w, b):
"""linear regression"""
return nd.dot(X, w) + b
def squared_loss(yhat, y):
return (yhat - y.reshape(yhat.shape)) ** 2 / 2
def optimize(batch_size, trainer, num_epochs, decay_epoch, log_interval, X, y,
net):
dataset = gluon.data.ArrayDataset(X, y)
data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True)
square_loss = gluon.loss.L2Loss()
y_vals = [square_loss(net(X), y).mean().asnumpy()]
for epoch in range(1, num_epochs + 1):
        # lower the learning rate after decay_epoch
if decay_epoch and epoch > decay_epoch:
trainer.set_learning_rate(trainer.learning_rate * 0.1)
for batch_i, (features, label) in enumerate(data_iter):
with autograd.record():
output = net(features)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
if batch_i * batch_size % log_interval == 0:
y_vals.append(square_loss(net(X), y).mean().asnumpy())
print('w:', net[0].weight.data(), '\nb:', net[0].bias.data(), '\n')
x_vals = np.linspace(0, num_epochs, len(y_vals), endpoint=True)
semilogy(x_vals, y_vals, 'epoch', 'loss')
def semilogy(x_vals, y_vals, x_label, y_label, figsize=(3.5, 2.5)):
"""plot log(y)"""
set_fig_size(mpl, figsize)
plt.semilogy(x_vals, y_vals)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
|
gpl-2.0
| 8,030,618,133,173,451,000
| 37.070388
| 99
| 0.562576
| false
| 3.492541
| true
| false
| false
|
DeveloperJose/Vision-Rat-Brain
|
feature_matching_v3/exp_dynamic_programming.py
|
1
|
11157
|
# Author: Jose G Perez
# Version 1.0
# Last Modified: January 31, 2018
import numpy as np
import pylab as plt
from skimage import color
from util_im import imshow_matches
from util_sm import load_sm, norm_sm, norm_prob_sm
from util_sift import precompute_sift, load_sift
precompute_sift('S_BB_V4', 'PW_BB_V4')
s_im, s_label, s_kp, s_des = load_sift('S_BB_V4_SIFT.npz')
pw_im, pw_label, pw_kp, pw_des = load_sift('PW_BB_V4_SIFT.npz')
sm_matches, sm_metric = load_sm('sm_v4', s_kp, pw_kp)
def idx_to_plate(labels, plate):
return np.where(labels == plate)
def dynamic_prog(sm, pw_penalty, s_penalty):
ed = np.zeros((sm.shape[0]+1, sm.shape[1]+1))
dir = np.zeros_like(ed)
ed[:,0] = np.arange(ed.shape[0]) * -s_penalty
ed[0,:] = np.arange(ed.shape[1]) * -pw_penalty
# ed[:,0] = ed[0,:] = 0
for i in range(1,ed.shape[0]):
for j in range(1,ed.shape[1]):
            choices = [ed[i,j-1] - pw_penalty, # 0 = from the left (j-1)
                       ed[i-1,j-1] + sm[i-1,j-1], # 1 = diagonal
                       ed[i-1,j] - s_penalty] # 2 = from above (i-1)
idx = np.argmax(choices)
dir[i,j]=idx
ed[i,j]=choices[idx]
return ed, dir.astype(np.uint8)
def get_pairs(dir):
sidx = dir.shape[0]-1
pwidx = dir.shape[1]-1
pairs = []
    while sidx > 0 and pwidx > 0:
        next_dir = dir[sidx, pwidx]
        pairs.append([sidx, pwidx])
        # Direction codes mirror dynamic_prog/overlay: 0 came from the left
        # (j-1), 1 from the diagonal, 2 from above (i-1). The original loop
        # swapped codes 0 and 2 relative to overlay(), misaligning the pairs.
        if next_dir == 0:
            pwidx -= 1
        elif next_dir == 1:
            sidx -= 1
            pwidx -= 1
        else:
            sidx -= 1
return np.array(pairs)
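# Toy trace (added sketch; kept as a comment since the module loads SIFT data
# at import time). On an identity-like similarity matrix the alignment should
# follow the diagonal; get_pairs returns indices in the padded coordinate
# system (extra penalty row/column), so they start at 1:
#
#   toy = np.eye(3)
#   _, d = dynamic_prog(toy, pw_penalty=1, s_penalty=1)
#   print(get_pairs(d))  # [[3, 3], [2, 2], [1, 1]]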
def pair_metric(sm_metric, pairs):
best6_pw = get_best_pw(sm_metric, pairs, 6)
best11_pw = get_best_pw(sm_metric, pairs, 11)
best23_pw = get_best_pw(sm_metric, pairs, 23)
best33_pw = get_best_pw(sm_metric, pairs, 33)
# PW8 S6, PW11 S11, PW42 S23, PW68 S33,
# m += np.count_nonzero(best6_pw == np.where(pw_label == 8))
# m += np.count_nonzero(best11_pw == np.where(pw_label == 11))
# m += np.count_nonzero(best23_pw == np.where(pw_label == 42))
# m += np.count_nonzero(best33_pw == np.where(pw_label == 68))
return np.min(abs(best6_pw - np.where(pw_label == 8))) + \
np.min(abs(best11_pw - np.where(pw_label == 11))) + \
np.min(abs(best23_pw - np.where(pw_label == 42))) + \
np.min(abs(best33_pw - np.where(pw_label == 68)))
def overlay(dir, sm):
# bg = norm_sm(sm, 255).astype(np.uint8)
bg = sm.astype(np.uint8)
color_mask = np.zeros((dir.shape[0],dir.shape[1],3))
sidx = sm.shape[0]-1
pwidx = sm.shape[1]-1
count = 0
path = ['START']
pairs = []
while sidx >= 0 and pwidx >= 0:
count += 1
color_mask[sidx, pwidx] = [0, 0, 255]
bg[sidx, pwidx] = 255
next_dir = dir[sidx, pwidx]
pairs.append([sidx, pwidx])
if next_dir == 0: # Left
pwidx -= 1
path.append('L')
elif next_dir == 1: # Diagonal
sidx -= 1
pwidx -= 1
path.append('D')
else: # Up
sidx -= 1
path.append('U')
# Remove penalty row/col
dir = dir[1:,1:]
color_mask = color_mask[1:,1:,:]
# PW8 S6, PW11 S11, PW42 S23, PW68 S33,
color_mask[np.where(s_label == 6), np.where(pw_label == 8)] = [255, 0, 0]
bg[np.where(s_label == 6), np.where(pw_label == 8)] = 255
color_mask[np.where(s_label == 11), np.where(pw_label == 11)] = [255, 0, 0]
bg[np.where(s_label == 11), np.where(pw_label == 11)] = 255
color_mask[np.where(s_label == 23), np.where(pw_label == 42)] = [255, 0, 0]
bg[np.where(s_label == 23), np.where(pw_label == 42)] = 255
color_mask[np.where(s_label == 33), np.where(pw_label == 68)] = [255, 0, 0]
bg[np.where(s_label == 33), np.where(pw_label == 68)] = 255
print("path", count, path)
img_color = np.stack((bg,)*3,axis=2)
img_hsv = color.rgb2hsv(img_color)
color_mask_hsv = color.rgb2hsv(color_mask)
img_hsv[..., 0] = color_mask_hsv[..., 0]
img_hsv[..., 1] = color_mask_hsv[..., 1]
im_overlay = color.hsv2rgb(img_hsv)
return im_overlay, np.array(pairs)
def error(best_pw, pw_plate, s_plate):
# s_idx = int(np.argwhere(s_label == s_plate))
pw_idx = int(np.argwhere(pw_label == pw_plate))
pred_sidx = best_pw[pw_idx]
pred_s = int(np.argwhere(s_label == pred_sidx))
return abs(pred_s - s_plate)
def get_best_pw(sm_metric, pairs, s_plate):
# Indices start at 0, plates start at 1
sidx = s_plate-1
pidx = np.where(pairs[:, 0] == sidx)
matches = pairs[pidx, 1].flatten()
    # return pw_label[matches] if len(matches) >= 1 else -1
    return pw_label[matches] if len(matches) >= 1 else np.array([np.inf])
# if len(matches) > 1:
# metrics = sm_metric[sidx,matches]
# best_idx = np.argmax(metrics)
# return int(pw_label[matches[best_idx]])
# elif len(matches) == 1:
# # Convert from PW Indices to PW Labels
# return int(pw_label[matches])
# else:
# return -1
if __name__ == '__main__':
# lowest_error = np.inf
# best_pw = -1
# best_s = -1
# for pw_penalty in np.arange(0.4, 0.5, 0.001):
# for s_penalty in np.arange(0.4, 0.5, 0.001):
# ed, dir = dynamic_prog(norm, pw_penalty=pw_penalty, s_penalty=s_penalty)
# pairs = get_pairs(dir)
# metric = pair_metric(sm_metric, pairs)
# if metric < lowest_error:
# print("New error", metric, pw_penalty, s_penalty)
# lowest_error = metric
# best_pw = pw_penalty
# best_s = s_penalty
# ed, dir = dynamic_prog(norm, pw_penalty=best_pw, s_penalty=best_s)
# im_overlay, pairs = overlay(dir, sm_metric)
# best6_pw = get_best_pw(sm_metric,pairs,6)
# best11_pw = get_best_pw(sm_metric,pairs,11)
# best23_pw = get_best_pw(sm_metric,pairs,23)
# best33_pw = get_best_pw(sm_metric,pairs,33)
# print("[PW8=%s], [PW11=%s], [PW42=%s [PW68=%s]" % (best6_pw, best11_pw, best23_pw, best33_pw))
#
# imshow_matches(im_overlay, 'Dynamic Programming')
# import pylab as plt
# best_pw = 200
# best_s = 220
# ed, dir = dynamic_prog(norm, pw_penalty=best_pw, s_penalty=best_s)
# pairs = get_pairs(dir)
# metric = pair_metric(sm_metric, pairs)
# im_overlay, pairs = overlay(dir, sm_metric)
# best6_pw = get_best_pw(sm_metric,pairs,6)
# best11_pw = get_best_pw(sm_metric,pairs,11)
# best23_pw = get_best_pw(sm_metric,pairs,23)
# best33_pw = get_best_pw(sm_metric,pairs,33)
# print("[PW8=%s], [PW11=%s], [PW42=%s [PW68=%s]" % (best6_pw, best11_pw, best23_pw, best33_pw))
#
# imshow_matches(im_overlay, 'Dynamic Programming')
# plt.show()
# mat = sm_matches
#
# pw_penalty = 50
# s_penalty = 50
# ed, dir = dynamic_prog(mat, pw_penalty=pw_penalty, s_penalty=s_penalty)
# im_overlay, pairs = overlay(dir, mat)
# norm = norm_sm(mat)
#
# import pylab as plt
# fig, axes = plt.subplots(nrows=2, ncols=2)
# plt.subplots_adjust(left=0.25, bottom=0.25)
# plt.set_cmap(plt.get_cmap('hot'))
# # axes.set_title('Dynamic')
#
# axes[0,0].set_title('Similarity Matrix')
# axes[0,0].imshow(mat)
#
# axes[0,1].set_title('SM Norm')
# axes[0,1].imshow(norm_prob_sm(sm_matches))
#
# axes[1,0].set_title('ED')
# axes[1,1].set_title('Overlay')
#
# # Sliders
# axcolor = 'lightgoldenrodyellow'
# axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
# axamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
# # s_pwp = plt.Slider(axfreq, 'PW Penalty', 0, 1, .0001, valfmt='%.8f')
# # s_sp = plt.Slider(axamp, 'S Penalty', 0, 1, .0001, valfmt='%.8f')
# s_pwp = plt.Slider(axfreq, 'PW Penalty', 0, 400, 10, valfmt='%.8f')
# s_sp = plt.Slider(axamp, 'S Penalty', 0, 400, 10, valfmt='%.8f')
#
# def update(val):
# pw_penalty = s_pwp.val
# s_penalty = s_sp.val
#
# ed, dir = dynamic_prog(mat, pw_penalty=pw_penalty, s_penalty=s_penalty)
# im_overlay, pairs = overlay(dir, mat)
#
# best6_pw = get_best_pw(sm_metric,pairs,6)
# best11_pw = get_best_pw(sm_metric,pairs,11)
# best23_pw = get_best_pw(sm_metric,pairs,23)
# best33_pw = get_best_pw(sm_metric,pairs,33)
# print("[PW8=%s], [PW11=%s], [PW42=%s [PW68=%s]" % (best6_pw, best11_pw, best23_pw, best33_pw))
#
# axes[1,0].imshow(ed)
# axes[1,1].imshow(im_overlay)
# fig.canvas.draw_idle()
#
# s_pwp.on_changed(update)
# s_sp.on_changed(update)
# plt.show()
#%% Runtime Experiments
mat = sm_matches
pw_penalty = 50
s_penalty = 50
ed, dir = dynamic_prog(mat, pw_penalty=pw_penalty, s_penalty=s_penalty)
im_overlay, pairs = overlay(dir, mat)
# Figure prep
pw_ticks_idxs = [0]
pw_ticks_vals = [pw_label[0]]
for x in range(len(pw_label)):
try:
diff = pw_label[x + 1] - pw_label[x]
if diff > 1:
pw_ticks_idxs.append(x)
pw_ticks_vals.append(pw_label[x])
# print("IDX: ", x, "DIFF:", diff)
except:
continue
pw_ticks_idxs.append(len(pw_label) - 1)
pw_ticks_vals.append(pw_label[-1])
# Figure
plt.figure()
ax = plt.gca()
ax.set_title('Dynamic Programming Back-Tracing')
plt.setp(ax.get_xticklabels(), rotation=90, horizontalalignment='right')
plt.imshow(im_overlay)
plt.xticks(pw_ticks_idxs, pw_ticks_vals)
plt.yticks(np.arange(0, len(s_label)), np.arange(1, len(s_label) + 1))
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
plt.xlabel('PW Level')
plt.ylabel('S Level')
# best_pwp = 0
# best_sps = 0
# best_total = np.inf
# for pw_penalty in range(0, 200):
# for s_penalty in range(0, 200):
# ed, ed2 = dynamic_prog(norm, pw_penalty=pw_penalty, s_penalty=s_penalty)
# best_pw = s_label[np.argmin(ed, axis=0)]
#
# # PW8 S6, PW11 S11, PW42 S23, PW68 S33,
# e = error(best_pw, 68, 33) + \
# error(best_pw, 11, 11) + \
# error(best_pw, 42, 23) + \
# error(best_pw, 68, 33)
#
# if e < best_total:
# print("New best total", e)
# best_total = e
# best_pwp = pw_penalty
# best_sps = s_penalty
# best_pwp = 200
# best_sps = 200
# ed, ed2 = dynamic_prog(norm, pw_penalty=best_pwp, s_penalty=best_sps)
# im_overlay = overlay(ed, norm)
# imshow_matches(dynamic_prog(norm, pw_penalty=1, s_penalty=1)[1], '')
# imshow_matches(overlay(dynamic_prog(sm_matches, 0.9, 0.1)[0], sm_matches), '')
# aoi = ed[32:35, 38:41]
# best_s = pw_label[np.argmin(ed,axis=1)]
# print("PW68 best match", best_pw[np.where(pw_label==68)])
# print("S33 best match", best_s[np.where(s_label==33)])
|
mit
| -8,387,339,234,302,177,000
| 34.531847
| 104
| 0.553554
| false
| 2.709983
| false
| false
| false
|
facebook/chisel
|
commands/FBXCTestCommands.py
|
1
|
48565
|
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import fbchisellldbbase as fb
import lldb
NOT_FOUND = 0xFFFFFFFF # UINT32_MAX
def lldbcommands():
return [FBXCPrintDebugDescription(), FBXCPrintTree(), FBXCPrintObject(), FBXCNoId()]
class FBXCPrintDebugDescription(fb.FBCommand):
def name(self):
return "xdebug"
def description(self):
        return "Print the debug description of the XCUIElement in a human-readable format."
def args(self):
return [
fb.FBCommandArgument(
arg="element",
type="XCUIElement*",
help="The element to print debug description.",
default="__default__",
)
]
def run(self, arguments, options):
element = arguments[0]
language = fb.currentLanguage()
if element == "__default__":
element = (
"XCUIApplication()"
if language == lldb.eLanguageTypeSwift
else "(XCUIApplication *)[[XCUIApplication alloc] init]"
)
if language == lldb.eLanguageTypeSwift:
print(
fb.evaluateExpressionValue(
"{}.debugDescription".format(element), language=language
)
.GetObjectDescription()
.replace("\\n", "\n")
.replace("\\'", "'")
.strip(' "\n\t')
)
else:
print(
fb.evaluateExpressionValue(
"[{} debugDescription]".format(element)
).GetObjectDescription()
)
class FBXCPrintTree(fb.FBCommand):
def name(self):
return "xtree"
def description(self):
return "Print XCUIElement subtree."
def args(self):
return [
fb.FBCommandArgument(
arg="element",
type="XCUIElement*",
help="The element to print tree.",
default="__default__",
)
]
def options(self):
return [
fb.FBCommandArgument(
arg="pointer",
short="-p",
long="--pointer",
type="BOOL",
boolean=True,
default=False,
help="Print pointers",
),
fb.FBCommandArgument(
arg="trait",
short="-t",
long="--traits",
type="BOOL",
boolean=True,
default=False,
help="Print traits",
),
fb.FBCommandArgument(
arg="frame",
short="-f",
long="--frame",
type="BOOL",
boolean=True,
default=False,
help="Print frames",
),
]
def run(self, arguments, options):
element = arguments[0]
language = fb.currentLanguage()
if element == "__default__":
element = (
"XCUIApplication()"
if language == lldb.eLanguageTypeSwift
else "(XCUIApplication *)[[XCUIApplication alloc] init]"
)
# Evaluate object
element_sbvalue = fb.evaluateExpressionValue(
"{}".format(element), language=language
)
""":type: lldb.SBValue"""
# Get pointer value, so it will be working in Swift and Objective-C
element_pointer = int(element_sbvalue.GetValue(), 16)
# Get XCElementSnapshot object
snapshot = take_snapshot(element_pointer)
# Print tree for snapshot element
snapshot_object = XCElementSnapshot(snapshot, language=language)
print(
snapshot_object.tree().hierarchy_text(
pointer=options.pointer, trait=options.trait, frame=options.frame
)
)
class FBXCPrintObject(fb.FBCommand):
def name(self):
return "xobject"
def description(self):
return "Print XCUIElement details."
def args(self):
return [
fb.FBCommandArgument(
arg="element",
type="XCUIElement*",
help="The element to print details.",
default="__default__",
)
]
def run(self, arguments, options):
element = arguments[0]
language = fb.currentLanguage()
if element == "__default__":
element = (
"XCUIApplication()"
if language == lldb.eLanguageTypeSwift
else "(XCUIApplication *)[[XCUIApplication alloc] init]"
)
# Evaluate object
element_sbvalue = fb.evaluateExpressionValue(
"{}".format(element), language=language
)
""":type: lldb.SBValue"""
# Get pointer value, so it will be working in Swift and Objective-C
element_pointer = int(element_sbvalue.GetValue(), 16)
# Get XCElementSnapshot object
snapshot = take_snapshot(element_pointer)
# Print details of snapshot element
snapshot_object = XCElementSnapshot(snapshot, language=language)
print(snapshot_object.detail_summary())
class FBXCNoId(fb.FBCommand):
def name(self):
return "xnoid"
def description(self):
        return "Print XCUIElement objects that have a label but no identifier."
def args(self):
return [
fb.FBCommandArgument(
arg="element",
type="XCUIElement*",
                help="The element to start the search from.",
default="__default__",
)
]
def options(self):
return [
fb.FBCommandArgument(
arg="status_bar",
short="-s",
long="--status-bar",
type="BOOL",
boolean=True,
default=False,
help="Print status bar items",
),
fb.FBCommandArgument(
arg="pointer",
short="-p",
long="--pointer",
type="BOOL",
boolean=True,
default=False,
help="Print pointers",
),
fb.FBCommandArgument(
arg="trait",
short="-t",
long="--traits",
type="BOOL",
boolean=True,
default=False,
help="Print traits",
),
fb.FBCommandArgument(
arg="frame",
short="-f",
long="--frame",
type="BOOL",
boolean=True,
default=False,
help="Print frames",
),
]
def run(self, arguments, options):
element = arguments[0]
language = fb.currentLanguage()
if element == "__default__":
element = (
"XCUIApplication()"
if language == lldb.eLanguageTypeSwift
else "(XCUIApplication *)[[XCUIApplication alloc] init]"
)
# Evaluate object
element_sbvalue = fb.evaluateExpressionValue(
"{}".format(element), language=language
)
""":type: lldb.SBValue"""
# Get pointer value, so it will be working in Swift and Objective-C
element_pointer = int(element_sbvalue.GetValue(), 16)
# Get XCElementSnapshot object
snapshot = take_snapshot(element_pointer)
# Print tree for snapshot element
snapshot_object = XCElementSnapshot(snapshot, language=language)
elements = snapshot_object.find_missing_identifiers(
status_bar=options.status_bar
)
if elements is not None:
print(
elements.hierarchy_text(
pointer=options.pointer, trait=options.trait, frame=options.frame
)
)
else:
            print("Couldn't find any elements without an identifier")
def take_snapshot(element):
"""
Takes snapshot (XCElementSnapshot) from XCUIElement (as pointer)
:param int element: Pointer to the XCUIElement
:return: XCElementSnapshot object
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue(
"(XCElementSnapshot *)[[[{} query] matchingSnapshotsWithError:nil] firstObject]".format(
element
)
)
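# Usage sketch (added): the commands above are meant to be run from an lldb
# session attached to a UI test target, for example:
#
#   (lldb) xtree --frame
#   (lldb) xobject (XCUIElement *)someElement
#
# Nothing in this module runs outside of lldb, so the lines stay illustrative.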
class _ElementList(object):
"""
Store element and list of children
:param XCElementSnapshot element: XCElementSnapshot
:param list[_ElementList] children: List of XCElementSnapshot objects
"""
def __init__(self, element, children):
self.element = element
self.children = children
def text(self, pointer, trait, frame, indent):
"""
String representation of the element
:param bool pointer: Print pointers
:param bool trait: Print traits
:param bool frame: Print frames
:param int indent: Indention
:return: String representation of the element
:rtype: str
"""
indent_string = " | " * indent
return "{}{}\n".format(
indent_string,
self.element.summary(pointer=pointer, trait=trait, frame=frame),
)
def hierarchy_text(self, pointer=False, trait=False, frame=False, indent=0):
"""
String representation of the hierarchy of elements
:param bool pointer: Print pointers
:param bool trait: Print traits
:param bool frame: Print frames
:param int indent: Indention
:return: String representation of the hierarchy of elements
:rtype: str
"""
s = self.text(pointer=pointer, trait=trait, frame=frame, indent=indent)
for e in self.children:
s += e.hierarchy_text(
pointer=pointer, trait=trait, frame=frame, indent=indent + 1
)
return s
class XCElementSnapshot(object):
"""
XCElementSnapshot wrapper
:param lldb.SBValue element: XCElementSnapshot object
:param str element_value: Pointer to XCElementSnapshot object
:param language: Project language
:param lldb.SBValue _type: XCUIElement type / XCUIElementType
:param lldb.SBValue _traits: UIAccessibilityTraits
:param lldb.SBValue | None _frame: XCUIElement frame
:param lldb.SBValue _identifier: XCUIElement identifier
:param lldb.SBValue _value: XCUIElement value
:param lldb.SBValue _placeholderValue: XCUIElement placeholder value
:param lldb.SBValue _label: XCUIElement label
:param lldb.SBValue _title: XCUIElement title
:param lldb.SBValue _children: XCUIElement children
:param lldb.SBValue _enabled: XCUIElement is enabled
:param lldb.SBValue _selected: XCUIElement is selected
:param lldb.SBValue _isMainWindow: XCUIElement is main window
:param lldb.SBValue _hasKeyboardFocus: XCUIElement has keyboard focus
:param lldb.SBValue _hasFocus: XCUIElement has focus
:param lldb.SBValue _generation: XCUIElement generation
:param lldb.SBValue _horizontalSizeClass: XCUIElement horizontal class
:param lldb.SBValue _verticalSizeClass: XCUIElement vertical class
"""
def __init__(self, element, language):
"""
:param lldb.SBValue element: XCElementSnapshot object
:param language: Project language
"""
super(XCElementSnapshot, self).__init__()
self.element = element
self.element_value = self.element.GetValue()
self.language = language
self._type = None
self._traits = None
self._frame = None
self._identifier = None
self._value = None
self._placeholderValue = None
self._label = None
self._title = None
self._children = None
self._enabled = None
self._selected = None
self._isMainWindow = None
self._hasKeyboardFocus = None
self._hasFocus = None
self._generation = None
self._horizontalSizeClass = None
self._verticalSizeClass = None
@property
def is_missing_identifier(self):
"""
Checks if element has a label but doesn't have an identifier.
:return: True if element has a label but doesn't have an identifier.
:rtype: bool
"""
return len(self.identifier_value) == 0 and len(self.label_value) > 0
@property
def type(self):
"""
:return: XCUIElement type / XCUIElementType
:rtype: lldb.SBValue
"""
if self._type is None:
name = "_elementType"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._type = fb.evaluateExpressionValue(
"(int)[{} elementType]".format(self.element_value)
)
else:
self._type = self.element.GetChildMemberWithName(name)
return self._type
@property
def type_value(self):
"""
:return: XCUIElementType value
:rtype: int
"""
return int(self.type.GetValue())
@property
def type_summary(self):
"""
:return: XCUIElementType summary
:rtype: str
"""
return self.get_type_value_string(self.type_value)
@property
def traits(self):
"""
:return: UIAccessibilityTraits
:rtype: lldb.SBValue
"""
if self._traits is None:
name = "_traits"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._traits = fb.evaluateExpressionValue(
"(int)[{} traits]".format(self.element_value)
)
else:
self._traits = self.element.GetChildMemberWithName(name)
return self._traits
@property
def traits_value(self):
"""
:return: UIAccessibilityTraits value
:rtype: int
"""
return int(self.traits.GetValue())
@property
def traits_summary(self):
"""
:return: UIAccessibilityTraits summary
:rtype: str
"""
return self.get_traits_value_string(self.traits_value)
@property
def frame(self):
"""
:return: XCUIElement frame
:rtype: lldb.SBValue
"""
if self._frame is None:
import_uikit()
name = "_frame"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._frame = fb.evaluateExpressionValue(
"(CGRect)[{} frame]".format(self.element_value)
)
else:
self._frame = self.element.GetChildMemberWithName(name)
return self._frame
@property
def frame_summary(self):
"""
:return: XCUIElement frame summary
:rtype: str
"""
return CGRect(self.frame).summary()
@property
def identifier(self):
"""
:return: XCUIElement identifier
:rtype: lldb.SBValue
"""
if self._identifier is None:
name = "_identifier"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._identifier = fb.evaluateExpressionValue(
"(NSString *)[{} identifier]".format(self.element_value)
)
else:
self._identifier = self.element.GetChildMemberWithName(name)
return self._identifier
@property
def identifier_value(self):
"""
:return: XCUIElement identifier value
:rtype: str
"""
return normalize_summary(self.identifier.GetSummary())
@property
def identifier_summary(self):
"""
:return: XCUIElement identifier summary
:rtype: str | None
"""
if len(self.identifier_value) == 0:
return None
return "identifier: '{}'".format(self.identifier_value)
@property
def value(self):
"""
:return: XCUIElement value
:rtype: lldb.SBValue
"""
if self._value is None:
name = "_value"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._value = fb.evaluateExpressionValue(
"(NSString *)[{} value]".format(self.element_value)
)
else:
self._value = self.element.GetChildMemberWithName(name)
return self._value
@property
def value_value(self):
"""
:return: XCUIElement value value
:rtype: str
"""
return normalize_summary(self.value.GetSummary())
@property
def value_summary(self):
"""
:return: XCUIElement value summary
:rtype: str | None
"""
if len(self.value_value) == 0:
return None
return "value: '{}'".format(self.value_value)
@property
def placeholder(self):
"""
:return: XCUIElement placeholder value
:rtype: lldb.SBValue
"""
if self._placeholderValue is None:
name = "_placeholderValue"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._placeholderValue = fb.evaluateExpressionValue(
"(NSString *)[{} placeholderValue]".format(self.element_value)
)
else:
self._placeholderValue = self.element.GetChildMemberWithName(name)
return self._placeholderValue
@property
def placeholder_value(self):
"""
:return: XCUIElement placeholderValue value
:rtype: str
"""
return normalize_summary(self.placeholder.GetSummary())
@property
def placeholder_summary(self):
"""
:return: XCUIElement placeholderValue summary
:rtype: str | None
"""
if len(self.placeholder_value) == 0:
return None
return "placeholderValue: '{}'".format(self.placeholder_value)
@property
def label(self):
"""
:return: XCUIElement label
:rtype: lldb.SBValue
"""
if self._label is None:
name = "_label"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._label = fb.evaluateExpressionValue(
"(NSString *)[{} label]".format(self.element_value)
)
else:
self._label = self.element.GetChildMemberWithName(name)
return self._label
@property
def label_value(self):
"""
:return: XCUIElement label value
:rtype: str
"""
return normalize_summary(self.label.GetSummary())
@property
def label_summary(self):
"""
:return: XCUIElement label summary
:rtype: str | None
"""
if len(self.label_value) == 0:
return None
return "label: '{}'".format(self.label_value)
@property
def title(self):
"""
:return: XCUIElement title
:rtype: lldb.SBValue
"""
if self._title is None:
name = "_title"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._title = fb.evaluateExpressionValue(
"(NSString *)[{} title]".format(self.element_value)
)
else:
self._title = self.element.GetChildMemberWithName(name)
return self._title
@property
def title_value(self):
"""
:return: XCUIElement title value
:rtype: str
"""
return normalize_summary(self.title.GetSummary())
@property
def title_summary(self):
"""
:return: XCUIElement title summary
:rtype: str | None
"""
if len(self.title_value) == 0:
return None
return "title: '{}'".format(self.title_value)
@property
def children(self):
"""
:return: XCUIElement children
:rtype: lldb.SBValue
"""
if self._children is None:
name = "_children"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._children = fb.evaluateExpressionValue(
"(NSArray *)[{} children]".format(self.element_value)
)
else:
self._children = self.element.GetChildMemberWithName(name)
return self._children
@property
def children_count(self):
"""
:return: XCUIElement children count
:rtype: int
"""
return self.children.GetNumChildren()
@property
def children_list(self):
"""
:return: XCUIElement children list
:rtype: list[lldb.SBValue]
"""
return [self.children.GetChildAtIndex(i) for i in range(self.children_count)]
@property
def enabled(self):
"""
:return: XCUIElement is enabled
:rtype: lldb.SBValue
"""
if self._enabled is None:
name = "_enabled"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._enabled = fb.evaluateExpressionValue(
"(BOOL)[{} enabled]".format(self.element_value)
)
else:
self._enabled = self.element.GetChildMemberWithName(name)
return self._enabled
@property
def enabled_value(self):
"""
:return: XCUIElement is enabled value
:rtype: bool
"""
return bool(self.enabled.GetValueAsSigned())
@property
def enabled_summary(self):
"""
:return: XCUIElement is enabled summary
:rtype: str | None
"""
if not self.enabled_value:
return "enabled: {}".format(self.enabled_value)
return None
@property
def selected(self):
"""
:return: XCUIElement is selected
:rtype: lldb.SBValue
"""
if self._selected is None:
name = "_selected"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._selected = fb.evaluateExpressionValue(
"(BOOL)[{} selected]".format(self.element_value)
)
else:
self._selected = self.element.GetChildMemberWithName(name)
return self._selected
@property
def selected_value(self):
"""
:return: XCUIElement is selected value
:rtype: bool
"""
return bool(self.selected.GetValueAsSigned())
@property
def selected_summary(self):
"""
:return: XCUIElement is selected summary
:rtype: str | None
"""
if self.selected_value:
return "selected: {}".format(self.selected_value)
return None
@property
def is_main_window(self):
"""
:return: XCUIElement isMainWindow
:rtype: lldb.SBValue
"""
if self._isMainWindow is None:
name = "_isMainWindow"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._isMainWindow = fb.evaluateExpressionValue(
"(BOOL)[{} isMainWindow]".format(self.element_value)
)
else:
self._isMainWindow = self.element.GetChildMemberWithName(name)
return self._isMainWindow
@property
def is_main_window_value(self):
"""
:return: XCUIElement isMainWindow value
:rtype: bool
"""
return bool(self.is_main_window.GetValueAsSigned())
@property
def is_main_window_summary(self):
"""
:return: XCUIElement isMainWindow summary
:rtype: str | None
"""
if self.is_main_window_value:
return "MainWindow"
return None
@property
def keyboard_focus(self):
"""
:return: XCUIElement hasKeyboardFocus
:rtype: lldb.SBValue
"""
if self._hasKeyboardFocus is None:
name = "_hasKeyboardFocus"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._hasKeyboardFocus = fb.evaluateExpressionValue(
"(BOOL)[{} hasKeyboardFocus]".format(self.element_value)
)
else:
self._hasKeyboardFocus = self.element.GetChildMemberWithName(name)
return self._hasKeyboardFocus
@property
def keyboard_focus_value(self):
"""
:return: XCUIElement hasKeyboardFocus value
:rtype: bool
"""
return bool(self.keyboard_focus.GetValueAsSigned())
@property
def keyboard_focus_summary(self):
"""
:return: XCUIElement hasKeyboardFocus summary
:rtype: str | None
"""
if self.keyboard_focus_value:
return "hasKeyboardFocus: {}".format(self.keyboard_focus_value)
return None
@property
def focus(self):
"""
:return: XCUIElement hasFocus
:rtype: lldb.SBValue
"""
if self._hasFocus is None:
name = "_hasFocus"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._hasFocus = fb.evaluateExpressionValue(
"(BOOL)[{} hasFocus]".format(self.element_value)
)
else:
self._hasFocus = self.element.GetChildMemberWithName(name)
return self._hasFocus
@property
def focus_value(self):
"""
:return: XCUIElement hasFocus value
:rtype: bool
"""
return bool(self.focus.GetValueAsSigned())
@property
def focus_summary(self):
"""
:return: XCUIElement hasFocus summary
:rtype: str | None
"""
if self.focus_value:
return "hasFocus: {}".format(self.focus_value)
return None
@property
def generation(self):
"""
:return: XCUIElement generation
:rtype: lldb.SBValue
"""
if self._generation is None:
name = "_generation"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._generation = fb.evaluateExpressionValue(
"(unsigned int)[{} generation]".format(self.element_value)
)
else:
self._generation = self.element.GetChildMemberWithName(name)
return self._generation
@property
def generation_value(self):
"""
:return: XCUIElement generation value
:rtype: int
"""
return int(self.generation.GetValueAsUnsigned())
@property
def horizontal_size_class(self):
"""
:return: XCUIElement horizontal size class
:rtype: lldb.SBValue
"""
if self._horizontalSizeClass is None:
name = "_horizontalSizeClass"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._horizontalSizeClass = fb.evaluateExpressionValue(
"(int)[{} horizontalSizeClass]".format(self.element_value)
)
else:
self._horizontalSizeClass = self.element.GetChildMemberWithName(name)
return self._horizontalSizeClass
@property
def horizontal_size_class_value(self):
"""
:return: XCUIElement horizontal size class value
:rtype: int
"""
return int(self.horizontal_size_class.GetValue())
@property
def horizontal_size_class_summary(self):
"""
:return: XCUIElement horizontal size class summary
"""
return self.get_user_interface_size_class_string(
self.horizontal_size_class_value
)
@property
def vertical_size_class(self):
"""
:return: XCUIElement vertical size class
:rtype: lldb.SBValue
"""
if self._verticalSizeClass is None:
name = "_verticalSizeClass"
if self.element.GetIndexOfChildWithName(name) == NOT_FOUND:
self._verticalSizeClass = fb.evaluateExpressionValue(
"(int)[{} verticalSizeClass]".format(self.element_value)
)
else:
self._verticalSizeClass = self.element.GetChildMemberWithName(name)
return self._verticalSizeClass
@property
def vertical_size_class_value(self):
"""
:return: XCUIElement vertical size class value
:rtype: int
"""
return int(self.vertical_size_class.GetValue())
@property
def vertical_size_class_summary(self):
"""
:return: XCUIElement vertical size class summary
"""
return self.get_user_interface_size_class_string(self.vertical_size_class_value)
@property
def uniquely_identifying_objective_c_code(self):
"""
:return: XCUIElement uniquely identifying Objective-C code
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue(
"(id)[{} _uniquelyIdentifyingObjectiveCCode]".format(self.element_value)
)
@property
def uniquely_identifying_objective_c_code_value(self):
"""
:return: XCUIElement uniquely identifying Objective-C code value
:rtype: str
"""
return normalize_array_description(
self.uniquely_identifying_objective_c_code.GetObjectDescription()
)
@property
def uniquely_identifying_swift_code(self):
"""
:return: XCUIElement uniquely identifying Swift code
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue(
"(id)[{} _uniquelyIdentifyingSwiftCode]".format(self.element_value)
)
@property
def uniquely_identifying_swift_code_value(self):
"""
:return: XCUIElement uniquely identifying Swift code value
:rtype: str
"""
return normalize_array_description(
self.uniquely_identifying_swift_code.GetObjectDescription()
)
@property
def is_touch_bar_element(self):
"""
:return: XCUIElement is touch bar element
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue(
"(BOOL)[{} isTouchBarElement]".format(self.element_value)
)
@property
def is_touch_bar_element_value(self):
"""
:return: XCUIElement is touch bar element value
:rtype: bool
"""
return bool(self.is_touch_bar_element.GetValueAsSigned())
@property
def is_top_level_touch_bar_element(self):
"""
:return: XCUIElement is top level touch bar element
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue(
"(BOOL)[{} isTopLevelTouchBarElement]".format(self.element_value)
)
@property
def is_top_level_touch_bar_element_value(self):
"""
:return: XCUIElement is top level touch bar element value
:rtype: bool
"""
return bool(self.is_top_level_touch_bar_element.GetValueAsSigned())
@property
def suggested_hit_points(self):
"""
:return: XCUIElement suggested hit points
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue(
"(NSArray *)[{} suggestedHitpoints]".format(self.element_value)
)
@property
def suggested_hit_points_value(self):
"""
:return: XCUIElement suggested hit points
:rtype: str
"""
return normalize_array_description(
self.suggested_hit_points.GetObjectDescription()
)
@property
def visible_frame(self):
"""
:return: XCUIElement visible frame
:rtype: lldb.SBValue
"""
import_uikit()
return fb.evaluateExpressionValue(
"(CGRect)[{} visibleFrame]".format(self.element_value)
)
@property
def visible_frame_summary(self):
"""
:return: XCUIElement visible frame
:rtype: str
"""
return CGRect(self.visible_frame).summary()
@property
def depth(self):
"""
:return: XCUIElement depth
:rtype: lldb.SBValue
"""
return fb.evaluateExpressionValue("(int)[{} depth]".format(self.element_value))
@property
def depth_value(self):
"""
:return: XCUIElement depth
:rtype: int
"""
return int(self.depth.GetValue())
@property
def hit_point(self):
"""
:return: XCUIElement hit point
:rtype: lldb.SBValue
"""
import_uikit()
return fb.evaluateExpressionValue(
"(CGPoint)[{} hitPoint]".format(self.element_value)
)
@property
def hit_point_value(self):
"""
:return: XCUIElement hit point
:rtype: str
"""
return CGPoint(self.hit_point).summary()
@property
def hit_point_for_scrolling(self):
"""
:return: XCUIElement hit point for scrolling
:rtype: lldb.SBValue
"""
import_uikit()
return fb.evaluateExpressionValue(
"(CGPoint)[{} hitPointForScrolling]".format(self.element_value)
)
@property
def hit_point_for_scrolling_value(self):
"""
:return: XCUIElement hit point for scrolling
:rtype: str
"""
return CGPoint(self.hit_point_for_scrolling).summary()
def summary(self, pointer=False, trait=False, frame=False):
"""
Returns XCElementSnapshot summary
:param bool pointer: Print pointers
:param bool trait: Print traits
:param bool frame: Print frames
:return: XCElementSnapshot summary
:rtype: str
"""
type_text = self.type_summary
if pointer:
type_text += " {:#x}".format(int(self.element_value, 16))
if trait:
type_text += " traits: {}({:#x})".format(
self.traits_summary, self.traits_value
)
frame_text = self.frame_summary if frame else None
identifier = self.identifier_summary
label = self.label_summary
title = self.title_summary
value = self.value_summary
placeholder = self.placeholder_summary
enabled = self.enabled_summary
selected = self.selected_summary
main_window = self.is_main_window_summary
keyboard_focus = self.keyboard_focus_summary
focus = self.focus_summary
texts = [
t
for t in [
frame_text,
identifier,
label,
title,
value,
placeholder,
enabled,
selected,
main_window,
keyboard_focus,
focus,
]
if t is not None
]
return "{}: {}".format(type_text, ", ".join(texts))
def detail_summary(self):
"""
Returns XCElementSnapshot detail summary
:return: XCElementSnapshot detail summary
:rtype: str
"""
texts = list()
texts.append("Pointer: {:#x}".format(int(self.element_value, 16)))
texts.append("Type: {}".format(self.type_summary))
texts.append("Depth: {}".format(self.depth_value))
texts.append(
"Traits: {} ({:#x})".format(self.traits_summary, self.traits_value)
)
texts.append("Frame: {}".format(self.frame_summary))
texts.append("Visible frame: {}".format(self.visible_frame_summary))
texts.append("Identifier: '{}'".format(self.identifier_value))
texts.append("Label: '{}'".format(self.label_value))
texts.append("Title: '{}'".format(self.title_value))
texts.append("Value: '{}'".format(self.value_value))
texts.append("Placeholder: '{}'".format(self.placeholder_value))
if self.language != lldb.eLanguageTypeSwift:
            # They don't work on Swift :(
texts.append("Hit point: {}".format(self.hit_point_value))
texts.append(
"Hit point for scrolling: {}".format(self.hit_point_for_scrolling_value)
)
texts.append("Enabled: {}".format(self.enabled_value))
texts.append("Selected: {}".format(self.selected_value))
texts.append("Main Window: {}".format(self.is_main_window_value))
texts.append("Keyboard focus: {}".format(self.keyboard_focus_value))
texts.append("Focus: {}".format(self.focus_value))
texts.append("Generation: {}".format(self.generation_value))
texts.append(
"Horizontal size class: {}".format(self.horizontal_size_class_summary)
)
texts.append("Vertical size class: {}".format(self.vertical_size_class_summary))
texts.append("TouchBar element: {}".format(self.is_touch_bar_element_value))
texts.append(
"TouchBar top level element: {}".format(
self.is_top_level_touch_bar_element_value
)
)
texts.append(
"Unique Objective-C: {}".format(
self.uniquely_identifying_objective_c_code_value
)
)
texts.append(
"Unique Swift: {}".format(self.uniquely_identifying_swift_code_value)
)
texts.append("Suggested hit points: {}".format(self.suggested_hit_points_value))
return "\n".join(texts)
def tree(self):
"""
Returns tree of elements in hierarchy
:return: Elements hierarchy
:rtype: _ElementList
"""
children = [
XCElementSnapshot(e, self.language).tree() for e in self.children_list
]
return _ElementList(self, children)
def find_missing_identifiers(self, status_bar):
"""
        Find elements which have a label but don't have an identifier
:param bool status_bar: Print status bar items
        :return: Hierarchy structure with items which have a label but don't have an identifier
:rtype: _ElementList | None
"""
# Do not print status bar items
if status_bar is not True and self.type_value == XCUIElementType.StatusBar:
return None
children_missing = [
XCElementSnapshot(e, self.language).find_missing_identifiers(
status_bar=status_bar
)
for e in self.children_list
]
children_missing = [x for x in children_missing if x is not None]
# Self and its children are not missing identifiers
if self.is_missing_identifier is False and len(children_missing) == 0:
return None
return _ElementList(self, children_missing)
@staticmethod
def get_type_value_string(value):
"""
Get element type string from XCUIElementType (as int)
:param int value: XCUIElementType (as int)
:return: XCUIElementType string
:rtype: str
"""
return XCUIElementType.name_for_value(value)
@staticmethod
def get_traits_value_string(value):
"""
Get element traits string from UIAccessibilityTraits (as int)
:param int value: UIAccessibilityTraits (as int)
:return: UIAccessibilityTraits string
:rtype: str
"""
return UIAccessibilityTraits.name_for_value(value)
@staticmethod
def get_user_interface_size_class_string(value):
"""
Get user interface size class string from UIUserInterfaceSizeClass (as int)
        :param int value: UIUserInterfaceSizeClass (as int)
:return: UIUserInterfaceSizeClass string
:rtype: str
"""
return UIUserInterfaceSizeClass.name_for_value(value)
class XCUIElementType(object):
"""
Represents all XCUIElementType types
"""
Any = 0
Other = 1
Application = 2
Group = 3
Window = 4
Sheet = 5
Drawer = 6
Alert = 7
Dialog = 8
Button = 9
RadioButton = 10
RadioGroup = 11
CheckBox = 12
DisclosureTriangle = 13
PopUpButton = 14
ComboBox = 15
MenuButton = 16
ToolbarButton = 17
Popover = 18
Keyboard = 19
Key = 20
NavigationBar = 21
TabBar = 22
TabGroup = 23
Toolbar = 24
StatusBar = 25
Table = 26
TableRow = 27
TableColumn = 28
Outline = 29
OutlineRow = 30
Browser = 31
CollectionView = 32
Slider = 33
PageIndicator = 34
ProgressIndicator = 35
ActivityIndicator = 36
SegmentedControl = 37
Picker = 38
PickerWheel = 39
Switch = 40
Toggle = 41
Link = 42
Image = 43
Icon = 44
SearchField = 45
ScrollView = 46
ScrollBar = 47
StaticText = 48
TextField = 49
SecureTextField = 50
DatePicker = 51
TextView = 52
Menu = 53
MenuItem = 54
MenuBar = 55
MenuBarItem = 56
Map = 57
WebView = 58
IncrementArrow = 59
DecrementArrow = 60
Timeline = 61
RatingIndicator = 62
ValueIndicator = 63
SplitGroup = 64
Splitter = 65
RelevanceIndicator = 66
ColorWell = 67
HelpTag = 68
Matte = 69
DockItem = 70
Ruler = 71
RulerMarker = 72
Grid = 73
LevelIndicator = 74
Cell = 75
LayoutArea = 76
LayoutItem = 77
Handle = 78
Stepper = 79
Tab = 80
TouchBar = 81
@classmethod
def _attributes_by_value(cls):
"""
:return: Hash of all attributes and their values
:rtype: dict[int, str]
"""
class_attributes = set(dir(cls)) - set(dir(object))
return dict(
[
(getattr(cls, n), n)
for n in class_attributes
if not callable(getattr(cls, n)) and not n.startswith("__")
]
)
@classmethod
def name_for_value(cls, value):
"""
Get element type string from XCUIElementType (as int)
:param int value: XCUIElementType (as int)
:return: Name of type
:rtype: str
"""
attributes = cls._attributes_by_value()
if value in attributes:
return attributes[value]
else:
return "Unknown ({:#x})".format(value)
class UIAccessibilityTraits(object):
"""
Represents all UIAccessibilityTraits types
"""
Button = 0x0000000000000001
Link = 0x0000000000000002
Image = 0x0000000000000004
Selected = 0x0000000000000008
PlaysSound = 0x0000000000000010
KeyboardKey = 0x0000000000000020
StaticText = 0x0000000000000040
SummaryElement = 0x0000000000000080
NotEnabled = 0x0000000000000100
UpdatesFrequently = 0x0000000000000200
SearchField = 0x0000000000000400
StartsMediaSession = 0x0000000000000800
Adjustable = 0x0000000000001000
AllowsDirectInteraction = 0x0000000000002000
CausesPageTurn = 0x0000000000004000
TabBar = 0x0000000000008000
Header = 0x0000000000010000
@classmethod
def _attributes_by_value(cls):
"""
:return: Hash of all attributes and their values
:rtype: dict[int, str]
"""
class_attributes = set(dir(cls)) - set(dir(object))
return dict(
[
(getattr(cls, n), n)
for n in class_attributes
if not callable(getattr(cls, n)) and not n.startswith("__")
]
)
@classmethod
def name_for_value(cls, value):
"""
Get element traits string from UIAccessibilityTraits (as int)
:param int value: UIAccessibilityTraits (as int)
:return: UIAccessibilityTraits string
:rtype: str
"""
if value == 0:
return "None"
traits = []
attributes = cls._attributes_by_value()
for k in attributes.keys():
if value & k:
traits.append(attributes[k])
if len(traits) == 0:
return "Unknown"
else:
return ", ".join(traits)
class UIUserInterfaceSizeClass(object):
"""
Represents all UIUserInterfaceSizeClass types
"""
Unspecified = 0
Compact = 1
Regular = 2
@classmethod
def name_for_value(cls, value):
"""
Get user interface size class string from UIUserInterfaceSizeClass (as int)
        :param int value: UIUserInterfaceSizeClass (as int)
:return: UIUserInterfaceSizeClass string
:rtype: str
"""
if value == cls.Unspecified:
return "Unspecified"
elif value == cls.Compact:
return "Compact"
elif value == cls.Regular:
return "Regular"
else:
return "Unknown ({:#x})".format(value)
class CGRect(object):
"""
CGRect wrapper
:param lldb.SBValue element: CGRect object
"""
def __init__(self, element):
"""
:param lldb.SBValue element: CGRect object
"""
super(CGRect, self).__init__()
self.element = element
def summary(self):
"""
:return: CGRect summary
:rtype: str
"""
origin_element = self.element.GetChildMemberWithName("origin")
origin = CGPoint(origin_element)
size = self.element.GetChildMemberWithName("size")
width = size.GetChildMemberWithName("width")
height = size.GetChildMemberWithName("height")
width_value = float(width.GetValue())
height_value = float(height.GetValue())
return "{{{}, {{{}, {}}}}}".format(origin.summary(), width_value, height_value)
class CGPoint(object):
"""
CGPoint wrapper
:param lldb.SBValue element: CGPoint object
"""
def __init__(self, element):
super(CGPoint, self).__init__()
self.element = element
def summary(self):
"""
:return: CGPoint summary
:rtype: str
"""
x = self.element.GetChildMemberWithName("x")
y = self.element.GetChildMemberWithName("y")
x_value = float(x.GetValue())
y_value = float(y.GetValue())
return "{{{}, {}}}".format(x_value, y_value)
def normalize_summary(summary):
"""
Normalize summary by removing "'" and "@" characters
:param str summary: Summary string to normalize
:return: Normalized summary string
:rtype: str
"""
return summary.lstrip("@").strip('"')
def normalize_array_description(description):
"""
Normalize array object description by removing "<" and ">" characters and content between them.
:param str description: Array object description
:return: Normalized array object description string
:rtype: str
"""
return re.sub("^(<.*>)", "", description).strip()
_uikit_imported = False
def import_uikit():
"""
Import UIKit framework to the debugger
"""
global _uikit_imported
if _uikit_imported:
return
_uikit_imported = True
fb.evaluateExpressionValue("@import UIKit")
def debug(element):
"""
Debug helper
:param lldb.SBValue element: Element to debug
"""
print("---")
print("element: {}".format(element))
print("element class: {}".format(element.__class__))
print("element name: {}".format(element.GetName()))
print("element type name: {}".format(element.GetTypeName()))
print("element value: {}".format(element.GetValue()))
print("element value class: {}".format(element.GetValue().__class__))
print("element value type: {}".format(element.GetValueType()))
print("element value signed: {0}({0:#x})".format(element.GetValueAsSigned()))
print("element value unsigned: {0}({0:#x})".format(element.GetValueAsUnsigned()))
print("element summary: {}".format(element.GetSummary()))
print("element description: {}".format(element.GetObjectDescription()))
print("element children num: {}".format(element.GetNumChildren()))
for i in range(0, element.GetNumChildren()):
child = element.GetChildAtIndex(i)
""":type: lldb.SBValue"""
print("element child {:02}: {}".format(i, child.GetName()))
print("===")
|
mit
| -1,635,572,801,278,517,000
| 28.776211
| 99
| 0.564789
| false
| 4.377197
| false
| false
| false
|
fossfreedom/indicator-sysmonitor
|
sensors.py
|
1
|
22495
|
#!/usr/bin/python3
# coding: utf-8
#
# A simple indicator applet displaying cpu and memory information
#
# Author: Alex Eftimie <alex@eftimie.ro>
# Fork Author: fossfreedom <foss.freedom@gmail.com>
# Original Homepage: http://launchpad.net/indicator-sysmonitor
# Fork Homepage: https://github.com/fossfreedom/indicator-sysmonitor
# License: GPL v3
import json
import time
from threading import Thread
from threading import Event
import subprocess
import copy
import logging
import re
import os
import platform
from gettext import gettext as _
from gi.repository import GLib
import psutil as ps
ps_v1_api = int(ps.__version__.split('.')[0]) <= 1
B_UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']
cpu_load = []
def bytes_to_human(num):
for unit in B_UNITS:
if abs(num) < 1000.0:
return "%3.2f %s" % (num, unit)
num /= 1000.0
return "%.2f %s" % (num, 'YB')
class ISMError(Exception):
"""General exception."""
def __init__(self, msg):
Exception.__init__(self, msg)
class SensorManager(object):
"""Singleton"""
_instance = None
SETTINGS_FILE = os.getenv("HOME") + '/.indicator-sysmonitor.json'
digit_regex = re.compile(r'''\d+''')
class __impl:
settings = {
'custom_text': 'cpu: {cpu} mem: {mem}',
'interval': 2,
'on_startup': False,
'sensors': {
# 'name' => (desc, cmd)
}
}
supported_sensors = None
def __init__(self):
self.sensor_instances = [CPUSensor(),
NvGPUSensor(),
MemSensor(),
NetSensor(),
NetCompSensor(),
TotalNetSensor(),
BatSensor(),
FSSensor(),
SwapSensor(),
UporDownSensor(),
PublicIPSensor(),
CPUTemp(),
NvGPUTemp()]
for sensor in self.sensor_instances:
self.settings['sensors'][sensor.name] = (sensor.desc, sensor.cmd)
self._last_net_usage = [0, 0] # (up, down)
self._fetcher = None
# @staticmethod
@classmethod
def update_regex(self, names=None):
if names is None:
names = list(self.settings["sensors"].keys())
reg = '|'.join(names)
reg = "\A({})\Z".format(reg)
# global supported_sensors
self.supported_sensors = re.compile("{}".format(reg))
def get(self, name):
"""
:param name: of the sensor
:return: the sensor instance
"""
for sensor in self.sensor_instances:
if sensor.check(name) is not None:
return sensor
return None
# @staticmethod
def exists(self, name):
"""Checks if the sensor name exists"""
return bool(self.supported_sensors.match(name))
# @staticmethod
def check(self, sensor_string):
for sensor in self.sensor_instances:
sensor.check(sensor_string)
def add(self, name, desc, cmd):
"""Adds a custom sensors."""
if self.exists(name):
raise ISMError(_("Sensor name already in use."))
self.settings["sensors"][name] = (desc, cmd)
self.update_regex()
def delete(self, name):
"""Deletes a custom sensors."""
sensors = self.settings['sensors']
names = list(sensors.keys())
if name not in names:
raise ISMError(_("Sensor is not defined."))
_desc, default = sensors[name]
if default is True:
raise ISMError(_("Can not delete default sensors."))
del sensors[name]
self.update_regex()
def edit(self, name, newname, desc, cmd):
"""Edits a custom sensors."""
try:
sensors = self.settings['sensors']
_desc, default = sensors[name]
except KeyError:
raise ISMError(_("Sensor does not exists."))
if default is True:
raise ISMError(_("Can not edit default sensors."))
if newname != name:
if newname in list(sensors.keys()):
raise ISMError(_("Sensor name already in use."))
sensors[newname] = (desc, cmd)
del sensors[name]
self.settings["custom_text"] = self.settings["custom_text"].replace(
name, newname)
self.update_regex()
def load_settings(self):
"""It gets the settings from the config file and
sets them to the correct vars"""
try:
with open(SensorManager.SETTINGS_FILE, 'r') as f:
cfg = json.load(f)
if cfg['custom_text'] is not None:
self.settings['custom_text'] = cfg['custom_text']
if cfg['interval'] is not None:
self.settings['interval'] = cfg['interval']
if cfg['on_startup'] is not None:
self.settings['on_startup'] = cfg['on_startup']
if cfg['sensors'] is not None:
# need to merge our current list of sensors with what was previously saved
newcopy = self.settings['sensors']
newcopy.update(cfg['sensors'])
self.settings['sensors'] = newcopy
self.update_regex()
except Exception as ex:
logging.exception(ex)
logging.error('Reading settings failed')
def save_settings(self):
"""It stores the current settings to the config file."""
# TODO: use gsettings
try:
with open(SensorManager.SETTINGS_FILE, 'w') as f:
f.write(json.dumps(self.settings))
except Exception as ex:
logging.exception(ex)
logging.error('Writing settings failed')
def get_guide(self):
"""Updates the label guide from appindicator."""
# foss - I'm doubtful any of this guide stuff works - this needs to be recoded
# each sensor needs a sensor guide
data = self._fetcher.fetch()
for key in data:
if key.startswith('fs'):
data[key] = '000gB'
break
data['mem'] = data['cpu'] = data['bat'] = '000%'
data['net'] = '↓666kB/s ↑666kB/s'
            return self.settings['custom_text'].format(**data)
def get_label(self, data):
"""It updates the appindicator text with the the values
from data"""
try:
label = self.settings["custom_text"].format(**data) if len(data) \
else _("(no output)")
except KeyError as ex:
label = _("Invalid Sensor: {}").format(ex)
except Exception as ex:
logging.exception(ex)
label = _("Unknown error: ").format(ex)
return label
def initiate_fetcher(self, parent):
if self._fetcher is not None:
self._fetcher.stop()
self._fetcher = StatusFetcher(parent)
self._fetcher.start()
logging.info("Fetcher started")
def fill_liststore(self, list_store):
sensors = self.settings['sensors']
for name in list(sensors.keys()):
list_store.append([name, sensors[name][0]])
def get_command(self, name):
cmd = self.settings["sensors"][name][1]
return cmd
def set_custom_text(self, custom_text):
self.settings["custom_text"] = custom_text
def get_custom_text(self):
return self.settings["custom_text"]
def set_interval(self, interval):
self.settings["interval"] = interval
def get_interval(self):
return self.settings["interval"]
def get_results(self):
"""Return a dict whose element are the sensors
and their values"""
res = {}
from preferences import Preferences
# We call this only once per update
global cpu_load
cpu_load = ps.cpu_percent(interval=0, percpu=True)
# print (self.settings["custom_text"]) custom_text is the full visible string seen in Preferences edit field
for sensor in Preferences.sensors_regex.findall(
self.settings["custom_text"]):
sensor = sensor[1:-1]
instance = self.get(sensor)
if instance:
value = instance.get_value(sensor)
if value:
res[sensor] = value
else: # custom sensor
res[sensor] = BaseSensor.script_exec(self.settings["sensors"][sensor][1])
return res
def __init__(self):
if SensorManager._instance is None:
SensorManager._instance = SensorManager.__impl()
# Store instance reference as the only member in the handle
self.__dict__['_SensorManager__instance'] = SensorManager._instance
def __getattr__(self, attr):
""" Delegate access to implementation """
return getattr(self.__instance, attr)
def __setattr__(self, attr, value):
""" Delegate access to implementation """
return setattr(self.__instance, attr, value)
class BaseSensor(object):
name = ''
desc = ''
cmd = True
def check(self, sensor):
'''
checks to see if the sensor string passed in valid
:param sensor: string representation of the sensor
:return: True if the sensor is understood and passes the check or
an Exception if the format of the sensor string is wrong
None is returned if the sensor string is nothing to-do with the Sensor name
'''
if sensor == self.name:
return True
def get_value(self, sensor_data):
return None
@staticmethod
def script_exec(command):
"""Execute a custom command."""
try:
output = subprocess.Popen(command, stdout=subprocess.PIPE,
shell=True).communicate()[0].strip()
except:
output = _("Error")
logging.error(_("Error running: {}").format(command))
return output.decode('utf-8') if output else _("(no output)")
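# Illustrative sketch (not part of the original file): custom sensors are
# just shell one-liners run through script_exec, e.g.
#     >>> BaseSensor.script_exec('echo 42')
#     '42'
# (any failure is logged and reported as the translated "Error" string)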
class NvGPUSensor(BaseSensor):
name = 'nvgpu'
desc = _('Nvidia GPU utilization')
def get_value(self, sensor):
if sensor == 'nvgpu':
return "{:02.0f}%".format(self._fetch_gpu())
def _fetch_gpu(self, percpu=False):
result = subprocess.check_output(['nvidia-smi', '--query-gpu=utilization.gpu', '--format=csv'])
perc = result.splitlines()[1]
perc = perc[:-2]
return int(perc)
class NvGPUTemp(BaseSensor):
"""Return GPU temperature expressed in Celsius
"""
name = 'nvgputemp'
desc = _('Nvidia GPU Temperature')
def get_value(self, sensor):
# degrees symbol is unicode U+00B0
return "{}\u00B0C".format(self._fetch_gputemp())
def _fetch_gputemp(self):
result = subprocess.check_output(['nvidia-smi', '--query-gpu=temperature.gpu', '--format=csv'])
perc = result.splitlines()[1]
return int(perc)
class CPUSensor(BaseSensor):
    name = r'cpu\d*'
    desc = _('Average CPU usage')
    cpus = re.compile(r"\Acpu\d*\Z")
last = None
if ps_v1_api:
cpu_count = ps.NUM_CPUS
else:
cpu_count = ps.cpu_count()
def check(self, sensor):
if self.cpus.match(sensor):
if len(sensor) == 3:
nber = 0
else:
nber = int(sensor[3:]) if len(sensor) > 3 else 999
if nber >= self.cpu_count:
print(sensor)
print(self.cpu_count)
print(len(sensor))
raise ISMError(_("Invalid number of CPUs."))
return True
def get_value(self, sensor):
if sensor == 'cpu':
return "{:02.0f}%".format(self._fetch_cpu())
elif CPUSensor.cpus.match(sensor):
cpus = self._fetch_cpu(percpu=True)
return "{:02.0f}%".format(cpus[int(sensor[3:])])
return None
def _fetch_cpu(self, percpu=False):
if percpu:
return cpu_load
r = 0.0
for i in cpu_load:
r += i
r /= self.cpu_count
return r
class MemSensor(BaseSensor):
name = 'mem'
desc = _('Physical memory in use.')
def get_value(self, sensor_data):
return '{:02.0f}%'.format(self._fetch_mem())
def _fetch_mem(self):
"""It gets the total memory info and return the used in percent."""
def grep(pattern, word_list):
expr = re.compile(pattern)
arr = [elem for elem in word_list if expr.match(elem)]
return arr[0]
with open('/proc/meminfo') as meminfofile:
meminfo = meminfofile.readlines()
total = SensorManager.digit_regex.findall(grep("MemTotal", meminfo))[0]
release = re.split('\.', platform.release())
major_version = int(release[0])
minor_version = int(re.search(r'\d+', release[1]).group())
if (minor_version >= 16 and major_version == 3) or (major_version > 3):
available = SensorManager.digit_regex.findall(
grep("MemAvailable", meminfo))[0]
return 100 - 100 * int(available) / float(total)
else:
free = SensorManager.digit_regex.findall(
grep("MemFree", meminfo))[0]
cached = SensorManager.digit_regex.findall(
grep("Cached", meminfo))[0]
free = int(free) + int(cached)
return 100 - 100 * free / float(total)
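# Illustrative sketch (not part of the original file) of the percentage
# math above, with made-up /proc/meminfo numbers (kernel >= 3.16 path):
#     MemTotal: 8000000 kB, MemAvailable: 2000000 kB
#     100 - 100 * 2000000 / 8000000.0   # -> 75.0, rendered as '75%'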
class NetSensor(BaseSensor):
name = 'net'
desc = _('Network activity.')
_last_net_usage = [0, 0] # (up, down)
def get_value(self, sensor_data):
return self._fetch_net()
def _fetch_net(self):
"""It returns the bytes sent and received in bytes/second"""
current = [0, 0]
for _, iostat in list(ps.net_io_counters(pernic=True).items()):
current[0] += iostat.bytes_recv
current[1] += iostat.bytes_sent
dummy = copy.deepcopy(current)
current[0] -= self._last_net_usage[0]
current[1] -= self._last_net_usage[1]
self._last_net_usage = dummy
mgr = SensorManager()
current[0] /= mgr.get_interval()
current[1] /= mgr.get_interval()
return '↓ {:>9s}/s ↑ {:>9s}/s'.format(bytes_to_human(current[0]), bytes_to_human(current[1]))
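# Illustrative sketch (not part of the original file): the sensor keeps the
# previous counter totals and reports a rate,
#     rate = (bytes_now - bytes_last) / update_interval_seconds
# so 500000 new bytes received over a 2 s interval shows as '250.00 KB/s'.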
class NetCompSensor(BaseSensor):
name = 'netcomp'
desc = _('Network activity in Compact form.')
_last_net_usage = [0, 0] # (up, down)
def get_value(self, sensor_data):
return self._fetch_net()
def _fetch_net(self):
"""It returns the bytes sent and received in bytes/second"""
current = [0, 0]
for _, iostat in list(ps.net_io_counters(pernic=True).items()):
current[0] += iostat.bytes_recv
current[1] += iostat.bytes_sent
dummy = copy.deepcopy(current)
current[0] -= self._last_net_usage[0]
current[1] -= self._last_net_usage[1]
self._last_net_usage = dummy
mgr = SensorManager()
current[0] /= mgr.get_interval()
current[1] /= mgr.get_interval()
return '⇵ {:>9s}/s'.format(bytes_to_human(current[0] + current[1]))
class TotalNetSensor(BaseSensor):
name = 'totalnet'
desc = _('Total Network activity.')
def get_value(self, sensor_data):
return self._fetch_net()
def _fetch_net(self):
"""It returns total number the bytes sent and received"""
current = [0, 0]
for _, iostat in list(ps.net_io_counters(pernic=True).items()):
current[0] += iostat.bytes_recv
current[1] += iostat.bytes_sent
mgr = SensorManager()
current[0] /= mgr.get_interval()
current[1] /= mgr.get_interval()
return ' Σ {:>9s}'.format(bytes_to_human(current[0] + current[1]))
class BatSensor(BaseSensor):
    name = r'bat\d*'
    desc = _('Battery capacity.')
    bat = re.compile(r"\Abat\d*\Z")
def check(self, sensor):
if self.bat.match(sensor):
bat_id = int(sensor[3:]) if len(sensor) > 3 else 0
if not os.path.exists("/sys/class/power_supply/BAT{}".format(bat_id)):
raise ISMError(_("Invalid number returned for the Battery sensor."))
return True
def get_value(self, sensor):
if BatSensor.bat.match(sensor):
bat_id = int(sensor[3:]) if len(sensor) > 3 else 0
return '{:02.0f}%'.format(self._fetch_bat(bat_id))
return None
def _fetch_bat(self, batid):
"""Fetch the the amount of remaining battery"""
capacity = 0
try:
with open("/sys/class/power_supply/BAT{}/capacity".format(batid)) as state:
while True:
capacity = int(state.readline())
break
except IOError:
return "N/A"
return capacity
class FSSensor(BaseSensor):
name = 'fs//.+'
desc = _('Available space in file system.')
def check(self, sensor):
if sensor.startswith("fs//"):
path = sensor.split("//")[1]
if not os.path.exists(path):
raise ISMError(_("Path: {} doesn't exists.").format(path))
return True
def get_value(self, sensor):
if sensor.startswith('fs//'):
parts = sensor.split('//')
return self._fetch_fs(parts[1])
return None
def _fetch_fs(self, mount_point):
"""It returns the amount of bytes available in the fs in
        a human-readable format."""
if not os.access(mount_point, os.F_OK):
return None
stat = os.statvfs(mount_point)
bytes_ = stat.f_bavail * stat.f_frsize
for unit in B_UNITS:
if bytes_ < 1024:
return "{} {}".format(round(bytes_, 2), unit)
bytes_ /= 1024
class SwapSensor(BaseSensor):
name = 'swap'
desc = _("Average swap usage")
def get_value(self, sensor):
return '{:02.0f}%'.format(self._fetch_swap())
def _fetch_swap(self):
"""Return the swap usage in percent"""
usage = 0
total = 0
try:
with open("/proc/swaps") as swaps:
swaps.readline()
for line in swaps.readlines():
dummy, dummy, total_, usage_, dummy = line.split()
total += int(total_)
usage += int(usage_)
if total == 0:
return 0
else:
return usage * 100 / total
except IOError:
return "N/A"
class UporDownSensor(BaseSensor):
name = 'upordown'
desc = _("Display if your internet connection is up or down")
command = 'if wget -qO /dev/null google.com > /dev/null; then echo "☺"; else echo "☹"; fi'
current_val = ""
lasttime = 0 # we refresh this every 10 seconds
def get_value(self, sensor):
if self.current_val == "" or self.lasttime == 0 or (time.time() - self.lasttime) > 10:
self.current_val = self.script_exec(self.command)
self.lasttime = time.time()
return self.current_val
class PublicIPSensor(BaseSensor):
name = 'publicip'
desc = _("Display your public IP address")
command = 'curl ipv4.icanhazip.com'
current_ip = ""
lasttime = 0 # we refresh this every 10 minutes
def get_value(self, sensor):
if self.current_ip == "" or self.lasttime == 0 or (time.time() - self.lasttime) > 600:
self.current_ip = self.script_exec(self.command)
self.lasttime = time.time()
return self.current_ip
class CPUTemp(BaseSensor):
"""Return CPU temperature expressed in Celsius
"""
name = 'cputemp'
desc = _('CPU temperature')
def get_value(self, sensor):
# degrees symbol is unicode U+00B0
return "{:02.0f}\u00B0C".format(self._fetch_cputemp())
def _fetch_cputemp(self):
# http://www.mjmwired.net/kernel/Documentation/hwmon/sysfs-interface
# first try the following sys file
# /sys/class/thermal/thermal_zone0/temp
# if that fails try various hwmon files
cat = lambda file: open(file, 'r').read().strip()
ret = None
zone = "/sys/class/thermal/thermal_zone0/"
try:
ret = int(cat(os.path.join(zone, 'temp'))) / 1000
except:
pass
if ret:
return ret
base = '/sys/class/hwmon/'
ls = sorted(os.listdir(base))
assert ls, "%r is empty" % base
for hwmon in ls:
hwmon = os.path.join(base, hwmon)
try:
ret = int(cat(os.path.join(hwmon, 'temp1_input'))) / 1000
break
except:
pass
# if fahrenheit:
# digits = [(x * 1.8) + 32 for x in digits]
return ret
class StatusFetcher(Thread):
"""It recollects the info about the sensors."""
def __init__(self, parent):
Thread.__init__(self)
self._parent = parent
self.mgr = SensorManager()
self.alive = Event()
self.alive.set()
GLib.timeout_add_seconds(self.mgr.get_interval(), self.run)
def fetch(self):
return self.mgr.get_results()
def stop(self):
self.alive.clear()
def run(self):
data = self.fetch()
self._parent.update(data)
if self.alive.isSet():
return True
|
gpl-3.0
| -5,165,070,681,103,870,000
| 30.178918
| 120
| 0.532117
| false
| 4.065099
| false
| false
| false
|
skirpichev/omg
|
diofant/combinatorics/polyhedron.py
|
1
|
27227
|
from ..core import Basic, Tuple
from ..core.compatibility import as_int
from ..sets import FiniteSet
from ..utilities import flatten, unflatten
from ..utilities.iterables import minlex
from .perm_groups import PermutationGroup
from .permutations import Permutation
rmul = Permutation.rmul
class Polyhedron(Basic):
"""
Represents the polyhedral symmetry group (PSG).
The PSG is one of the symmetry groups of the Platonic solids.
There are three polyhedral groups: the tetrahedral group
of order 12, the octahedral group of order 24, and the
icosahedral group of order 60.
All doctests have been given in the docstring of the
constructor of the object.
References
==========
http://mathworld.wolfram.com/PolyhedralGroup.html
"""
_edges = None
def __new__(cls, corners, faces=[], pgroup=[]):
"""
The constructor of the Polyhedron group object.
It takes up to three parameters: the corners, faces, and
allowed transformations.
The corners/vertices are entered as a list of arbitrary
expressions that are used to identify each vertex.
The faces are entered as a list of tuples of indices; a tuple
of indices identifies the vertices which define the face. They
should be entered in a cw or ccw order; they will be standardized
        by reversal and rotation to give the lowest lexical ordering.
If no faces are given then no edges will be computed.
>>> Polyhedron(list('abc'), [(1, 2, 0)]).faces
{(0, 1, 2)}
>>> Polyhedron(list('abc'), [(1, 0, 2)]).faces
{(0, 1, 2)}
The allowed transformations are entered as allowable permutations
        of the vertices for the polyhedron. Instances of Permutation
(as with faces) should refer to the supplied vertices by index.
        These permutations are stored as a PermutationGroup.
Examples
========
>>> Permutation.print_cyclic = False
>>> from diofant.abc import w
Here we construct the Polyhedron object for a tetrahedron.
>>> corners = [w, x, y, z]
>>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)]
Next, allowed transformations of the polyhedron must be given. This
is given as permutations of vertices.
Although the vertices of a tetrahedron can be numbered in 24 (4!)
different ways, there are only 12 different orientations for a
physical tetrahedron. The following permutations, applied once or
twice, will generate all 12 of the orientations. (The identity
permutation, Permutation(range(4)), is not included since it does
not change the orientation of the vertices.)
>>> pgroup = [Permutation([[0, 1, 2], [3]]),
... Permutation([[0, 1, 3], [2]]),
... Permutation([[0, 2, 3], [1]]),
... Permutation([[1, 2, 3], [0]]),
... Permutation([[0, 1], [2, 3]]),
... Permutation([[0, 2], [1, 3]]),
... Permutation([[0, 3], [1, 2]])]
The Polyhedron is now constructed and demonstrated:
>>> tetra = Polyhedron(corners, faces, pgroup)
>>> tetra.size
4
>>> tetra.edges
{(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)}
>>> tetra.corners
(w, x, y, z)
It can be rotated with an arbitrary permutation of vertices, e.g.
the following permutation is not in the pgroup:
>>> tetra.rotate(Permutation([0, 1, 3, 2]))
>>> tetra.corners
(w, x, z, y)
An allowed permutation of the vertices can be constructed by
repeatedly applying permutations from the pgroup to the vertices.
Here is a demonstration that applying p and p**2 for every p in
pgroup generates all the orientations of a tetrahedron and no others:
>>> all = ((w, x, y, z),
... (x, y, w, z),
... (y, w, x, z),
... (w, z, x, y),
... (z, w, y, x),
... (w, y, z, x),
... (y, z, w, x),
... (x, z, y, w),
... (z, y, x, w),
... (y, x, z, w),
... (x, w, z, y),
... (z, x, w, y))
>>> got = []
>>> for p in (pgroup + [p**2 for p in pgroup]):
... h = Polyhedron(corners)
... h.rotate(p)
... got.append(h.corners)
...
>>> set(got) == set(all)
True
The make_perm method of a PermutationGroup will randomly pick
permutations, multiply them together, and return the permutation that
can be applied to the polyhedron to give the orientation produced
by those individual permutations.
Here, 3 permutations are used:
>>> tetra.pgroup.make_perm(3) # doctest: +SKIP
Permutation([0, 3, 1, 2])
To select the permutations that should be used, supply a list
of indices to the permutations in pgroup in the order they should
be applied:
>>> use = [0, 0, 2]
>>> p002 = tetra.pgroup.make_perm(3, use)
>>> p002
Permutation([1, 0, 3, 2])
Apply them one at a time:
>>> tetra.reset()
>>> for i in use:
... tetra.rotate(pgroup[i])
...
>>> tetra.vertices
(x, w, z, y)
>>> sequentially = tetra.vertices
Apply the composite permutation:
>>> tetra.reset()
>>> tetra.rotate(p002)
>>> tetra.corners
(x, w, z, y)
>>> tetra.corners in all and tetra.corners == sequentially
True
Notes
=====
Defining permutation groups
---------------------------
        It is not necessary to enter any permutations, nor is it necessary to
enter a complete set of transformations. In fact, for a polyhedron,
all configurations can be constructed from just two permutations.
For example, the orientations of a tetrahedron can be generated from
an axis passing through a vertex and face and another axis passing
through a different vertex or from an axis passing through the
midpoints of two edges opposite of each other.
For simplicity of presentation, consider a square --
not a cube -- with vertices 1, 2, 3, and 4:
1-----2 We could think of axes of rotation being:
| | 1) through the face
| | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4
3-----4 3) lines 1-4 or 2-3
To determine how to write the permutations, imagine 4 cameras,
one at each corner, labeled A-D:
A B A B
1-----2 1-----3 vertex index:
| | | | 1 0
| | | | 2 1
3-----4 2-----4 3 2
C D C D 4 3
original after rotation
along 1-4
A diagonal and a face axis will be chosen for the "permutation group"
from which any orientation can be constructed.
>>> pgroup = []
Imagine a clockwise rotation when viewing 1-4 from camera A. The new
orientation is (in camera-order): 1, 3, 2, 4 so the permutation is
given using the *indices* of the vertices as:
>>> pgroup.append(Permutation((0, 2, 1, 3)))
Now imagine rotating clockwise when looking down an axis entering the
center of the square as viewed. The new camera-order would be
3, 1, 4, 2 so the permutation is (using indices):
>>> pgroup.append(Permutation((2, 0, 3, 1)))
The square can now be constructed:
** use real-world labels for the vertices, entering them in
camera order
** for the faces we use zero-based indices of the vertices
in *edge-order* as the face is traversed; neither the
direction nor the starting point matter -- the faces are
only used to define edges (if so desired).
>>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup)
To rotate the square with a single permutation we can do:
>>> square.rotate(square.pgroup[0])
>>> square.corners
(1, 3, 2, 4)
To use more than one permutation (or to use one permutation more
than once) it is more convenient to use the make_perm method:
>>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations
>>> square.reset() # return to initial orientation
>>> square.rotate(p011)
>>> square.corners
(4, 2, 3, 1)
Thinking outside the box
------------------------
Although the Polyhedron object has a direct physical meaning, it
actually has broader application. In the most general sense it is
just a decorated PermutationGroup, allowing one to connect the
permutations to something physical. For example, a Rubik's cube is
not a proper polyhedron, but the Polyhedron class can be used to
represent it in a way that helps to visualize the Rubik's cube.
>>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD'])
>>> def show():
... pairs = unflatten(r2.corners, 2)
... print(sstr(pairs[::2]))
... print(sstr(pairs[1::2]))
...
>>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2))
>>> show()
[(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)]
[(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)]
>>> r2.rotate(0) # cw rotation of F
>>> show()
[(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)]
[(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)]
Predefined Polyhedra
====================
For convenience, the vertices and faces are defined for the following
standard solids along with a permutation group for transformations.
When the polyhedron is oriented as indicated below, the vertices in
a given horizontal plane are numbered in ccw direction, starting from
the vertex that will give the lowest indices in a given face. (In the
net of the vertices, indices preceded by "-" indicate replication of
the lhs index in the net.)
tetrahedron, tetrahedron_faces
------------------------------
4 vertices (vertex up) net:
0 0-0
1 2 3-1
4 faces:
(0,1,2) (0,2,3) (0,3,1) (1,2,3)
cube, cube_faces
----------------
8 vertices (face up) net:
0 1 2 3-0
4 5 6 7-4
6 faces:
(0,1,2,3)
(0,1,5,4) (1,2,6,5) (2,3,7,6) (0,3,7,4)
(4,5,6,7)
octahedron, octahedron_faces
----------------------------
6 vertices (vertex up) net:
0 0 0-0
1 2 3 4-1
5 5 5-5
8 faces:
(0,1,2) (0,2,3) (0,3,4) (0,1,4)
(1,2,5) (2,3,5) (3,4,5) (1,4,5)
dodecahedron, dodecahedron_faces
--------------------------------
20 vertices (vertex up) net:
0 1 2 3 4 -0
5 6 7 8 9 -5
14 10 11 12 13-14
15 16 17 18 19-15
12 faces:
(0,1,2,3,4)
(0,1,6,10,5) (1,2,7,11,6) (2,3,8,12,7) (3,4,9,13,8) (0,4,9,14,5)
        (5,10,16,15,14) (6,10,16,17,11) (7,11,17,18,12) (8,12,18,19,13) (9,13,19,15,14)
(15,16,17,18,19)
icosahedron, icosahedron_faces
------------------------------
12 vertices (face up) net:
0 0 0 0 -0
1 2 3 4 5 -1
6 7 8 9 10 -6
11 11 11 11 -11
20 faces:
(0,1,2) (0,2,3) (0,3,4) (0,4,5) (0,1,5)
(1,2,6) (2,3,7) (3,4,8) (4,5,9) (1,5,10)
(2,6,7) (3,7,8) (4,8,9) (5,9,10) (1,6,10)
        (6,7,11) (7,8,11) (8,9,11) (9,10,11) (6,10,11)
>>> cube.edges
{(0, 1), (0, 3), (0, 4), ..., (4, 7), (5, 6), (6, 7)}
If you want to use letters or other names for the corners you
can still use the pre-calculated faces:
>>> corners = list('abcdefgh')
>>> Polyhedron(corners, cube.faces).corners
(a, b, c, d, e, f, g, h)
References
==========
[1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf
"""
faces = [minlex(f, directed=False, is_set=True) for f in faces]
corners, faces, pgroup = args = \
[Tuple(*a) for a in (corners, faces, pgroup)]
obj = Basic.__new__(cls, *args)
obj._corners = tuple(corners) # in order given
obj._faces = FiniteSet(*faces)
if pgroup and pgroup[0].size != len(corners):
raise ValueError('Permutation size unequal to number of corners.')
# use the identity permutation if none are given
        obj._pgroup = PermutationGroup(
            pgroup or [Permutation(range(len(corners)))])
return obj
@property
def corners(self):
"""
Get the corners of the Polyhedron.
The method ``vertices`` is an alias for ``corners``.
Examples
========
>>> p = Polyhedron(list('abcd'))
>>> p.corners == p.vertices == (a, b, c, d)
True
See Also
========
array_form, cyclic_form
"""
return self._corners
vertices = corners
@property
def array_form(self):
"""Return the indices of the corners.
The indices are given relative to the original position of corners.
Examples
========
>>> tetrahedron.array_form
[0, 1, 2, 3]
>>> tetrahedron.rotate(0)
>>> tetrahedron.array_form
[0, 2, 3, 1]
>>> tetrahedron.pgroup[0].array_form
[0, 2, 3, 1]
>>> tetrahedron.reset()
See Also
========
corners, cyclic_form
"""
corners = list(self.args[0])
return [corners.index(c) for c in self.corners]
@property
def cyclic_form(self):
"""Return the indices of the corners in cyclic notation.
The indices are given relative to the original position of corners.
See Also
========
corners, array_form
"""
return Permutation._af_new(self.array_form).cyclic_form
@property
def size(self):
"""Get the number of corners of the Polyhedron."""
return len(self._corners)
@property
def faces(self):
"""Get the faces of the Polyhedron."""
return self._faces
@property
def pgroup(self):
"""Get the permutations of the Polyhedron."""
return self._pgroup
@property
def edges(self):
"""
Given the faces of the polyhedra we can get the edges.
Examples
========
>>> corners = (a, b, c)
>>> faces = [(0, 1, 2)]
>>> Polyhedron(corners, faces).edges
{(0, 1), (0, 2), (1, 2)}
"""
if self._edges is None:
output = set()
for face in self.faces:
for i in range(len(face)):
edge = tuple(sorted([face[i], face[i - 1]]))
output.add(edge)
self._edges = FiniteSet(*output)
return self._edges
def rotate(self, perm):
"""
Apply a permutation to the polyhedron *in place*. The permutation
may be given as a Permutation instance or an integer indicating
which permutation from pgroup of the Polyhedron should be
applied.
This is an operation that is analogous to rotation about
an axis by a fixed increment.
Notes
=====
When a Permutation is applied, no check is done to see if that
is a valid permutation for the Polyhedron. For example, a cube
could be given a permutation which effectively swaps only 2
vertices. A valid permutation (that rotates the object in a
physical way) will be obtained if one only uses
permutations from the ``pgroup`` of the Polyhedron. On the other
hand, allowing arbitrary rotations (applications of permutations)
gives a way to follow named elements rather than indices since
Polyhedron allows vertices to be named while Permutation works
only with indices.
Examples
========
>>> cube.corners
(0, 1, 2, 3, 4, 5, 6, 7)
>>> cube.rotate(0)
>>> cube.corners
(1, 2, 3, 0, 5, 6, 7, 4)
A non-physical "rotation" that is not prohibited by this method:
>>> cube.reset()
>>> cube.rotate(Permutation([[1, 2]], size=8))
>>> cube.corners
(0, 2, 1, 3, 4, 5, 6, 7)
Polyhedron can be used to follow elements of set that are
identified by letters instead of integers:
>>> shadow = h5 = Polyhedron(list('abcde'))
>>> p = Permutation([3, 0, 1, 2, 4])
>>> h5.rotate(p)
>>> h5.corners
(d, a, b, c, e)
>>> _ == shadow.corners
True
>>> copy = h5.copy()
>>> h5.rotate(p)
>>> h5.corners == copy.corners
False
"""
if not isinstance(perm, Permutation):
perm = self.pgroup[perm]
# and we know it's valid
else:
if perm.size != self.size:
raise ValueError('Polyhedron and Permutation sizes differ.')
a = perm.array_form
corners = [self.corners[a[i]] for i in range(len(self.corners))]
self._corners = tuple(corners)
def reset(self):
"""Return corners to their original positions.
Examples
========
>>> tetrahedron.corners
(0, 1, 2, 3)
>>> tetrahedron.rotate(0)
>>> tetrahedron.corners
(0, 2, 3, 1)
>>> tetrahedron.reset()
>>> tetrahedron.corners
(0, 1, 2, 3)
"""
self._corners = self.args[0]
def _pgroup_calcs():
"""Return the permutation groups for each of the polyhedra and the face
definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces,
icosahedron_faces
    (The author did not find a better way to do this, though one likely
    exists.)
Although only 2 permutations are needed for a polyhedron in order to
generate all the possible orientations, a group of permutations is
provided instead. A set of permutations is called a "group" if::
a*b = c (for any pair of permutations in the group, a and b, their
product, c, is in the group)
a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds)
there is an identity permutation, I, such that I*a = a*I for all elements
in the group
a*b = I (the inverse of each permutation is also in the group)
None of the polyhedron groups defined follow these definitions of a group.
Instead, they are selected to contain those permutations whose powers
alone will construct all orientations of the polyhedron, i.e. for
permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``,
``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of
permutation ``i``) generate all permutations of the polyhedron instead of
mixed products like ``a*b``, ``a*b**2``, etc....
Note that for a polyhedron with n vertices, the valid permutations of the
vertices exclude those that do not maintain its faces. e.g. the
    permutation BCDA of a square's four corners, ABCD, is a valid
    permutation while CBDA is not (because this would twist the square).
Examples
========
The is_group checks for: closure, the presence of the Identity permutation,
    and the presence of the inverse for each of the elements in the group.
    Although the generator sets above are not closed by themselves, the
    PermutationGroup built from them is, so each pgroup is a true group:
    >>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron)
    >>> [h.pgroup.is_group for h in polyhedra]
[True, True, True, True, True]
Although tests in polyhedron's test suite check that powers of the
permutations in the groups generate all permutations of the vertices
of the polyhedron, here we also demonstrate the powers of the given
permutations create a complete group for the tetrahedron:
>>> for h in polyhedra[:1]:
... G = h.pgroup
... perms = set()
... for g in G:
... for e in range(g.order()):
... p = tuple((g**e).array_form)
... perms.add(p)
...
... perms = [Permutation(p) for p in perms]
... assert PermutationGroup(perms).is_group
In addition to doing the above, the tests in the suite confirm that the
faces are all present after the application of each permutation.
References
==========
http://dogschool.tripod.com/trianglegroup.html
"""
def _pgroup_of_double(polyh, ordered_faces, pgroup):
n = len(ordered_faces[0])
        # the vertices of the double which sits inside a given polyhedron
# can be found by tracking the faces of the outer polyhedron.
# A map between face and the vertex of the double is made so that
# after rotation the position of the vertices can be located
fmap = dict(zip(ordered_faces,
range(len(ordered_faces))))
flat_faces = flatten(ordered_faces)
new_pgroup = []
for i, p in enumerate(pgroup):
h = polyh.copy()
h.rotate(p)
c = h.corners
# reorder corners in the order they should appear when
# enumerating the faces
reorder = unflatten([c[j] for j in flat_faces], n)
# make them canonical
reorder = [tuple(map(as_int,
minlex(f, directed=False, is_set=True)))
for f in reorder]
# map face to vertex: the resulting list of vertices are the
# permutation that we seek for the double
new_pgroup.append(Permutation([fmap[f] for f in reorder]))
return new_pgroup
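    # Illustrative note (not part of the original file): the cube/octahedron
    # and dodecahedron/icosahedron are dual pairs, so each face of the outer
    # solid corresponds to one vertex of the inner one; _pgroup_of_double
    # rotates the outer solid and reads the reordered faces through fmap to
    # obtain the matching permutation of the dual's vertices.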
tetrahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3
(1, 2, 3), # bottom
]
# cw from top
#
_t_pgroup = [
Permutation([[1, 2, 3], [0]]), # cw from top
Permutation([[0, 1, 2], [3]]), # cw from front face
Permutation([[0, 3, 2], [1]]), # cw from back right face
Permutation([[0, 3, 1], [2]]), # cw from back left face
Permutation([[0, 1], [2, 3]]), # through front left edge
Permutation([[0, 2], [1, 3]]), # through front right edge
Permutation([[0, 3], [1, 2]]), # through back edge
]
tetrahedron = Polyhedron(
range(4),
tetrahedron_faces,
_t_pgroup)
cube_faces = [
(0, 1, 2, 3), # upper
(0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4
(4, 5, 6, 7), # lower
]
# U, D, F, B, L, R = up, down, front, back, left, right
_c_pgroup = [Permutation(p) for p in
[
[1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U
[4, 0, 3, 7, 5, 1, 2, 6], # cw from F face
[4, 5, 1, 0, 7, 6, 2, 3], # cw from R face
[1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge
[6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge
[6, 7, 3, 2, 5, 4, 0, 1], # cw through UB edge
[3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge
[4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge
[6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge
[0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex
[5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex
[5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex
[7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL
]]
cube = Polyhedron(
range(8),
cube_faces,
_c_pgroup)
octahedron_faces = [
(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4
(1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4
]
octahedron = Polyhedron(
range(6),
octahedron_faces,
_pgroup_of_double(cube, cube_faces, _c_pgroup))
dodecahedron_faces = [
(0, 1, 2, 3, 4), # top
(0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5
(3, 4, 9, 13, 8), (0, 4, 9, 14, 5),
(5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18,
12), # lower 5
(8, 12, 18, 19, 13), (9, 13, 19, 15, 14),
(15, 16, 17, 18, 19) # bottom
]
def _string_to_perm(s):
rv = [Permutation(range(20))]
p = None
for si in s:
if si not in '01':
count = int(si) - 1
else:
count = 1
if si == '0':
p = _f0
else:
p = _f1
rv.extend([p]*count)
return Permutation.rmul(*rv)
# top face cw
_f0 = Permutation([
1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11,
12, 13, 14, 10, 16, 17, 18, 19, 15])
# front face cw
_f1 = Permutation([
5, 0, 4, 9, 14, 10, 1, 3, 13, 15,
6, 2, 8, 19, 16, 17, 11, 7, 12, 18])
# the strings below, like 0104 are shorthand for F0*F1*F0**4 and are
# the remaining 4 face rotations, 15 edge permutations, and the
# 10 vertex rotations.
_dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in """
0104 140 014 0410
010 1403 03104 04103 102
120 1304 01303 021302 03130
0412041 041204103 04120410 041204104 041204102
10 01 1402 0140 04102 0412 1204 1302 0130 03120""".strip().split()]
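    # Illustrative only (not part of the original module): applying the
    # decoding rules of _string_to_perm above, the shorthand '0104' expands
    # to F0*F1*F0**4, i.e.
    #     _string_to_perm('0104') == Permutation.rmul(
    #         Permutation(range(20)), _f0, _f1, _f0, _f0, _f0, _f0)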
dodecahedron = Polyhedron(
range(20),
dodecahedron_faces,
_dodeca_pgroup)
icosahedron_faces = [
[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5], [0, 1, 5],
[1, 6, 7], [1, 2, 7], [2, 7, 8], [2, 3, 8], [3, 8, 9],
[3, 4, 9], [4, 9, 10 ], [4, 5, 10], [5, 6, 10], [1, 5, 6],
[6, 7, 11], [7, 8, 11], [8, 9, 11], [9, 10, 11], [6, 10, 11]]
icosahedron = Polyhedron(
range(12),
icosahedron_faces,
_pgroup_of_double(
dodecahedron, dodecahedron_faces, _dodeca_pgroup))
return (tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces,
dodecahedron_faces, icosahedron_faces)
(tetrahedron, cube, octahedron, dodecahedron, icosahedron,
tetrahedron_faces, cube_faces, octahedron_faces,
dodecahedron_faces, icosahedron_faces) = _pgroup_calcs()
|
bsd-3-clause
| 3,605,767,470,753,817,000
| 32.738538
| 80
| 0.530356
| false
| 3.578733
| false
| false
| false
|
strands-project/robomongo
|
src/third-party/qscintilla/Python/configure.py
|
1
|
29817
|
# This script configures QScintilla for PyQt v4.10 and later. It will fall
# back to the old script if an earlier version of PyQt is found.
#
# Copyright (c) 2012 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of QScintilla.
#
# This file may be used under the terms of the GNU General Public
# License versions 2.0 or 3.0 as published by the Free Software
# Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
# included in the packaging of this file. Alternatively you may (at
# your option) use any later version of the GNU General Public
# License if such license has been publicly approved by Riverbank
# Computing Limited (or its successors, if any) and the KDE Free Qt
# Foundation. In addition, as a special exception, Riverbank gives you
# certain additional rights. These rights are described in the Riverbank
# GPL Exception version 1.1, which can be found in the file
# GPL_EXCEPTION.txt in this package.
#
# If you are unsure which license is appropriate for your use, please
# contact the sales department at sales@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import sys
import os
import glob
import optparse
try:
import sysconfig
except ImportError:
from distutils import sysconfig
# Initialise the constants.
SIP_MIN_VERSION = '4.12.0'
# This must be kept in sync with qscintilla.pro, Qt4Qt5/application.pro and
# Qt4Qt5/designer.pro.
QSCI_API_MAJOR = 9
def error(msg):
""" Display an error message and terminate. msg is the text of the error
message.
"""
sys.stderr.write(format("Error: " + msg) + "\n")
sys.exit(1)
def inform(msg):
""" Display an information message. msg is the text of the error message.
"""
sys.stdout.write(format(msg) + "\n")
def format(msg, left_margin=0, right_margin=78):
""" Format a message by inserting line breaks at appropriate places. msg
is the text of the message. left_margin is the position of the left
margin. right_margin is the position of the right margin. Returns the
formatted message.
"""
curs = left_margin
fmsg = " " * left_margin
for w in msg.split():
l = len(w)
if curs != left_margin and curs + l > right_margin:
fmsg = fmsg + "\n" + (" " * left_margin)
curs = left_margin
if curs > left_margin:
fmsg = fmsg + " "
curs = curs + 1
fmsg = fmsg + w
curs = curs + l
return fmsg
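# Illustrative only: with the defaults, format() wraps on word boundaries so
# that, e.g., format('one two three', right_margin=8) returns 'one two\nthree'.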
class HostPythonConfiguration:
""" A container for the host Python configuration. """
def __init__(self):
""" Initialise the configuration. """
self.platform = sys.platform
self.version = sys.hexversion >> 8
if hasattr(sysconfig, 'get_path'):
# The modern API.
self.inc_dir = sysconfig.get_path('include')
self.module_dir = sysconfig.get_path('platlib')
else:
# The legacy distutils API.
self.inc_dir = sysconfig.get_python_inc(plat_specific=1)
self.module_dir = sysconfig.get_python_lib(plat_specific=1)
if sys.platform == 'win32':
self.data_dir = sys.prefix
self.lib_dir = sys.prefix + '\\libs'
else:
self.data_dir = sys.prefix + '/share'
self.lib_dir = sys.prefix + '/lib'
class TargetQtConfiguration:
""" A container for the target Qt configuration. """
def __init__(self, qmake):
""" Initialise the configuration. qmake is the full pathname of the
qmake executable that will provide the configuration.
"""
pipe = os.popen(' '.join([qmake, '-query']))
for l in pipe:
l = l.strip()
tokens = l.split(':', 1)
if isinstance(tokens, list):
if len(tokens) != 2:
error("Unexpected output from qmake: '%s'\n" % l)
name, value = tokens
else:
name = tokens
value = None
name = name.replace('/', '_')
setattr(self, name, value)
pipe.close()
class TargetConfiguration:
""" A container for the target configuration. """
def __init__(self):
""" Initialise the configuration with default values. """
# Values based on the host Python configuration.
py_config = HostPythonConfiguration()
self.py_module_dir = py_config.module_dir
self.py_inc_dir = py_config.inc_dir
self.py_lib_dir = py_config.lib_dir
self.py_platform = py_config.platform
self.py_sip_dir = os.path.join(py_config.data_dir, 'sip')
self.py_version = py_config.version
self.sip_inc_dir = py_config.inc_dir
# The default qmake spec.
if self.py_platform == 'win32':
if self.py_version >= 0x030300:
self.qmake_spec = 'win32-msvc2010'
elif self.py_version >= 0x020600:
self.qmake_spec = 'win32-msvc2008'
elif self.py_version >= 0x020400:
self.qmake_spec = 'win32-msvc.net'
else:
self.qmake_spec = 'win32-msvc'
else:
# Use the Qt default. (We may update it for MacOS/X later.)
self.qmake_spec = ''
# Remaining values.
self.pyqt_sip_flags = ''
self.pyqt_version = ''
self.qmake = self._find_exe('qmake')
self.sip = self._find_exe('sip')
self.prot_is_public = (self.py_platform.startswith('linux') or self.py_platform == 'darwin')
self.qscintilla_is_dll = (self.py_platform == 'win32')
self.module_dir = os.path.join(py_config.module_dir, 'PyQt4')
self.pyqt_sip_dir = os.path.join(self.py_sip_dir, 'PyQt4')
self.qsci_sip_dir = self.pyqt_sip_dir
def from_configuration_file(self, config_file):
""" Initialise the configuration with values from a file. config_file
is the name of the configuration file.
"""
inform("Reading configuration from %s..." % config_file)
cfg = open(config_file)
line_nr = 0
for l in cfg:
line_nr += 1
l = l.strip()
if len(l) == 0 or l[0] == '#':
continue
eq = l.find('=')
if eq > 0:
                name = l[:eq].rstrip()
value = l[eq + 1:].lstrip()
else:
name = value = ''
if name == '' or value == '':
error("%s:%d: Invalid line." % (config_file, line_nr))
default_value = getattr(self, name, None)
if default_value is None:
error(
"%s:%d: Unknown item: %s." % (config_file, line_nr,
name))
if isinstance(default_value, int):
if value.startswith('0x'):
value = int(value, 16)
else:
value = int(value)
setattr(self, name, value)
cfg.close()
def from_introspection(self, pyqt_package):
""" Initialise the configuration by introspecting the system.
pyqt_package is the name of the PyQt package we are building against.
"""
if pyqt_package == 'PyQt5':
try:
from PyQt5 import QtCore
except ImportError:
error(
"Unable to import PyQt5.QtCore. Make sure PyQt5 is "
"installed.")
else:
try:
from PyQt4 import QtCore
except ImportError:
error(
"Unable to import PyQt4.QtCore. Make sure PyQt4 is "
"installed.")
inform("PyQt %s is being used." % QtCore.PYQT_VERSION_STR)
inform("Qt %s is being used." % QtCore.QT_VERSION_STR)
# See if we have a PyQt that embeds its configuration.
try:
pyqt_config = QtCore.PYQT_CONFIGURATION
except AttributeError:
pyqt_config = None
if pyqt_config is None:
# Fallback to the old configuration script.
config_script = sys.argv[0].replace('configure', 'configure-old')
args = [sys.executable, config_script] + sys.argv[1:]
try:
os.execv(sys.executable, args)
except OSError:
pass
error("Unable to execute '%s'\n" % config_script)
self.pyqt_sip_flags = pyqt_config['sip_flags']
def get_qt_configuration(self, opts):
""" Get the Qt configuration that can be extracted from qmake. opts
are the command line options.
"""
try:
qmake = opts.qmake
except AttributeError:
# Windows.
qmake = None
if qmake is not None:
self.qmake = qmake
elif self.qmake is None:
# Under Windows qmake and the Qt DLLs must be on the system PATH
# otherwise the dynamic linker won't be able to resolve the
# symbols. On other systems we assume we can just run qmake by
# using its full pathname.
if sys.platform == 'win32':
error("Make sure you have a working Qt qmake on your PATH.")
else:
error(
"Make sure you have a working Qt qmake on your PATH "
"or use the --qmake argument to explicitly specify a "
"working Qt qmake.")
# Query qmake.
qt_config = TargetQtConfiguration(self.qmake)
# The binary MacOS/X Qt installer defaults to XCode. If this is what
# we might have then use macx-clang (Qt v5) or macx-g++ (Qt v4).
if sys.platform == 'darwin':
try:
# Qt v5.
if qt_config.QMAKE_SPEC == 'macx-xcode':
# This will exist (and we can't check anyway).
self.qmake_spec = 'macx-clang'
else:
# No need to explicitly name the default.
self.qmake_spec = ''
except AttributeError:
# Qt v4.
self.qmake_spec = 'macx-g++'
self.api_dir = qt_config.QT_INSTALL_DATA
self.qsci_inc_dir = qt_config.QT_INSTALL_HEADERS
self.qsci_lib_dir = qt_config.QT_INSTALL_LIBS
def override_defaults(self, opts):
""" Override the defaults from the command line. opts are the command
line options.
"""
if opts.apidir is not None:
self.api_dir = opts.apidir
if opts.destdir is not None:
self.module_dir = opts.destdir
else:
self.module_dir = os.path.join(self.py_module_dir,
opts.pyqt_package)
if opts.qmakespec is not None:
self.qmake_spec = opts.qmakespec
if opts.prot_is_public is not None:
self.prot_is_public = opts.prot_is_public
if opts.qsci_inc_dir is not None:
self.qsci_inc_dir = opts.qsci_inc_dir
if opts.qsci_lib_dir is not None:
self.qsci_lib_dir = opts.qsci_lib_dir
if opts.sip_inc_dir is not None:
self.sip_inc_dir = opts.sip_inc_dir
if opts.pyqt_sip_dir is not None:
self.pyqt_sip_dir = opts.pyqt_sip_dir
else:
self.pyqt_sip_dir = os.path.join(self.py_sip_dir,
opts.pyqt_package)
if opts.qsci_sip_dir is not None:
self.qsci_sip_dir = opts.qsci_sip_dir
else:
self.qsci_sip_dir = self.pyqt_sip_dir
if opts.sip is not None:
self.sip = opts.sip
if opts.is_dll is not None:
self.qscintilla_is_dll = opts.is_dll
@staticmethod
def _find_exe(exe):
""" Find an executable, ie. the first on the path. """
try:
path = os.environ['PATH']
except KeyError:
path = ''
if sys.platform == 'win32':
exe = exe + '.exe'
for d in path.split(os.pathsep):
exe_path = os.path.join(d, exe)
if os.access(exe_path, os.X_OK):
return exe_path
return None
def create_optparser(target_config):
""" Create the parser for the command line. target_config is the target
configuration containing default values.
"""
def store_abspath(option, opt_str, value, parser):
setattr(parser.values, option.dest, os.path.abspath(value))
def store_abspath_dir(option, opt_str, value, parser):
if not os.path.isdir(value):
raise optparse.OptionValueError("'%s' is not a directory" % value)
setattr(parser.values, option.dest, os.path.abspath(value))
def store_abspath_exe(option, opt_str, value, parser):
if not os.access(value, os.X_OK):
raise optparse.OptionValueError("'%s' is not an executable" % value)
setattr(parser.values, option.dest, os.path.abspath(value))
p = optparse.OptionParser(usage="python %prog [options]",
version="2.7.2")
p.add_option("--spec", dest='qmakespec', default=None, action='store',
metavar="SPEC",
help="pass -spec SPEC to qmake [default: %s]" % "don't pass -spec" if target_config.qmake_spec == '' else target_config.qmake_spec)
p.add_option("--apidir", "-a", dest='apidir', type='string', default=None,
action='callback', callback=store_abspath, metavar="DIR",
help="the QScintilla API file will be installed in DIR [default: "
"QT_INSTALL_DATA/qsci]")
p.add_option("--configuration", dest='config_file', type='string',
default=None, action='callback', callback=store_abspath,
metavar="FILE",
help="FILE defines the target configuration")
p.add_option("--destdir", "-d", dest='destdir', type='string',
default=None, action='callback', callback=store_abspath,
metavar="DIR",
help="install the QScintilla module in DIR [default: "
"%s]" % target_config.module_dir)
p.add_option("--protected-is-public", dest='prot_is_public', default=None,
action='store_true',
help="enable building with 'protected' redefined as 'public' "
"[default: %s]" % target_config.prot_is_public)
p.add_option("--protected-not-public", dest='prot_is_public',
action='store_false',
help="disable building with 'protected' redefined as 'public'")
p.add_option("--pyqt", dest='pyqt_package', type='choice',
choices=['PyQt4', 'PyQt5'], default='PyQt4', action='store',
metavar="PyQtn",
help="configure for PyQt4 or PyQt5 [default: PyQt4]")
if sys.platform != 'win32':
p.add_option("--qmake", "-q", dest='qmake', type='string',
default=None, action='callback', callback=store_abspath_exe,
metavar="FILE",
help="the pathname of qmake is FILE [default: "
"%s]" % (target_config.qmake or "None"))
p.add_option("--qsci-incdir", "-n", dest='qsci_inc_dir', type='string',
default=None, action='callback', callback=store_abspath_dir,
metavar="DIR",
help="the directory containing the QScintilla Qsci header file "
"directory is DIR [default: QT_INSTALL_HEADERS]")
p.add_option("--qsci-libdir", "-o", dest='qsci_lib_dir', type='string',
default=None, action='callback', callback=store_abspath_dir,
metavar="DIR",
help="the directory containing the QScintilla library is DIR "
"[default: QT_INSTALL_LIBS]")
p.add_option("--sip", dest='sip', type='string', default=None,
action='callback', callback=store_abspath_exe, metavar="FILE",
help="the pathname of sip is FILE [default: "
"%s]" % (target_config.sip or "None"))
p.add_option("--sip-incdir", dest='sip_inc_dir', type='string',
default=None, action='callback', callback=store_abspath_dir,
metavar="DIR",
help="the directory containing the sip.h header file file is DIR "
"[default: %s]" % target_config.sip_inc_dir)
p.add_option("--pyqt-sipdir", dest='pyqt_sip_dir', type='string',
default=None, action='callback', callback=store_abspath_dir,
metavar="DIR",
help="the directory containing the PyQt .sip files is DIR "
"[default: %s]" % target_config.pyqt_sip_dir)
p.add_option("--qsci-sipdir", "-v", dest='qsci_sip_dir', type='string',
default=None, action='callback', callback=store_abspath_dir,
metavar="DIR",
help="the QScintilla .sip files will be installed in DIR "
"[default: %s]" % target_config.qsci_sip_dir)
p.add_option("--concatenate", "-c", dest='concat', default=False,
action='store_true',
help="concatenate the C++ source files")
p.add_option("--concatenate-split", "-j", dest='split', type='int',
default=1, metavar="N",
help="split the concatenated C++ source files into N pieces "
"[default: 1]")
p.add_option("--static", "-k", dest='static', default=False,
action='store_true',
help="build the QScintilla module as a static library")
p.add_option("--no-docstrings", dest='no_docstrings', default=False,
action='store_true',
help="disable the generation of docstrings")
p.add_option("--trace", "-r", dest='tracing', default=False,
action='store_true',
help="build the QScintilla module with tracing enabled")
p.add_option("--no-dll", "-s", dest='is_dll', default=None,
action='store_false',
help="QScintilla is a static library and not a Windows DLL")
p.add_option("--debug", "-u", default=False, action='store_true',
help="build the QScintilla module with debugging symbols")
p.add_option("--no-timestamp", "-T", dest='no_timestamp', default=False,
action='store_true',
help="suppress timestamps in the header comments of generated "
"code [default: include timestamps]")
return p
def inform_user(target_config):
""" Tell the user the values that are going to be used. target_config is
the target configuration.
"""
inform("The sip executable is %s." % target_config.sip)
inform("The QScintilla module will be installed in %s." % target_config.module_dir)
if target_config.prot_is_public:
inform("The QScintilla module is being built with 'protected' "
"redefined as 'public'.")
inform("The QScintilla .sip files will be installed in %s." %
target_config.qsci_sip_dir)
inform("The QScintilla API file will be installed in %s." %
os.path.join(target_config.api_dir, 'api', 'python'))
def check_qscintilla(target_config):
""" See if QScintilla can be found and what its version is. target_config
is the target configuration.
"""
# Find the QScintilla header files.
sciglobal = os.path.join(target_config.qsci_inc_dir, 'Qsci', 'qsciglobal.h')
if not os.access(sciglobal, os.F_OK):
error("Qsci/qsciglobal.h could not be found in %s. If QScintilla is installed then use the --qsci-incdir argument to explicitly specify the correct directory." % target_config.qsci_inc_dir)
# Get the QScintilla version string.
sciversstr = read_define(sciglobal, 'QSCINTILLA_VERSION_STR')
if sciversstr is None:
error(
"The QScintilla version number could not be determined by "
"reading %s." % sciglobal)
if not glob.glob(os.path.join(target_config.qsci_lib_dir, '*qscintilla2*')):
error("The QScintilla library could not be found in %s. If QScintilla is installed then use the --qsci-libdir argument to explicitly specify the correct directory." % target_config.qsci_lib_dir)
# Because we include the Python bindings with the C++ code we can
# reasonably force the same version to be used and not bother about
# versioning.
if sciversstr != '2.7.2':
error("QScintilla %s is being used but the Python bindings 2.7.2 are being built. Please use matching versions." % sciversstr)
inform("QScintilla %s is being used." % sciversstr)
def read_define(filename, define):
""" Read the value of a #define from a file. filename is the name of the
file. define is the name of the #define. None is returned if there was no
such #define.
"""
f = open(filename)
for l in f:
wl = l.split()
if len(wl) >= 3 and wl[0] == "#define" and wl[1] == define:
# Take account of embedded spaces.
value = ' '.join(wl[2:])[1:-1]
break
else:
value = None
f.close()
return value
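# Illustrative only: given a header line such as
#     #define QSCINTILLA_VERSION_STR "2.7.2"
# read_define(header, 'QSCINTILLA_VERSION_STR') returns '2.7.2'; the [1:-1]
# slice above strips the surrounding quotes.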
def sip_flags(target_config):
""" Return the SIP flags. target_config is the target configuration. """
# Get the flags used for the main PyQt module.
flags = target_config.pyqt_sip_flags.split()
# Generate the API file.
flags.append('-a')
flags.append('QScintilla2.api')
# Add PyQt's .sip files to the search path.
flags.append('-I')
flags.append(target_config.pyqt_sip_dir)
return flags
def generate_code(target_config, opts):
""" Generate the code for the QScintilla module. target_config is the
target configuration. opts are the command line options.
"""
inform("Generating the C++ source for the Qsci module...")
# Build the SIP command line.
argv = [target_config.sip]
argv.extend(sip_flags(target_config))
if opts.no_timestamp:
argv.append('-T')
if not opts.no_docstrings:
argv.append('-o');
if target_config.prot_is_public:
argv.append('-P');
if opts.concat:
argv.append('-j')
argv.append(str(opts.split))
if opts.tracing:
argv.append('-r')
argv.append('-c')
argv.append('.')
if opts.pyqt_package == 'PyQt5':
argv.append('sip/qscimod5.sip')
else:
argv.append('sip/qscimod4.sip')
rc = os.spawnv(os.P_WAIT, target_config.sip, argv)
if rc != 0:
error("%s returned exit code %d." % (target_config.sip, rc))
# Generate the .pro file.
generate_pro(target_config, opts)
# Generate the Makefile.
inform("Creating the Makefile for the Qsci module...")
qmake_args = ['qmake']
if target_config.qmake_spec != '':
qmake_args.append('-spec')
qmake_args.append(target_config.qmake_spec)
qmake_args.append('Qsci.pro')
rc = os.spawnv(os.P_WAIT, target_config.qmake, qmake_args)
if rc != 0:
error("%s returned exit code %d." % (target_config.qmake, rc))
def generate_pro(target_config, opts):
""" Generate the .pro file for the QScintilla module. target_config is the
target configuration. opts are the command line options.
"""
# Without the 'no_check_exist' magic the target.files must exist when qmake
# is run otherwise the install and uninstall targets are not generated.
inform("Generating the .pro file for the Qsci module...")
pro = open('Qsci.pro', 'w')
pro.write('TEMPLATE = lib\n')
pro.write('CONFIG += %s\n' % ('debug' if opts.debug else 'release'))
pro.write('CONFIG += %s\n' % ('staticlib' if opts.static else 'plugin'))
pro.write('''
greaterThan(QT_MAJOR_VERSION, 4) {
QT += widgets printsupport
}
''')
if not opts.static:
# I don't really understand why the linker needs to find the Python
# .lib file.
pro.write('''
win32 {
PY_MODULE = Qsci.pyd
target.files = Qsci.pyd
LIBS += -L%s
} else {
PY_MODULE = Qsci.so
target.files = Qsci.so
}
target.CONFIG = no_check_exist
''' % target_config.py_lib_dir)
pro.write('''
target.path = %s
INSTALLS += target
''' % target_config.module_dir)
pro.write('''
api.path = %s/api/python
api.files = QScintilla2.api
INSTALLS += api
''' % target_config.api_dir)
pro.write('''
sip.path = %s/Qsci
sip.files =''' % target_config.qsci_sip_dir)
for s in glob.glob('sip/*.sip'):
pro.write(' \\\n %s' % s)
pro.write('''
INSTALLS += sip
''')
pro.write('\n')
# These optimisations could apply to other platforms.
if target_config.py_platform == 'darwin':
pro.write('QMAKE_CXXFLAGS += -fno-exceptions\n')
if target_config.py_platform.startswith('linux'):
pro.write('QMAKE_CXXFLAGS += -fno-exceptions\n')
if not opts.static:
if target_config.py_version >= 0x030000:
entry_point = 'PyInit_Qsci'
else:
entry_point = 'initQsci'
exp = open('Qsci.exp', 'wt')
exp.write('{ global: %s; local: *; };' % entry_point)
exp.close()
pro.write('QMAKE_LFLAGS += -Wl,--version-script=Qsci.exp\n')
if target_config.prot_is_public:
pro.write('DEFINES += SIP_PROTECTED_IS_PUBLIC protected=public\n')
if target_config.qscintilla_is_dll:
pro.write('DEFINES += QSCINTILLA_DLL\n')
pro.write('INCLUDEPATH += %s\n' % target_config.qsci_inc_dir)
pro.write('INCLUDEPATH += %s\n' % target_config.py_inc_dir)
if target_config.py_inc_dir != target_config.sip_inc_dir:
pro.write('INCLUDEPATH += %s\n' % target_config.sip_inc_dir)
pro.write('LIBS += -L%s -lqscintilla2\n' % target_config.qsci_lib_dir)
if not opts.static:
pro.write('''
win32 {
QMAKE_POST_LINK = $(COPY_FILE) $(DESTDIR_TARGET) $$PY_MODULE
} else {
QMAKE_POST_LINK = $(COPY_FILE) $(TARGET) $$PY_MODULE
}
macx {
QMAKE_LFLAGS += "-undefined dynamic_lookup"
QMAKE_POST_LINK = $$QMAKE_POST_LINK$$escape_expand(\\\\n\\\\t)$$quote(install_name_tool -change libqscintilla2.%s.dylib %s/libqscintilla2.%s.dylib $$PY_MODULE)
}
''' % (QSCI_API_MAJOR, target_config.qsci_lib_dir, QSCI_API_MAJOR))
pro.write('\n')
pro.write('TARGET = Qsci\n')
pro.write('HEADERS = sipAPIQsci.h\n')
pro.write('SOURCES =')
for s in glob.glob('*.cpp'):
pro.write(' \\\n %s' % s)
pro.write('\n')
pro.close()
def check_sip(target_config):
""" Check that the version of sip is good enough. target_config is the
target configuration.
"""
if target_config.sip is None:
error(
"Make sure you have a working sip on your PATH or use the "
"--sip argument to explicitly specify a working sip.")
pipe = os.popen(' '.join([target_config.sip, '-V']))
for l in pipe:
version_str = l.strip()
break
else:
error("'%s -V' did not generate any output." % target_config.sip)
pipe.close()
if 'snapshot' not in version_str:
version = version_from_string(version_str)
if version is None:
error(
"'%s -V' generated unexpected output: '%s'." % (
target_config.sip, version_str))
min_version = version_from_string(SIP_MIN_VERSION)
if version < min_version:
error(
"This version of QScintilla requires sip %s or later." %
SIP_MIN_VERSION)
inform("sip %s is being used." % version_str)
def version_from_string(version_str):
""" Convert a version string of the form m.n or m.n.o to an encoded version
number (or None if it was an invalid format). version_str is the version
string.
"""
parts = version_str.split('.')
if not isinstance(parts, list):
return None
if len(parts) == 2:
parts.append('0')
if len(parts) != 3:
return None
version = 0
for part in parts:
try:
v = int(part)
except ValueError:
return None
version = (version << 8) + v
return version
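# Illustrative only: version_from_string('4.12.0') packs one byte per part,
# giving (4 << 16) | (12 << 8) | 0 == 0x040c00; 'm.n' is treated as 'm.n.0'
# and any non-numeric part yields None.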
def main(argv):
""" Create the configuration module module. argv is the list of command
line arguments.
"""
# Create the default target configuration.
target_config = TargetConfiguration()
# Parse the command line.
p = create_optparser(target_config)
opts, args = p.parse_args()
if args:
p.print_help()
sys.exit(2)
# Query qmake for the basic configuration information.
target_config.get_qt_configuration(opts)
# Update the target configuration.
if opts.config_file is not None:
target_config.from_configuration_file(opts.config_file)
else:
target_config.from_introspection(opts.pyqt_package)
target_config.override_defaults(opts)
# Check SIP is new enough.
check_sip(target_config)
# Check for QScintilla.
check_qscintilla(target_config)
# Tell the user what's been found.
inform_user(target_config)
# Generate the code.
generate_code(target_config, opts)
###############################################################################
# The script starts here.
###############################################################################
if __name__ == '__main__':
try:
main(sys.argv)
except SystemExit:
raise
except:
sys.stderr.write(
"""An internal error occured. Please report all the output from the program,
including the following traceback, to support@riverbankcomputing.com.
""")
raise
|
gpl-3.0
| 5,562,037,454,009,413,000
| 32.691525
| 202
| 0.583325
| false
| 3.79496
| true
| false
| false
|
sassoftware/mint
|
mint/django_rest/rbuilder/platforms/models.py
|
1
|
5423
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.db import models
from mint.django_rest.rbuilder import modellib
from mint.django_rest.rbuilder.modellib import basemodels # hack, because of modellib in Platform
import sys
from xobj import xobj
from mint.django_rest.deco import D
## TODO: Change SyntheticFields to correct type (mostly CharFields/BooleanFields/FK's)
## once the schema is updated (if need be). Some of the models are listed as
## abstract as they lack the necessary tables in the db -- and some of the fields on
## those models are temporarily synthetic because we can't have FK's to abstract models.
class Platforms(modellib.Collection):
class Meta:
abstract = True
_xobj = xobj.XObjMetadata(tag='platforms')
list_fields = ['platform']
class Platform(modellib.XObjIdModel):
class Meta:
db_table = 'platforms'
ordering = [ 'platform_id' ]
_xobj = xobj.XObjMetadata(tag='platform')
_MODE_CHOICES = (('manual', 'manual'), ('auto', 'auto'))
platform_id = D(models.AutoField(primary_key=True, db_column='platformid'), 'ID of the platform')
label = D(models.CharField(max_length=1026, unique=True), 'Platform label, must be unique')
mode = D(models.CharField(max_length=1026, default='manual', choices=_MODE_CHOICES),
'Charfield, defaults to "manual"')
enabled = D(models.IntegerField(default=1), 'Is enabled, defaults to integer 1')
    project = D(modellib.DeferredForeignKey('projects.Project', db_column='projectid', null=True),
        'Project attached to the platform; may be null until the platform is enabled')
platform_name = D(models.CharField(max_length=1026, db_column='platformname'), 'Name of the platform')
configurable = D(models.BooleanField(default=False), 'Boolean, defaults to False')
abstract = D(models.BooleanField(default=False), 'Boolean, defaults to False')
is_from_disk = D(models.BooleanField(default=False, db_column='isfromdisk'), 'Boolean, defaults to False')
hidden = D(models.BooleanField(default=False), 'Boolean, defaults to False')
upstream_url = D(models.TextField(), "Upstream repository URL used when creating external project for this platform")
time_refreshed = D(basemodels.DateTimeUtcField(auto_now_add=True),
'Time at which the platform was refreshed') # hack, modellib keeps evaluating to None
# SyntheticFields -- fields with no column in the db
# most of these are deferred fk's, M2M's, or CharFields in the old code
platform_trove_name = modellib.SyntheticField() # charfield
repository_host_name = modellib.SyntheticField() # charfield
repository_api = modellib.SyntheticField(modellib.HrefField()) # genuine synthetic field
product_version = modellib.SyntheticField() # fk
platform_versions = modellib.SyntheticField(modellib.HrefField()) # fk, is this different from product_version ?
platform_usage_terms = modellib.SyntheticField() # charfield
mirror_permission = modellib.SyntheticField() # boolean
platform_type = modellib.SyntheticField() # charfield
load = modellib.SyntheticField() # fk
image_type_definitions = modellib.SyntheticField() # fk
platform_status = modellib.SyntheticField() # fk
is_platform = modellib.SyntheticField() # booleanfield
def computeSyntheticFields(self, sender, **kwargs):
# Platform has yet to be enabled.
if self.project is None:
return
self._computeRepositoryAPI()
self._computePlatformVersions()
def _computeRepositoryAPI(self):
self.repository_api = modellib.HrefField(
href='/repos/%s/api' % self.project.short_name,
)
def _computePlatformVersions(self):
self.platform_versions = modellib.HrefField(
href='/api/platforms/%s/platformVersions' % self.platform_id,
)
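    # Illustrative only (hypothetical values): a platform with platform_id=7
    # attached to a project whose short_name is 'centos' is exposed with
    # repository_api '/repos/centos/api' and platform_versions
    # '/api/platforms/7/platformVersions'.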
class PlatformVersions(modellib.Collection):
class Meta:
abstract = True
list_fields = ['platform_version']
class PlatformVersion(modellib.XObjIdModel):
class Meta:
abstract = True
name = D(models.CharField(max_length=1026), 'Platform version name')
version = D(models.CharField(max_length=1026), 'Is the platform version')
revision = D(models.CharField(max_length=1026), 'Is the platform revision')
label = models.CharField(max_length=1026)
ordering = D(models.DecimalField(), 'Ordering of the version, is a decimal')
class PlatformBuildTemplates(modellib.Collection):
class Meta:
abstract = True
list_fields = ['platform_build_template']
class PlatformBuildTemplate(modellib.XObjIdModel):
class Meta:
abstract = True
pass
for mod_obj in sys.modules[__name__].__dict__.values():
if hasattr(mod_obj, '_xobj'):
if mod_obj._xobj.tag:
modellib.type_map[mod_obj._xobj.tag] = mod_obj
|
apache-2.0
| -8,816,140,235,265,148,000
| 40.083333
| 121
| 0.702379
| false
| 3.873571
| false
| false
| false
|
pbs/django-cms
|
cms/utils/page.py
|
1
|
3242
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db.models import Q
from django.core.exceptions import ValidationError
import re
APPEND_TO_SLUG = "-copy"
COPY_SLUG_REGEX = re.compile(r'^.*-copy(?:-(\d+)*)?$')
def is_valid_page_slug(page, parent, lang, slug, site, path=None):
"""Validates given slug depending on settings.
"""
from cms.models import Title
# Exclude the page with the publisher_state == page.PUBLISHER_STATE_DELETE
qs = Title.objects.filter(page__site=site)
if page.id:
qs = qs.exclude(
Q(page=page) |
Q(page=page.publisher_public) |
Q(page__publisher_state=page.PUBLISHER_STATE_DELETE)
)
if settings.i18n_installed:
qs = qs.filter(language=lang)
if not settings.CMS_FLAT_URLS:
if parent:
if parent.is_home():
qs = qs.filter(Q(page__parent=parent) |
Q(page__parent__isnull=True))
else:
qs = qs.filter(page__parent=parent)
else:
qs = qs.filter(page__parent__isnull=True)
if page.pk:
qs = qs.exclude(language=lang, page=page)
## Check for slugs
if qs.filter(slug=slug).count():
return False
## Check for path
if path and qs.filter(path=path).count():
return False
return True
def get_available_slug(title, new_slug=None):
"""Smart function generates slug for title if current title slug cannot be
used. Appends APPEND_TO_SLUG to slug and checks it again.
(Used in page copy function)
Returns: slug
"""
rewrite_slug = False
slug = new_slug or title.slug
# We need the full path for the title to check for conflicting urls
title.slug = slug
title.update_path()
path = title.path
# This checks for conflicting slugs/overwrite_url, for both published and unpublished pages
    # This is a simpler check than in page_resolver.is_valid_url, which
    # takes the actual page URL into account
if not is_valid_page_slug(title.page, title.page.parent, title.language, slug, title.page.site, path):
if title.has_url_overwrite and is_valid_page_slug(title.page, title.page.parent, title.language, slug, title.page.site, None):
# The title has an overwrite url so a slug change will not change the path and
# the validation fails only because the path already exists.
return slug
# add nice copy attribute, first is -copy, then -copy-2, -copy-3, ....
match = COPY_SLUG_REGEX.match(slug)
if match:
try:
next = int(match.groups()[0]) + 1
slug = "-".join(slug.split('-')[:-1]) + "-%d" % next
except TypeError:
slug = slug + "-2"
else:
slug = slug + APPEND_TO_SLUG
return get_available_slug(title, slug)
else:
return slug
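# Illustrative only (hypothetical slugs): successive copies of a page whose
# slug is 'about' resolve to 'about-copy', then 'about-copy-2',
# 'about-copy-3', ... through the COPY_SLUG_REGEX recursion above.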
def check_title_slugs(page):
"""Checks page title slugs for duplicity if required, used after page move/
cut/paste.
"""
for title in page.title_set.all():
old_slug = title.slug
title.slug = get_available_slug(title)
if title.slug != old_slug:
title.save()
|
bsd-3-clause
| -1,183,574,578,454,097,700
| 34.23913
| 134
| 0.607958
| false
| 3.747977
| false
| false
| false
|
yothenberg/mcxapi
|
mcxapi/api.py
|
1
|
20155
|
import logging
import requests
import re
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from datetime import datetime, timezone, timedelta
from collections import namedtuple
from anytree import RenderTree, NodeMixin
from .exceptions import McxNetworkError, McxParsingError
Inbox = namedtuple('Inbox', 'ids fieldnames cases')
def ordinal(n):
if 10 <= n % 100 < 20:
return str(n) + 'th'
else:
return str(n) + {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, "th")
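# Illustrative only: ordinal(1) -> '1st', ordinal(2) -> '2nd',
# ordinal(11) -> '11th' (the 10-19 range is special-cased) and
# ordinal(23) -> '23rd'.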
def parse_date(date):
# Weird date format /Date(milliseconds-since-epoch-+tzoffset)/
# /Date(1486742990423-0600)/
# /Date(1486664366563+0100)/
r = re.compile(r'/Date\((\d+)([-+])(\d{2,2})(\d{2,2})\)/')
m = r.match(date)
if m is None:
return "Unknown Date Format"
else:
milliseconds, sign, tzhours, tzminutes = m.groups()
seconds = int(milliseconds) / 1000.0
sign = -1 if sign == '-' else 1
tzinfo = timezone(sign * timedelta(hours=int(tzhours), minutes=int(tzminutes)))
return datetime.fromtimestamp(seconds, tzinfo).strftime('%Y-%m-%d %H:%M%z')
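# Illustrative only: parse_date('/Date(1486742990423-0600)/') yields
# '2017-02-10 10:09-0600'; the value is milliseconds since the epoch,
# rendered in the embedded UTC offset. Non-matching input returns the
# fallback 'Unknown Date Format'.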
class McxApi:
BASE_URL = "https://{}.mcxplatform.de/CaseManagement.svc/{}"
TIMEOUT = 45
RETRY_COUNT = 3
PASSWORD_KEY = "password"
TOKEN_KEY = "token"
PAGE_SIZE = 500
PAGES = 199
def __init__(self, instance, company, user, password, headers=None, pool_connections=50):
self.instance = instance
self.company = company
self.user = user
self.password = password
self.session = requests.Session()
# 500 Internal Service Error
# 501 Not Implemented
# 502 Bad Gateway
# 503 Service Unavailable
# 504 Gateway Timeout
retries = Retry(total=self.RETRY_COUNT, backoff_factor=1, status_forcelist=[500, 501, 502, 503, 504], method_whitelist=['GET', 'POST'])
adapter = HTTPAdapter(pool_connections=pool_connections, pool_maxsize=pool_connections, max_retries=retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.headers = headers
self.token = None
print("HTTP connection timeout: {}, retry count: {}".format(self.TIMEOUT, self.RETRY_COUNT))
def _sanitize_json_for_logging(self, json):
json_copy = json.copy()
if self.PASSWORD_KEY in json_copy:
json_copy[self.PASSWORD_KEY] = "*****"
if self.TOKEN_KEY in json:
json_copy[self.TOKEN_KEY] = "*****"
return json_copy
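    # Illustrative only (hypothetical payload): {'password': 'secret', 'a': 1}
    # is logged as {'password': '*****', 'a': 1}; the caller's dict is left
    # untouched because a copy is sanitized.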
def _url(self, endpoint):
return self.BASE_URL.format(self.instance, endpoint)
    def _post(self, url, params=None, json=None):
        # A mutable default dict would be shared between calls and would keep
        # the token injected below, so create a fresh dict per call instead.
        if json is None:
            json = {}
        if self.token:
            json[self.TOKEN_KEY] = self.token
logging.info("POST: url: {} json: {}".format(url, self._sanitize_json_for_logging(json)))
try:
r = self.session.post(url, params=params, json=json, timeout=self.TIMEOUT)
r.raise_for_status()
except requests.exceptions.RequestException as e:
raise McxNetworkError(url, json=self._sanitize_json_for_logging(json)) from e
return r.json()
def auth(self):
url = self._url("authenticate")
payload = {'userName': self.user, self.PASSWORD_KEY: self.password, 'companyName': self.company}
json = self._post(url, json=payload)
result = json["AuthenticateResult"]
if "token" in result:
self.token = result["token"]
def get_case_inbox(self):
""" Fetches active cases assigned to the user
"""
case_ids = []
fieldnames = []
cases = []
url = self._url("getMobileCaseInboxItems")
        # Fetches PAGE_SIZE (500) cases per request, for at most PAGES (199)
        # pages, i.e. up to 99,500 cases in total
for p in range(0, self.PAGES):
start_count = len(case_ids)
payload = {'startPage': p, 'pageSize': self.PAGE_SIZE}
print("Fetching {} {} case_ids from inbox".format(ordinal(p + 1), self.PAGE_SIZE))
json = self._post(url, json=payload)
self.parse_case_inbox(json, case_ids, fieldnames, cases)
if len(case_ids) == start_count:
break
fieldnames.sort()
return Inbox(ids=case_ids, fieldnames=fieldnames, cases=cases)
def parse_case_inbox(self, json, case_ids, fieldnames, cases):
rows = json["GetMobileCaseInboxItemsResult"]["caseMobileInboxData"]["Rows"]
try:
for row in rows:
case = {}
case_id = None
row["Inbox Owner"] = self.user
for key, val in row.items():
# special case for the nested list of n columns
if key == "Columns":
for column in val:
column_name = column["ColumnName"]
if column_name not in fieldnames:
fieldnames.append(column_name)
case[column_name] = column["ColumnValue"]
else:
if key not in fieldnames:
fieldnames.append(key)
if key == "CaseId":
case_id = val
case[key] = val
if case_id not in case_ids:
# Dedupes the cases in case the same case_id is exported multiple times because of paging
case_ids.append(case_id)
cases.append(case)
except Exception as e:
raise McxParsingError(json, "Unable to parse inbox") from e
def get_case(self, case_id):
""" Fetches detailed information about a case
"""
url = self._url("getCaseView")
payload = {'caseId': case_id}
json = self._post(url, json=payload)
try:
case = Case(json["GetCaseViewResult"])
except Exception as e:
raise McxParsingError(json, "Unable to parse case {}".format(case_id)) from e
return case
class Case:
""" A Case
"""
def __init__(self, case_view):
values = case_view["viewValues"]
self.case_id = values["CaseId"]
self.alert_name = values["AlertName"]
self.owner = values["OwnerFullName"]
self.time_to_close = values["TimeToCloseDisplay"]
self.time_to_close_goal = values["TimeToCloseGoalDisplay"]
self.time_to_respond = values["TimeToRespondDisplay"]
self.time_to_respond_goal = values["TimeToRespondGoalDisplay"]
self.status_id = values["CaseStatusId"]
self.priority_id = values["CasePriorityId"]
self.respondent_id = values["RespondentId"]
self.survey_id = values["SurveyId"]
self.survey_name = values["SurveyName"]
self.status = ""
self.priority = ""
self.activity_notes = []
self.items = []
self.source_responses = []
items = case_view["caseView"]["CaseViewItems"]
self._parse_items(items)
self._parse_item_answers(values["ItemAnswers"])
self._parse_root_cause_answers(values["CaseRootCauseAnswers"])
self._parse_activity_notes(values["ActivityNotes"])
self._parse_source_responses(values["SourceResponses"])
self.status = self._lookup_item_dropdown_value(Item.STATUS, self.status_id)
self.priority = self._lookup_item_dropdown_value(Item.PRIORITY, self.priority_id)
def __str__(self):
items = "\n".join([str(a) for a in self.items])
activity_notes = "\n".join([str(n) for n in self.activity_notes])
source_responses = "\n".join([str(s) for s in self.source_responses])
return "id:{} owner:{} status:{} priority:{}\nACTIVITY NOTES:\n{}\n\nITEMS:\n{}\n\nRESPONSES:\n{}".format(self.case_id,
self.owner,
self.status,
self.priority,
activity_notes,
items,
source_responses)
@property
def dict(self):
""" Returns a dictionary representation of the standard properties, source_responses, and items with an answer
"""
COL_CASE_ID = "Case ID"
COL_OWNER = "Owner"
COL_TIME_TO_CLOSE = "Time To Close"
COL_TIME_TO_CLOSE_GOAL = "Time to Goal Close"
COL_TIME_TO_RESPOND = "Time To Respond"
COL_TIME_TO_RESPOND_GOAL = "Time To Goal Respond"
COL_STATUS = "Status"
COL_PRIORITY = "Priority"
COL_RESPONDEND_ID = "Respondent Id"
COL_SURVEY_ID = "Survey Id"
COL_SURVEY_NAME = "Survey Name"
case = {COL_CASE_ID: self.case_id,
COL_OWNER: self.owner,
COL_TIME_TO_CLOSE: self.time_to_close,
COL_TIME_TO_CLOSE_GOAL: self.time_to_close_goal,
COL_TIME_TO_RESPOND: self.time_to_respond,
COL_TIME_TO_RESPOND_GOAL: self.time_to_respond_goal,
COL_STATUS: self.status,
COL_PRIORITY: self.priority,
COL_RESPONDEND_ID: self.respondent_id,
COL_SURVEY_ID: self.survey_id,
COL_SURVEY_NAME: self.survey_name}
for item in self.items:
if item.answer or item.root_cause_answers:
case[item.case_item_text] = item.display_answer
# Activity notes are exported one per column
i = 1
COL_ACTIVITY_NOTES = "Activity Note {}"
for activity_note in self.activity_notes:
case[COL_ACTIVITY_NOTES.format(i)] = "{} @ {}: {}".format(activity_note.full_name,
parse_date(activity_note.date),
activity_note.note)
i += 1
for source_response in self.source_responses:
# sometimes the source responses don't have a question text so we use the case_item_id for the column header
if source_response.question_text:
case[source_response.question_text] = source_response.answer_text
else:
case[str(source_response.case_item_id)] = source_response.answer_text
return case
def _lookup_item_dropdown_value(self, case_question_type_id, value):
item = self._find_item_by_type(case_question_type_id)
if item:
dropdown = item._find_dropdown(value)
if dropdown:
return dropdown.text
return None
def _parse_items(self, items):
for item_dict in items:
item = Item(item_dict)
self.items.append(item)
def _parse_activity_notes(self, activity_notes):
for note_dict in activity_notes:
self.activity_notes.append(ActivityNote(note_dict))
def _parse_item_answers(self, item_answers):
for item_answer_dict in item_answers:
item = self._find_item(item_answer_dict["CaseItemId"])
if item:
item.add_answer(item_answer_dict)
def _parse_root_cause_answers(self, root_cause_answers):
for root_cause_answer_dict in root_cause_answers:
item = self._find_item(root_cause_answer_dict["CaseItemId"])
if item:
item.add_root_cause_answer(root_cause_answer_dict)
def _parse_source_responses(self, source_responses):
for source_response_dict in source_responses:
self.source_responses.append(SourceResponse(source_response_dict))
def _find_item(self, case_item_id):
return next((x for x in self.items if x.case_item_id == case_item_id), None)
def _find_item_by_type(self, case_question_type_id):
return next((x for x in self.items if x.case_question_type_id == case_question_type_id), None)
class Item:
def __init__(self, values):
self.case_item_id = values["CaseItemId"]
self.case_question_type_id = values["CaseQuestionTypeId"]
self.case_item_text = values["CaseItemText"]
self.dropdown_values = []
self.root_cause_values = []
self.root_cause_answers = []
self.answer = None
self.display_answer = ""
self._parse_dropdown_values(values["DropdownValues"])
self._parse_root_cause_values(values["RootCauseValues"])
self._build_root_cause_tree()
def __str__(self):
dropdowns = ", ".join([str(d) for d in self.dropdown_values])
root_causes = self._draw_root_cause_tree()
root_causes_answers = self._draw_root_cause_answers()
return """\n==========\nitem_id:{} question_type: {} text:{} display:{}\n
dropdown:\n{}\n
rootcauses:\n{}\n
rootcause_answers:\n{}\n
answer:\n{}""".format(self.case_item_id,
self.case_question_type_id,
self.case_item_text,
self.display_answer,
dropdowns,
root_causes,
root_causes_answers,
self.answer)
def _draw_root_cause_tree(self):
roots = [r for r in self.root_cause_values if r.is_root is True]
tree = ""
for root in roots:
for pre, _, node in RenderTree(root):
tree = "{}{}{}\n".format(tree, pre, node.root_cause_name)
return tree
def _draw_root_cause_answers(self):
answers = ""
leaf_answers = [a for a in self.root_cause_answers if a.root_cause.is_leaf]
for leaf_answer in leaf_answers:
leaf = leaf_answer.root_cause.root_cause_name
ancestors = " > ".join([c.root_cause_name for c in leaf_answer.root_cause.anchestors])
answers = "{}{} > {}\n".format(answers, ancestors, leaf)
return answers
def _parse_dropdown_values(self, dropdown_values):
for dropdown_dict in dropdown_values:
dropdown = Dropdown(dropdown_dict)
self.dropdown_values.append(dropdown)
def _parse_root_cause_values(self, root_cause_values):
for root_cause_dict in root_cause_values:
root_cause = RootCause(root_cause_dict)
self.root_cause_values.append(root_cause)
def _build_root_cause_tree(self):
# assign parents
for root_cause in self.root_cause_values:
if root_cause.parent_tree_id != "#":
root_cause.parent = self._find_root_cause(root_cause.parent_tree_id)
def _find_root_cause(self, tree_id):
return next((r for r in self.root_cause_values if r.tree_id == tree_id), None)
# case_question_type_ids
CASE_ID = 1
PROGRAM_NAME = 2
CREATED_DATE = 3
STATUS = 4
PRIORITY = 5
ROOT_CAUSE = 6
ACTIVITY_NOTES = 7
OWNER = 9
ALERT_NAME = 10
SHORT_TEXT_BOX = 11
LONG_TEXT_BOX = 12
DROPDOWN = 13
SURVEY_EXCERPT = 15
CLOSED_DATE = 16
SURVEY_NAME = 17
TIME_TO_RESPOND = 18
TIME_TO_CLOSE = 19
EXPLANATION_TEXT = 20
DIVIDER = 21
WATCHERS = 22
LAST_MODIFIED = 25
DATE_PICKER = 26
NUMERIC = 27
def _find_dropdown(self, value):
return next((x for x in self.dropdown_values if x.id == value), None)
def add_answer(self, values):
self.answer = Answer(values)
if self.answer.is_empty:
            self.display_answer = ""
elif self.case_question_type_id in [self.SHORT_TEXT_BOX, self.LONG_TEXT_BOX, self.DATE_PICKER]:
self.display_answer = self.answer.text_value
elif self.case_question_type_id == self.NUMERIC:
self.display_answer = self.answer.double_value
elif self.case_question_type_id == self.DROPDOWN:
dropdown = self._find_dropdown(self.answer.int_value)
if dropdown:
self.display_answer = dropdown.text
def add_root_cause_answer(self, values):
answer = RootCauseAnswer(values)
answer.root_cause = self._find_root_cause(answer.tree_id)
self.root_cause_answers.append(answer)
self.display_answer = self._draw_root_cause_answers()
class ActivityNote:
def __init__(self, values):
self.note = values["ActivityNote"]
self.date = values["ActivityNoteDate"]
self.full_name = values["FullName"]
def __str__(self):
return "{}@{}: {}".format(self.full_name, self.date, self.note)
class Dropdown:
def __init__(self, values):
self.id = values["Id"]
self.text = values["Text"]
def __str__(self):
return "{}:{}".format(self.id, self.text)
class RootCause(NodeMixin):
def __init__(self, values):
self.case_item_id = values["CaseItemId"]
self.case_root_cause_id = values["CaseRootCauseId"]
self.root_cause_name = values["RootCauseName"]
self.parent_tree_id = values["ParentTreeId"]
self.tree_id = values["TreeId"]
self.parent = None
def __str__(self):
return "item_id:{} root_cause_id:{} root_cause_name:{} parent_tree_id:{} tree_id:{}".format(self.case_item_id,
self.case_root_cause_id,
self.root_cause_name,
self.parent_tree_id,
self.tree_id)
class RootCauseAnswer:
def __init__(self, values):
self.case_item_id = values["CaseItemId"]
self.case_root_cause_id = values["CaseRootCauseId"]
self.tree_id = values["TreeId"]
self.root_cause = None
def __str__(self):
return "item_id:{} root_cause_id:{} tree_id:{}".format(self.case_item_id, self.case_root_cause_id, self.tree_id)
class Answer:
def __init__(self, values):
self.case_item_answer_id = values["CaseItemAnswerId"]
self.case_item_id = values["CaseItemId"]
self.case_question_type_id = values["CaseQuestionTypeId"]
self.is_empty = values["IsEmpty"]
self.bool_value = values["BoolValue"]
self.double_value = values["DoubleValue"]
self.int_value = values["IntValue"]
self.text_value = values["TextValue"]
self.time_value = values["TimeValue"]
def __str__(self):
return "id:{} question_type:{} bool:{} double:{} int:{} text:{} time:{}".format(self.case_item_answer_id,
self.case_question_type_id,
self.bool_value,
self.double_value,
self.int_value,
self.text_value,
self.time_value)
class SourceResponse:
def __init__(self, values):
self.case_item_id = values["Key"]
self.question_text = values["Value"]["QuestionText"]
self.answer_text = values["Value"]["AnswerText"]
def __str__(self):
return "item_id:{} text:{} answer:{}".format(self.case_item_id, self.question_text, self.answer_text)
|
mit
| 4,570,036,573,036,988,400
| 39.149402
| 143
| 0.541851
| false
| 3.972211
| false
| false
| false
|
clebersfonseca/google-python-exercises
|
basic/list1.py
|
1
|
3265
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
t = 0
for w in words:
if len(w) >= 2 and w[0] == w[-1]:
t += 1
return t
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
listx = []
listall = []
for w in words:
if w[0] == 'x':
listx.append(w)
else:
listall.append(w)
listx.sort()
listx.extend(sorted(listall))
return listx
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
def get_last(t):
return t[-1]
return sorted(tuples, key=get_last)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print('match_ends')
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print()
print('front_x')
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print()
print('sort_last')
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
|
apache-2.0
| 1,280,166,186,578,082,600
| 29.514019
| 79
| 0.580398
| false
| 3.037209
| true
| false
| false
|
NicovincX2/Python-3.5
|
Physique/Mouvement/Dynamique/Systèmes oscillants/Oscillateur harmonique/oscillateur_harmonique_periode_animation.py
|
1
|
2155
|
# -*- coding: utf-8 -*-
import os
'''
Simple numerical solution of the harmonic oscillator equation, illustrating
the isochronism of the oscillations (the period does not depend on the
starting amplitude), with an animation over time.
'''
import numpy as np  # for np.linspace
import scipy as sp  # usual alias
import scipy.integrate  # for the integration
import matplotlib.pyplot as plt  # for the plots
from matplotlib import animation  # for the progressive animation
omega0 = 1  # the natural angular frequency
def equadiff(y, t):
    '''Returns the system dx/dt = vx and dvx/dt = -omega0**2 * x,
    i.e. the harmonic oscillator x'' + omega0**2 * x = 0.'''
    x, vx = y  # y holds the position and the velocity
    return [vx, - omega0**2 * x]  # the doublet [dx/dt, dvx/dt]
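# Note (not in the original script): with x(0) = x0 and v(0) = 0, the
# analytic solution is x(t) = x0*cos(omega0*t), so every curve below has the
# same period T = 2*pi/omega0 whatever its amplitude; this is the
# isochronism being illustrated.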
nb_CI = 10  # number of initial conditions explored
t = np.linspace(0, 10, 1000)  # the total integration time
x0 = np.linspace(-5, 5, nb_CI)  # the chosen initial positions
v0 = [0] * nb_CI  # the chosen initial velocities
oscillateurs = []
lignes = []
fig = plt.figure(figsize=(10, 8))
for i in range(nb_CI):  # for each initial condition
    # the integration itself
    sol = sp.integrate.odeint(equadiff, [x0[i], v0[i]], t)
    x = sol[:, 0]  # retrieve the position
    l, = plt.plot(t, x)  # and plot it
oscillateurs.append(x)
lignes.append(l)
def init():
for l in lignes:
l.set_xdata([])
l.set_ydata([])
def animate(i):
for l, x in zip(lignes, oscillateurs):
l.set_ydata(x[:i])
l.set_xdata(t[:i])
# Only the cosmetic touches remain
plt.title('Oscillateur harmonique pour differentes amplitudes initiales')
plt.ylabel('Position (unite arbitraire)')
plt.xlabel('Temps (unite arbitraire)')
anim = animation.FuncAnimation(fig, animate, len(
t), interval=20, init_func=init, blit=False)
plt.show()
# plt.savefig('PNG/S01_oscillateur_harmonique_periode.png')
os.system("pause")
|
gpl-3.0
| -7,314,351,340,730,637,000
| 29.169014
| 79
| 0.634921
| false
| 2.86747
| false
| false
| false
|
dufferzafar/critiquebrainz
|
critiquebrainz/frontend/apis/relationships/artist.py
|
1
|
3750
|
"""
Relationship processor for artist entity.
"""
from urlparse import urlparse
from flask_babel import gettext
import urllib
def process(artist):
"""Handles processing supported relation lists."""
if 'artist-relation-list' in artist and artist['artist-relation-list']:
artist['band-members'] = _artist(artist['artist-relation-list'])
if 'url-relation-list' in artist and artist['url-relation-list']:
artist['external-urls'] = _url(artist['url-relation-list'])
return artist
def _artist(relations):
    """Processor for Artist-Artist relationship.
    :returns: band members.
    """
band_members = []
    for relation in relations:
if relation['type'] == 'member of band':
band_members.append(relation)
return band_members
def _url(relations):
"""Processor for Artist-URL relationship."""
basic_types = {
'wikidata': {'name': gettext('Wikidata'), 'icon': 'wikidata-16.png', },
'discogs': {'name': gettext('Discogs'), 'icon': 'discogs-16.png', },
'allmusic': {'name': gettext('Allmusic'), 'icon': 'allmusic-16.png', },
'bandcamp': {'name': gettext('Bandcamp'), 'icon': 'bandcamp-16.png', },
'official homepage': {'name': gettext('Official homepage'), 'icon': 'home-16.png', },
'BBC Music page': {'name': gettext('BBC Music'), },
}
external_urls = []
    for relation in relations:
if relation['type'] in basic_types:
external_urls.append(dict(relation.items() + basic_types[relation['type']].items()))
else:
try:
target = urlparse(relation['target'])
if relation['type'] == 'lyrics':
external_urls.append(dict(
relation.items() + {
'name': gettext('Lyrics'),
'disambiguation': target.netloc,
}.items()))
elif relation['type'] == 'wikipedia':
external_urls.append(dict(
relation.items() + {
'name': gettext('Wikipedia'),
'disambiguation': target.netloc.split('.')[0] + ':' +
urllib.unquote(target.path.split('/')[2]).decode('utf8').replace("_", " "),
'icon': 'wikipedia-16.png',
}.items()))
elif relation['type'] == 'youtube':
path = target.path.split('/')
if path[1] == 'user' or path[1] == 'channel':
disambiguation = path[2]
else:
disambiguation = path[1]
external_urls.append(dict(
relation.items() + {
'name': gettext('YouTube'),
'disambiguation': disambiguation,
'icon': 'youtube-16.png',
}.items()))
elif relation['type'] == 'social network':
if target.netloc == 'twitter.com':
external_urls.append(dict(
relation.items() + {
'name': gettext('Twitter'),
'disambiguation': target.path.split('/')[1],
'icon': 'twitter-16.png',
}.items()))
else:
# TODO(roman): Process other types here
pass
except Exception as e: # FIXME(roman): Too broad exception clause.
# TODO(roman): Log error.
pass
external_urls.sort()
return external_urls
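# --- Added usage sketch (illustrative; the relation dicts below are
# hypothetical stand-ins for the MusicBrainz data this module receives,
# and running it assumes flask_babel is installed) ---
if __name__ == '__main__':
    sample_relations = [
        {'type': 'member of band', 'artist': {'name': 'Example Member'}},
        {'type': 'producer', 'artist': {'name': 'Example Producer'}},
    ]
    # Only the 'member of band' relation is kept:
    print(_artist(sample_relations))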
|
gpl-2.0
| 5,770,933,709,101,641,000
| 42.103448
| 121
| 0.476
| false
| 4.62963
| false
| false
| false
|
electrumalt/electrum-ixc
|
gui/qt/transaction_dialog.py
|
1
|
7687
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, time, datetime, re, threading
from electrum_ixc.i18n import _, set_language
from electrum_ixc.util import print_error, print_msg
import os.path, json, ast, traceback
import shutil
import StringIO
try:
import PyQt4
except Exception:
    sys.exit("Error: Could not import PyQt4. On Linux systems, you may try 'sudo apt-get install python-qt4'")
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_ixc import transaction
from electrum_ixc.plugins import run_hook
from util import MyTreeWidget
from util import MONOSPACE_FONT
class TxDialog(QDialog):
def __init__(self, tx, parent):
self.tx = tx
tx_dict = tx.as_dict()
self.parent = parent
self.wallet = parent.wallet
QDialog.__init__(self)
self.setMinimumWidth(600)
self.setWindowTitle(_("Transaction"))
self.setModal(1)
vbox = QVBoxLayout()
self.setLayout(vbox)
vbox.addWidget(QLabel(_("Transaction ID:")))
self.tx_hash_e = QLineEdit()
self.tx_hash_e.setReadOnly(True)
vbox.addWidget(self.tx_hash_e)
self.status_label = QLabel()
vbox.addWidget(self.status_label)
self.date_label = QLabel()
vbox.addWidget(self.date_label)
self.amount_label = QLabel()
vbox.addWidget(self.amount_label)
self.fee_label = QLabel()
vbox.addWidget(self.fee_label)
self.add_io(vbox)
vbox.addStretch(1)
self.buttons = buttons = QHBoxLayout()
vbox.addLayout( buttons )
buttons.addStretch(1)
self.sign_button = b = QPushButton(_("Sign"))
b.clicked.connect(self.sign)
buttons.addWidget(b)
self.broadcast_button = b = QPushButton(_("Broadcast"))
b.clicked.connect(lambda: self.parent.broadcast_transaction(self.tx))
b.hide()
buttons.addWidget(b)
self.save_button = b = QPushButton(_("Save"))
b.clicked.connect(self.save)
buttons.addWidget(b)
cancelButton = QPushButton(_("Close"))
cancelButton.clicked.connect(lambda: self.done(0))
buttons.addWidget(cancelButton)
cancelButton.setDefault(True)
b = QPushButton()
b.setIcon(QIcon(":icons/qrcode.png"))
b.clicked.connect(self.show_qr)
buttons.insertWidget(1,b)
run_hook('transaction_dialog', self)
self.update()
def show_qr(self):
text = self.tx.raw.decode('hex')
try:
self.parent.show_qrcode(text, 'Transaction')
except Exception as e:
self.show_message(str(e))
def sign(self):
self.parent.sign_raw_transaction(self.tx)
self.update()
def save(self):
name = 'signed_%s.txn' % (self.tx.hash()[0:8]) if self.tx.is_complete() else 'unsigned.txn'
fileName = self.parent.getSaveFileName(_("Select where to save your signed transaction"), name, "*.txn")
if fileName:
with open(fileName, "w+") as f:
f.write(json.dumps(self.tx.as_dict(),indent=4) + '\n')
self.show_message(_("Transaction saved successfully"))
def update(self):
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(self.tx)
if self.wallet.can_sign(self.tx):
self.sign_button.show()
else:
self.sign_button.hide()
if self.tx.is_complete():
status = _("Signed")
tx_hash = self.tx.hash()
if tx_hash in self.wallet.transactions.keys():
conf, timestamp = self.wallet.verifier.get_confirmations(tx_hash)
if timestamp:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
time_str = 'pending'
status = _("%d confirmations")%conf
self.broadcast_button.hide()
else:
time_str = None
conf = 0
self.broadcast_button.show()
else:
s, r = self.tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed (%d/%d)'%(s,r))
time_str = None
self.broadcast_button.hide()
tx_hash = 'unknown'
self.tx_hash_e.setText(tx_hash)
self.status_label.setText(_('Status:') + ' ' + status)
if time_str is not None:
self.date_label.setText(_("Date: %s")%time_str)
self.date_label.show()
else:
self.date_label.hide()
# if we are not synchronized, we cannot tell
if self.parent.network is None or not self.parent.network.is_running() or not self.parent.network.is_connected():
return
if not self.wallet.up_to_date:
return
if is_relevant:
if is_mine:
if fee is not None:
self.amount_label.setText(_("Amount sent:")+' %s'% self.parent.format_amount(v-fee) + ' ' + self.parent.base_unit())
self.fee_label.setText(_("Transaction fee")+': %s'% self.parent.format_amount(fee) + ' ' + self.parent.base_unit())
else:
self.amount_label.setText(_("Amount sent:")+' %s'% self.parent.format_amount(v) + ' ' + self.parent.base_unit())
self.fee_label.setText(_("Transaction fee")+': '+ _("unknown"))
else:
self.amount_label.setText(_("Amount received:")+' %s'% self.parent.format_amount(v) + ' ' + self.parent.base_unit())
else:
self.amount_label.setText(_("Transaction unrelated to your wallet"))
run_hook('transaction_dialog_update', self)
def add_io(self, vbox):
if self.tx.locktime > 0:
vbox.addWidget(QLabel("LockTime: %d\n" % self.tx.locktime))
vbox.addWidget(QLabel(_("Inputs")))
def format_input(x):
if x.get('is_coinbase'):
return 'coinbase'
else:
_hash = x.get('prevout_hash')
return _hash[0:8] + '...' + _hash[-8:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
lines = map(format_input, self.tx.inputs )
i_text = QTextEdit()
i_text.setFont(QFont(MONOSPACE_FONT))
i_text.setText('\n'.join(lines))
i_text.setReadOnly(True)
i_text.setMaximumHeight(100)
vbox.addWidget(i_text)
vbox.addWidget(QLabel(_("Outputs")))
lines = map(lambda x: x[0] + u'\t\t' + self.parent.format_amount(x[1]) if x[1] else x[0], self.tx.get_outputs())
o_text = QTextEdit()
o_text.setFont(QFont(MONOSPACE_FONT))
o_text.setText('\n'.join(lines))
o_text.setReadOnly(True)
o_text.setMaximumHeight(100)
vbox.addWidget(o_text)
def show_message(self, msg):
QMessageBox.information(self, _('Message'), msg, _('OK'))
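# --- Added usage sketch (illustrative comments only; 'tx' and 'main_window'
# come from the surrounding Electrum GUI and are not defined here) ---
# dialog = TxDialog(tx, main_window)
# dialog.exec_()  # modal dialog: lets the user sign, broadcast or save the tx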
|
gpl-3.0
| 436,080,397,216,934,200
| 32.714912
| 136
| 0.589046
| false
| 3.731553
| false
| false
| false
|
fungos/gemuo
|
src/gemuo/engine/relpor.py
|
1
|
1926
|
#
# GemUO
#
# (c) 2005-2012 Max Kellermann <max@duempel.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import re
from twisted.python import log
import uo.packets as p
from gemuo.engine import Engine
tilepic_re = re.compile(r'\{ tilepic \d+ \d+ (\d+) \}')
class RelPorCaptcha(Engine):
"""Responds to the captcha gumps on the Rel Por freeshard."""
def _on_captcha(self, packet):
tiles = []
total = 0
for m in tilepic_re.findall(packet.layout):
value = int(m)
total += value
tiles.append(value)
log.msg("Captcha: " + ','.join(map(hex, tiles)))
if len(tiles) == 0: return
# see which tile id deviates the most
avg = total / len(tiles)
d = map(lambda value: abs(avg - value), tiles)
m = max(zip(d, range(len(d))), key=lambda value: value[0])
# pick this tile
response = m[1]
log.msg("Captcha response: %#x" % tiles[response])
# and send the gump response
self._client.send(p.GumpResponse(serial=packet.serial,
gump_id=packet.gump_id,
button_id=1,
switches=[response]))
def on_packet(self, packet):
if isinstance(packet, p.DisplayGumpPacked) and \
len(packet.text) == 1 and \
'Which of these things is not like the others' in packet.text[0]:
self._on_captcha(packet)
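# --- Added illustration (sketch, not part of the original engine; running
# it standalone assumes the module's gemuo/uo dependencies are importable) ---
# The selection above picks the tile whose id deviates most from the mean.
# With hypothetical tile ids where one sprite is the odd one out:
if __name__ == '__main__':
    tiles = [0x1000, 0x1002, 0x1001, 0x2500, 0x1003]
    avg = sum(tiles) / len(tiles)
    d = map(lambda value: abs(avg - value), tiles)
    m = max(zip(d, range(len(d))), key=lambda value: value[0])
    print("odd one out: index %d, tile %#x" % (m[1], tiles[m[1]]))  # index 3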
|
gpl-2.0
| 277,350,442,926,236,400
| 32.789474
| 80
| 0.590343
| false
| 3.739806
| false
| false
| false
|
nlsynth/iroha
|
examples/config-examples.py
|
1
|
1102
|
#! /usr/bin/python
# Run this to generate Makefile, then run 'make'
EXAMPLES = {
'minimum' : {'minimum'},
'copy' : {'copy'},
'loop' : {'loop', 'example_common'},
'xorshift' : {'xorshift', 'example_common'},
}
EXAMPLE_OBJS = ['example_common', 'copy', 'loop',
'minimum', 'xorshift']
OUTPUT = 'Makefile'
ofh = open(OUTPUT, 'w')
ofh.write('# Generated by config-examples.py\n\n')
ofh.write('all\t: ' + ' '.join(EXAMPLES) + '\n\n')
ofh.write('clean\t:\n')
ofh.write('\trm -f *.o\n')
ofh.write('\trm -f ' + ' '.join(EXAMPLES) + '\n')
for e in EXAMPLE_OBJS:
ofh.write(e + '.o\t: ' + e + '.cpp\n')
ofh.write('\tg++ -std=c++11 -Wall -g -I../src -c ' + e + '.cpp\n\n')
for k, v in EXAMPLES.iteritems():
objs = []
for o in v:
objs.append(o + '.o')
obj_lst = ' '.join(objs)
ofh.write(k + '\t: ../src/out/Default/obj.target/src/libiroha.a ' + obj_lst + '\n')
ofh.write('\tg++ -o ' + k + ' ' + obj_lst + ' -L../src/out/Default/obj.target/src/ -liroha -lverilog_writer -lnumeric -liroha\n')
print('Generated Makefile. Please run \'make\'')
|
bsd-3-clause
| -4,760,997,667,858,220,000
| 29.611111
| 133
| 0.554446
| false
| 2.568765
| false
| false
| false
|
jbaayen/sympy
|
sympy/thirdparty/pyglet/pyglet/font/__init__.py
|
1
|
21133
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load fonts and render text.
This is a fairly low-level interface to text rendering. Obtain a font using
`load`::
from pyglet import font
arial = font.load('Arial', 14, bold=True, italic=False)
pyglet will load any system-installed fonts. You can add additional fonts
(for example, from your program resources) using `add_file` or
`add_directory`.
Obtain a list of `Glyph` objects for a string of text using the `Font`
object::
text = 'Hello, world!'
glyphs = arial.get_glyphs(text)
The most efficient way to render these glyphs is with a `GlyphString`::
glyph_string = GlyphString(text, glyphs)
glyph_string.draw()
There are also a variety of methods in both `Font` and
`GlyphString` to facilitate word-wrapping.
A convenient way to render a string of text is with a `Text`::
text = Text(font, text)
text.draw()
See the `pyglet.font.base` module for documentation on the base classes used
by this package.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 1493 2007-12-08 09:20:38Z Alex.Holkner $'
import sys
import os
import math
import pyglet
from pyglet.gl import *
from pyglet import window
from pyglet import image
class GlyphString(object):
'''An immutable string of glyphs that can be rendered quickly.
This class is ideal for quickly rendering single or multi-line strings
of text that use the same font. To wrap text using a glyph string,
call `get_break_index` to find the optimal breakpoint for each line,
the repeatedly call `draw` for each breakpoint.
'''
def __init__(self, text, glyphs, x=0, y=0):
'''Create a glyph string.
The `text` string is used to determine valid breakpoints; all glyphs
must have already been determined using
`pyglet.font.base.Font.get_glyphs`. The string
will be positioned with the baseline of the left-most glyph at the
given coordinates.
:Parameters:
`text` : str or unicode
String to represent.
`glyphs` : list of `pyglet.font.base.Glyph`
Glyphs representing `text`.
`x` : float
X coordinate of the left-side bearing of the left-most glyph.
`y` : float
Y coordinate of the baseline.
'''
        # Create an interleaved array in GL_T4F_V4F format and determine
# state changes required.
lst = []
texture = None
self.text = text
self.states = []
self.cumulative_advance = [] # for fast post-string breaking
state_from = 0
state_length = 0
for i, glyph in enumerate(glyphs):
if glyph.owner != texture:
if state_length:
self.states.append((state_from, state_length, texture))
texture = glyph.owner
state_from = i
state_length = 0
state_length += 1
t = glyph.tex_coords
lst += [t[0], t[1], t[2], 1.,
x + glyph.vertices[0], y + glyph.vertices[1], 0., 1.,
t[3], t[4], t[5], 1.,
x + glyph.vertices[2], y + glyph.vertices[1], 0., 1.,
t[6], t[7], t[8], 1.,
x + glyph.vertices[2], y + glyph.vertices[3], 0., 1.,
t[9], t[10], t[11], 1.,
x + glyph.vertices[0], y + glyph.vertices[3], 0., 1.]
x += glyph.advance
self.cumulative_advance.append(x)
self.states.append((state_from, state_length, texture))
self.array = (c_float * len(lst))(*lst)
self.width = x
def get_break_index(self, from_index, width):
'''Find a breakpoint within the text for a given width.
Returns a valid breakpoint after `from_index` so that the text
between `from_index` and the breakpoint fits within `width` pixels.
        This method uses precomputed cumulative glyph widths to give a quick
        answer, and so is much faster than
`pyglet.font.base.Font.get_glyphs_for_width`.
:Parameters:
`from_index` : int
Index of text to begin at, or 0 for the beginning of the
string.
`width` : float
Maximum width to use.
:rtype: int
:return: the index of text which will be used as the breakpoint, or
`from_index` if there is no valid breakpoint.
'''
to_index = from_index
if from_index >= len(self.text):
return from_index
if from_index:
width += self.cumulative_advance[from_index-1]
for i, (c, w) in enumerate(
zip(self.text[from_index:],
self.cumulative_advance[from_index:])):
if c in u'\u0020\u200b':
to_index = i + from_index + 1
if c == '\n':
return i + from_index + 1
if w > width:
return to_index
return to_index
def get_subwidth(self, from_index, to_index):
'''Return the width of a slice of this string.
:Parameters:
`from_index` : int
The start index of the string to measure.
`to_index` : int
The end index (exclusive) of the string to measure.
:rtype: float
'''
if to_index <= from_index:
return 0
width = self.cumulative_advance[to_index-1]
if from_index:
width -= self.cumulative_advance[from_index-1]
return width
def draw(self, from_index=0, to_index=None):
'''Draw a region of the glyph string.
Assumes texture state is enabled. To enable the texture state::
from pyglet.gl import *
glEnable(GL_TEXTURE_2D)
:Parameters:
`from_index` : int
Start index of text to render.
`to_index` : int
End index (exclusive) of text to render.
'''
if from_index >= len(self.text) or \
from_index == to_index or \
not self.text:
return
# XXX Safe to assume all required textures will use same blend state I
# think. (otherwise move this into loop)
self.states[0][2].apply_blend_state()
if from_index:
glPushMatrix()
glTranslatef(-self.cumulative_advance[from_index-1], 0, 0)
if to_index is None:
to_index = len(self.text)
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glInterleavedArrays(GL_T4F_V4F, 0, self.array)
for state_from, state_length, texture in self.states:
if state_from + state_length < from_index:
continue
state_from = max(state_from, from_index)
state_length = min(state_length, to_index - state_from)
if state_length <= 0:
break
glBindTexture(GL_TEXTURE_2D, texture.id)
glDrawArrays(GL_QUADS, state_from * 4, state_length * 4)
glPopClientAttrib()
if from_index:
glPopMatrix()
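# --- Added word-wrap sketch (illustrative comments; assumes a GL context
# exists and 'arial' was obtained via pyglet font loading) ---
# glyph_string = GlyphString(text, arial.get_glyphs(text))
# start = 0
# while start < len(text):
#     end = glyph_string.get_break_index(start, 300)  # wrap at 300 px
#     if end == start:
#         break
#     glyph_string.draw(start, end)
#     start = end
#     # (each subsequent line would also be glTranslatef'd down one line)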
class Text(object):
'''Simple displayable text.
This is a convenience class for rendering strings of text. It takes
care of caching the vertices so the text can be rendered every frame with
little performance penalty.
Text can be word-wrapped by specifying a `width` to wrap into. If the
width is not specified, it gives the width of the text as laid out.
:Ivariables:
`x` : int
X coordinate of the text
`y` : int
Y coordinate of the text
'''
_layout_width = None # Width to layout text to
_text_width = 0 # Calculated width of text
_text_height = 0 # Calculated height of text (bottom descender to top
# ascender)
_dirty = False # Flag if require layout
# Alignment constants
#: Align the left edge of the text to the given X coordinate.
LEFT = 'left'
#: Align the horizontal center of the text to the given X coordinate.
CENTER = 'center'
#: Align the right edge of the text to the given X coordinate.
RIGHT = 'right'
#: Align the bottom of the descender of the final line of text with the
#: given Y coordinate.
BOTTOM = 'bottom'
#: Align the baseline of the first line of text with the given Y
#: coordinate.
BASELINE = 'baseline'
#: Align the top of the ascender of the first line of text with the given
#: Y coordinate.
TOP = 'top'
_halign = LEFT
_valign = BASELINE
def __init__(self, font, text='', x=0, y=0, z=0, color=(1,1,1,1),
width=None, halign=LEFT, valign=BASELINE):
'''Create displayable text.
:Parameters:
`font` : `Font`
Font to render the text in.
`text` : str
Initial string to render.
`x` : float
X coordinate of the left edge of the text.
`y` : float
Y coordinate of the baseline of the text. If the text is
word-wrapped, this refers to the first line of text.
`z` : float
Z coordinate of the text plane.
`color` : 4-tuple of float
Color to render the text in. Alpha values can be specified
in the fourth component.
`width` : float
Width to limit the rendering to. Text will be word-wrapped
if necessary.
`halign` : str
Alignment of the text. See `Text.halign` for details.
`valign` : str
Controls positioning of the text based off the y coordinate.
One of BASELINE, BOTTOM, CENTER or TOP. Defaults to BASELINE.
'''
self._dirty = True
self.font = font
self._text = text
self.color = color
self.x = x
self.y = y
self.leading = 0
self._layout_width = width
self._halign = halign
self._valign = valign
def _clean(self):
'''Resolve changed layout'''
# Adding a space to the end of the text simplifies the inner loop
# of the wrapping layout. It ensures there is a breakpoint returned at
# the end of the string (GlyphString cannot guarantee this otherwise
        # it would not be usable with styled layout algorithms).
text = self._text + ' '
glyphs = self.font.get_glyphs(text)
self._glyph_string = GlyphString(text, glyphs)
self.lines = []
i = 0
if self._layout_width is None:
self._text_width = 0
while '\n' in text[i:]:
end = text.index('\n', i)
self.lines.append((i, end))
self._text_width = max(self._text_width,
self._glyph_string.get_subwidth(i, end))
i = end + 1
            # Discard the artificial appended space.
end = len(text) - 1
if i < end:
self.lines.append((i, end))
self._text_width = max(self._text_width,
self._glyph_string.get_subwidth(i, end))
else:
bp = self._glyph_string.get_break_index(i, self._layout_width)
while i < len(text) and bp > i:
if text[bp-1] == '\n':
self.lines.append((i, bp - 1))
else:
self.lines.append((i, bp))
i = bp
bp = self._glyph_string.get_break_index(i, self._layout_width)
if i < len(text) - 1:
self.lines.append((i, len(text)))
self.line_height = self.font.ascent - self.font.descent + self.leading
self._text_height = self.line_height * len(self.lines)
self._dirty = False
def draw(self):
'''Render the text.
This method makes no assumptions about the projection. Using the
default projection set up by pyglet, coordinates refer to window-space
and the text will be aligned to the window. Other projections can
be used to render text into 3D space.
The OpenGL state is not modified by this method.
'''
if self._dirty:
self._clean()
y = self.y
if self._valign == self.BOTTOM:
y += self.height - self.font.ascent
elif self._valign == self.CENTER:
y += self.height // 2 - self.font.ascent
elif self._valign == self.TOP:
y -= self.font.ascent
glPushAttrib(GL_CURRENT_BIT | GL_ENABLE_BIT)
glEnable(GL_TEXTURE_2D)
glColor4f(*self.color)
glPushMatrix()
glTranslatef(0, y, 0)
for start, end in self.lines:
width = self._glyph_string.get_subwidth(start, end)
x = self.x
align_width = self._layout_width or 0
if self._halign == self.RIGHT:
x += align_width - width
elif self._halign == self.CENTER:
x += align_width // 2 - width // 2
glTranslatef(x, 0, 0)
self._glyph_string.draw(start, end)
glTranslatef(-x, -self.line_height, 0)
glPopMatrix()
glPopAttrib()
def _get_width(self):
if self._dirty:
self._clean()
if self._layout_width:
return self._layout_width
return self._text_width
def _set_width(self, width):
self._layout_width = width
self._dirty = True
width = property(_get_width, _set_width,
doc='''Width of the text.
When set, this enables word-wrapping to the specified width.
Otherwise, the width of the text as it will be rendered can be
determined.
:type: float
''')
def _get_height(self):
if self._dirty:
self._clean()
return self._text_height
height = property(_get_height,
doc='''Height of the text.
This property is the ascent minus the descent of the font, unless
there is more than one line of word-wrapped text, in which case
the height takes into account the line leading. Read-only.
:type: float
''')
def _set_text(self, text):
self._text = text
self._dirty = True
text = property(lambda self: self._text, _set_text,
doc='''Text to render.
The glyph vertices are only recalculated as needed, so multiple
changes to the text can be performed with no performance penalty.
:type: str
''')
def _set_halign(self, halign):
self._halign = halign
self._dirty = True
halign = property(lambda self: self._halign, _set_halign,
doc='''Horizontal alignment of the text.
The text is positioned relative to `x` and `width` according to this
property, which must be one of the alignment constants `LEFT`,
`CENTER` or `RIGHT`.
:type: str
''')
def _set_valign(self, valign):
self._valign = valign
self._dirty = True
valign = property(lambda self: self._valign, _set_valign,
doc='''Vertical alignment of the text.
The text is positioned relative to `y` according to this property,
which must be one of the alignment constants `BOTTOM`, `BASELINE`,
`CENTER` or `TOP`.
:type: str
''')
if not getattr(sys, 'is_epydoc', False):
if sys.platform == 'darwin':
from pyglet.font.carbon import CarbonFont
_font_class = CarbonFont
elif sys.platform in ('win32', 'cygwin'):
if pyglet.options['font'][0] == 'win32':
from pyglet.font.win32 import Win32Font
_font_class = Win32Font
elif pyglet.options['font'][0] == 'gdiplus':
from pyglet.font.win32 import GDIPlusFont
_font_class = GDIPlusFont
else:
assert False, 'Unknown font driver'
else:
from pyglet.font.freetype import FreeTypeFont
_font_class = FreeTypeFont
def load(name, size, bold=False, italic=False, dpi=None):
'''Load a font for rendering.
:Parameters:
`name` : str, or list of str
Font family, for example, "Times New Roman". If a list of names
is provided, the first one matching a known font is used. If no
font can be matched to the name(s), a default font is used.
`size` : float
Size of the font, in points. The returned font may be an exact
match or the closest available.
`bold` : bool
If True, a bold variant is returned, if one exists for the given
family and size.
`italic` : bool
If True, an italic variant is returned, if one exists for the given
family and size.
`dpi` : float
If specified, the assumed resolution of the display device, for
the purposes of determining the pixel size of the font. If not
specified, the platform's native resolution is used (72 DPI on Mac
OS X, 96 DPI on Windows, 120 DPI on Windows with large fonts, and
user-settable on Linux).
:rtype: `Font`
'''
# Find first matching name
if type(name) in (tuple, list):
for n in name:
if _font_class.have_font(n):
name = n
break
else:
name = None
# Locate or create font cache
shared_object_space = get_current_context().object_space
if not hasattr(shared_object_space, 'pyglet_font_font_cache'):
shared_object_space.pyglet_font_font_cache = {}
font_cache = shared_object_space.pyglet_font_font_cache
# Look for font name in font cache
descriptor = (name, size, bold, italic, dpi)
if descriptor in font_cache:
return font_cache[descriptor]
# Not in cache, create from scratch
font = _font_class(name, size, bold=bold, italic=italic, dpi=dpi)
font_cache[descriptor] = font
return font
def add_file(font):
'''Add a font to pyglet's search path.
In order to load a font that is not installed on the system, you must
call this method to tell pyglet that it exists. You can supply
either a filename or any file-like object.
The font format is platform-dependent, but is typically a TrueType font
file containing a single font face. Note that to load this file after
adding it you must specify the face name to `load`, not the filename.
:Parameters:
`font` : str or file
Filename or file-like object to load fonts from.
'''
if type(font) in (str, unicode):
font = open(font, 'rb')
if hasattr(font, 'read'):
font = font.read()
_font_class.add_font_data(font)
def add_directory(dir):
'''Add a directory of fonts to pyglet's search path.
This function simply calls `add_file` for each file with a ``.ttf``
extension in the given directory. Subdirectories are not searched.
:Parameters:
`dir` : str
Directory that contains font files.
'''
import os
for file in os.listdir(dir):
if file[-4:].lower() == '.ttf':
add_file(os.path.join(dir, file))
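# --- Added usage sketch (illustrative comments; a window must be created
# first so a GL context and its per-context font cache are available) ---
# from pyglet import font, window
# win = window.Window()
# font.add_directory('data/fonts')          # hypothetical font directory
# action_man = font.load('Action Man', 14)  # face name, not the filename
# label = Text(action_man, 'Hello, world!', x=10, y=10)
# label.draw()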
|
bsd-3-clause
| -2,993,675,939,295,804,400
| 34.221667
| 79
| 0.58288
| false
| 4.111479
| false
| false
| false
|
JackDanger/sentry
|
tests/acceptance/test_project_keys.py
|
1
|
2933
|
from __future__ import absolute_import
from datetime import datetime
from django.utils import timezone
from sentry.models import ProjectKey
from sentry.testutils import AcceptanceTestCase
class ProjectKeysTest(AcceptanceTestCase):
def setUp(self):
super(ProjectKeysTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.org = self.create_organization(
name='Rowdy Tiger',
owner=None,
)
self.team = self.create_team(
organization=self.org,
name='Mariachi Band'
)
self.project = self.create_project(
organization=self.org,
team=self.team,
name='Bengal',
)
self.create_member(
user=self.user,
organization=self.org,
role='owner',
teams=[self.team],
)
ProjectKey.objects.filter(project=self.project).delete()
ProjectKey.objects.create(
project=self.project,
label='Default',
public_key='5cc0482a13d248ff99f9717101dd6356',
secret_key='410fd998318844b8894775f36184ec28',
)
self.login_as(self.user)
self.path = '/{}/{}/settings/keys/'.format(self.org.slug, self.project.slug)
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading-indicator')
self.browser.snapshot('project keys')
self.browser.wait_until('.ref-keys')
class ProjectKeyDetailsTest(AcceptanceTestCase):
def setUp(self):
super(ProjectKeyDetailsTest, self).setUp()
self.user = self.create_user('foo@example.com')
self.org = self.create_organization(
name='Rowdy Tiger',
owner=None,
)
self.team = self.create_team(
organization=self.org,
name='Mariachi Band'
)
self.project = self.create_project(
organization=self.org,
team=self.team,
name='Bengal',
)
self.create_member(
user=self.user,
organization=self.org,
role='owner',
teams=[self.team],
)
self.pk = ProjectKey.objects.create(
project=self.project,
label='Default',
public_key='5cc0482a13d248ff99f9717101dd6356',
secret_key='410fd998318844b8894775f36184ec28',
date_added=datetime(2015, 10, 1, 21, 19, 5, 648517, tzinfo=timezone.utc),
)
self.login_as(self.user)
self.path = '/{}/{}/settings/keys/{}/'.format(
self.org.slug, self.project.slug, self.pk.public_key,
)
def test_simple(self):
self.browser.get(self.path)
self.browser.wait_until_not('.loading-indicator')
self.browser.snapshot('project key details')
self.browser.wait_until('.ref-key-details')
|
bsd-3-clause
| 2,673,051,990,279,457,300
| 30.537634
| 85
| 0.582339
| false
| 3.81901
| true
| false
| false
|
sgibbes/zonal_stats_app
|
utilities/zstats_subprocess.py
|
1
|
2378
|
import sys
import os
import sqlite3
import prep_shapefile
import arcpy
from arcpy.sa import *
import datetime
import simpledbf
arcpy.CheckOutExtension("Spatial")
value = sys.argv[1]
zone = sys.argv[2]
final_aoi = sys.argv[3]
cellsize = sys.argv[4]
analysis = sys.argv[5]
start = int(sys.argv[6])
stop = int(sys.argv[7])
arcpy.env.overwriteOutput = True
for i in range(start, stop):
print("prepping feature id {}".format(i))
# select one individual feature from the input shapefile
mask = prep_shapefile.zonal_stats_mask(final_aoi, i)
scratch_wkspc = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'scratch.gdb')
# set environments
arcpy.env.extent = mask
arcpy.env.mask = mask
arcpy.env.cellSize = cellsize
arcpy.env.snapRaster = value
arcpy.env.scratchWorkspace = scratch_wkspc
arcpy.env.workspace = scratch_wkspc
tables_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tables')
z_stats_tbl = os.path.join(tables_dir, 'output_{}.dbf'.format(i))
start_time = datetime.datetime.now()
print("running zstats")
outzstats = ZonalStatisticsAsTable(zone, "VALUE", value, z_stats_tbl, "DATA", "SUM")
end_time = datetime.datetime.now() - start_time
print("debug:time elapsed: {}".format(end_time))
# convert the output zstats table into a pandas DF
dbf = simpledbf.Dbf5(z_stats_tbl)
df = dbf.to_dataframe()
# populate a new field "id" with the FID and analysis with the sum
df['ID'] = i
df[analysis] = df['SUM']
    # sometimes this column comes back as object dtype, so cast it to int
df.VALUE = df.VALUE.astype(int)
# name of the sql database to store the sql table
zstats_results_db = os.path.join(tables_dir, 'zstats_results_db.db')
# create a connection to the sql database
conn = sqlite3.connect(zstats_results_db)
# append the dataframe to the database
df.to_sql(analysis, conn, if_exists='append')
# delete these because they create a lock
del df
del dbf
os.remove(z_stats_tbl)
# reset these environments. Otherwise the shapefile is redefined based on features within the extent
arcpy.env.extent = None
arcpy.env.mask = None
arcpy.env.cellSize = None
arcpy.env.snapRaster = None
print('process succeeded for id {0}'.format(i))
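# --- Added follow-up sketch (illustrative; shows how the rows written above
# could be read back, assuming the same database file and table name) ---
def read_results(db_path, table):
    conn = sqlite3.connect(db_path)
    try:
        # the analysis column shares its name with the table it lives in
        query = 'SELECT ID, VALUE, "{0}" FROM "{0}"'.format(table)
        return conn.execute(query).fetchall()
    finally:
        conn.close()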
|
apache-2.0
| -2,103,962,230,845,745,000
| 28.358025
| 108
| 0.687132
| false
| 3.321229
| false
| false
| false
|
JordanP/openstack-snippets
|
ospurge/ospurge/main.py
|
1
|
8851
|
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import concurrent.futures
import logging
import operator
import sys
import threading
import typing
import os_client_config
import shade
from ospurge import exceptions
from ospurge.resources.base import ServiceResource
from ospurge import utils
if typing.TYPE_CHECKING: # pragma: no cover
from typing import Optional # noqa: F401
def configure_logging(verbose: bool) -> None:
log_level = logging.INFO if verbose else logging.WARNING
logging.basicConfig(
format='%(levelname)s:%(name)s:%(asctime)s:%(message)s',
level=log_level
)
logging.getLogger(
'requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
def create_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Purge resources from an Openstack project."
)
parser.add_argument(
"--verbose", action="store_true",
help="Make output verbose"
)
parser.add_argument(
"--dry-run", action="store_true",
help="List project's resources"
)
parser.add_argument(
"--delete-shared-resources", action="store_true",
help="Whether to delete shared resources (public images and external "
"networks)"
)
parser.add_argument(
"--admin-role-name", default="admin",
help="Name of admin role. Defaults to 'admin'. This role will be "
"temporarily granted on the project to purge to the "
"authenticated user."
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--purge-project", metavar="ID_OR_NAME",
help="ID or Name of project to purge. This option requires "
"to authenticate with admin credentials."
)
group.add_argument(
"--purge-own-project", action="store_true",
help="Purge resources of the project used to authenticate. Useful "
"if you don't have the admin credentials of the cloud."
)
return parser
class CredentialsManager(object):
def __init__(self, options: argparse.Namespace) -> None:
self.options = options
self.revoke_role_after_purge = False
self.disable_project_after_purge = False
self.cloud = None # type: Optional[shade.OpenStackCloud]
self.operator_cloud = None # type: Optional[shade.OperatorCloud]
if options.purge_own_project:
self.cloud = shade.openstack_cloud(argparse=options)
self.user_id = self.cloud.keystone_session.get_user_id()
self.project_id = self.cloud.keystone_session.get_project_id()
else:
self.operator_cloud = shade.operator_cloud(argparse=options)
self.user_id = self.operator_cloud.keystone_session.get_user_id()
project = self.operator_cloud.get_project(options.purge_project)
if not project:
raise exceptions.OSProjectNotFound(
"Unable to find project '{}'".format(options.purge_project)
)
self.project_id = project['id']
# If project is not enabled, we must disable it after purge.
self.disable_project_after_purge = not project.enabled
# Reuse the information passed to get the `OperatorCloud` but
# change the project. This way we bind/re-scope to the project
# we want to purge, not the project we authenticated to.
self.cloud = shade.openstack_cloud(
**utils.replace_project_info(
self.operator_cloud.cloud_config.config,
self.project_id
)
)
auth_args = self.cloud.cloud_config.get_auth_args()
logging.warning(
"Going to list and/or delete resources from project '%s'",
options.purge_project or auth_args.get('project_name')
or auth_args.get('project_id')
)
def ensure_role_on_project(self) -> None:
if self.operator_cloud and self.operator_cloud.grant_role(
self.options.admin_role_name,
project=self.options.purge_project, user=self.user_id
):
logging.warning(
"Role 'Member' granted to user '%s' on project '%s'",
self.user_id, self.options.purge_project
)
self.revoke_role_after_purge = True
def revoke_role_on_project(self) -> None:
self.operator_cloud.revoke_role(
self.options.admin_role_name, user=self.user_id,
project=self.options.purge_project)
logging.warning(
"Role 'Member' revoked from user '%s' on project '%s'",
self.user_id, self.options.purge_project
)
def ensure_enabled_project(self) -> None:
if self.operator_cloud and self.disable_project_after_purge:
self.operator_cloud.update_project(self.project_id, enabled=True)
logging.warning("Project '%s' was disabled before purge and it is "
"now enabled", self.options.purge_project)
def disable_project(self) -> None:
self.operator_cloud.update_project(self.project_id, enabled=False)
logging.warning("Project '%s' was disabled before purge and it is "
"now also disabled", self.options.purge_project)
@utils.monkeypatch_oscc_logging_warning
def runner(
resource_mngr: ServiceResource, options: argparse.Namespace,
exit: threading.Event
) -> None:
try:
if not options.dry_run:
resource_mngr.wait_for_check_prerequisite(exit)
for resource in resource_mngr.list():
# No need to continue if requested to exit.
if exit.is_set():
return
if resource_mngr.should_delete(resource):
logging.info("Going to delete %s",
resource_mngr.to_str(resource))
if options.dry_run:
continue
utils.call_and_ignore_notfound(resource_mngr.delete, resource)
except Exception as exc:
log = logging.error
recoverable = False
if hasattr(exc, 'inner_exception'):
# inner_exception is a tuple (type, value, traceback)
# mypy complains: "Exception" has no attribute "inner_exception"
exc_info = exc.inner_exception # type: ignore
if exc_info[0].__name__.lower().endswith('endpointnotfound'):
log = logging.info
recoverable = True
log("Can't deal with %s: %r", resource_mngr.__class__.__name__, exc)
if not recoverable:
exit.set()
def main() -> None:
parser = create_argument_parser()
cloud_config = os_client_config.OpenStackConfig()
cloud_config.register_argparse_arguments(parser, sys.argv)
options = parser.parse_args()
configure_logging(options.verbose)
creds_manager = CredentialsManager(options=options)
creds_manager.ensure_enabled_project()
creds_manager.ensure_role_on_project()
resource_managers = sorted(
[cls(creds_manager) for cls in utils.get_all_resource_classes()],
key=operator.methodcaller('order')
)
# This is an `Event` used to signal whether one of the threads encountered
# an unrecoverable error, at which point all threads should exit because
# otherwise there's a chance the cleanup process never finishes.
exit = threading.Event()
# Dummy function to work around `ThreadPoolExecutor.map()` not accepting
# a callable with arguments.
def partial_runner(resource_manager: ServiceResource) -> None:
runner(resource_manager, options=options,
exit=exit) # pragma: no cover
try:
with concurrent.futures.ThreadPoolExecutor(8) as executor:
executor.map(partial_runner, resource_managers)
except KeyboardInterrupt:
exit.set()
if creds_manager.revoke_role_after_purge:
creds_manager.revoke_role_on_project()
if creds_manager.disable_project_after_purge:
creds_manager.disable_project()
sys.exit(int(exit.is_set()))
if __name__ == "__main__": # pragma: no cover
main()
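# --- Added usage sketch (illustrative; assumes the package installs a
# console entry point named 'ospurge') ---
# List a project's resources without deleting anything:
#   ospurge --purge-own-project --dry-run --verbose
# Purge another project, authenticating with admin credentials:
#   ospurge --purge-project demo-project --os-cloud mycloud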
|
apache-2.0
| -7,863,330,502,847,431,000
| 35.726141
| 79
| 0.632471
| false
| 4.1691
| true
| false
| false
|
ideascube/ideascube
|
ideascube/blog/migrations/0001_initial.py
|
1
|
1992
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Content',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(max_length=100, verbose_name='title')),
('author_text', models.CharField(max_length=300, verbose_name='author text', blank=True)),
('summary', models.CharField(max_length=300, verbose_name='summary')),
('image', models.ImageField(upload_to=b'blog/image', verbose_name='image', blank=True)),
('text', models.TextField(verbose_name='text')),
('published_at', models.DateTimeField(verbose_name='publication date')),
('status', models.PositiveSmallIntegerField(default=1, verbose_name='Status', choices=[(1, 'draft'), (2, 'published'), (3, 'deleted')])),
('lang', models.CharField(default=b'en', max_length=10, verbose_name='Language', choices=[(b'en', b'English'), (b'fr', 'Fran\xe7ais'), (b'ar', '\u0627\u0644\u0639\u0631\u0628\u064a\u0629')])),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
options={
'abstract': False,
},
),
]
|
agpl-3.0
| -7,513,129,243,814,382,000
| 51.421053
| 208
| 0.601908
| false
| 3.898239
| false
| false
| false
|
chenzilin/git-repo
|
git_refs.py
|
1
|
3980
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from trace import Trace
import platform_utils
HEAD = 'HEAD'
R_CHANGES = 'refs/changes/'
R_HEADS = 'refs/heads/'
R_TAGS = 'refs/tags/'
R_PUB = 'refs/published/'
R_M = 'refs/remotes/m/'
class GitRefs(object):
def __init__(self, gitdir):
self._gitdir = gitdir
self._phyref = None
self._symref = None
self._mtime = {}
@property
def all(self):
self._EnsureLoaded()
return self._phyref
def get(self, name):
try:
return self.all[name]
except KeyError:
return ''
def deleted(self, name):
if self._phyref is not None:
if name in self._phyref:
del self._phyref[name]
if name in self._symref:
del self._symref[name]
if name in self._mtime:
del self._mtime[name]
def symref(self, name):
try:
self._EnsureLoaded()
return self._symref[name]
except KeyError:
return ''
def _EnsureLoaded(self):
if self._phyref is None or self._NeedUpdate():
self._LoadAll()
def _NeedUpdate(self):
Trace(': scan refs %s', self._gitdir)
for name, mtime in self._mtime.items():
try:
if mtime != os.path.getmtime(os.path.join(self._gitdir, name)):
return True
except OSError:
return True
return False
def _LoadAll(self):
Trace(': load refs %s', self._gitdir)
self._phyref = {}
self._symref = {}
self._mtime = {}
self._ReadPackedRefs()
self._ReadLoose('refs/')
self._ReadLoose1(os.path.join(self._gitdir, HEAD), HEAD)
scan = self._symref
attempts = 0
while scan and attempts < 5:
scan_next = {}
for name, dest in scan.items():
if dest in self._phyref:
self._phyref[name] = self._phyref[dest]
else:
scan_next[name] = dest
scan = scan_next
attempts += 1
def _ReadPackedRefs(self):
path = os.path.join(self._gitdir, 'packed-refs')
try:
fd = open(path, 'r')
mtime = os.path.getmtime(path)
except IOError:
return
except OSError:
return
try:
for line in fd:
line = str(line)
if line[0] == '#':
continue
if line[0] == '^':
continue
line = line[:-1]
p = line.split(' ')
ref_id = p[0]
name = p[1]
self._phyref[name] = ref_id
finally:
fd.close()
self._mtime['packed-refs'] = mtime
def _ReadLoose(self, prefix):
base = os.path.join(self._gitdir, prefix)
for name in platform_utils.listdir(base):
p = os.path.join(base, name)
if platform_utils.isdir(p):
self._mtime[prefix] = os.path.getmtime(base)
self._ReadLoose(prefix + name + '/')
elif name.endswith('.lock'):
pass
else:
self._ReadLoose1(p, prefix + name)
def _ReadLoose1(self, path, name):
try:
fd = open(path)
except IOError:
return
try:
try:
mtime = os.path.getmtime(path)
ref_id = fd.readline()
except (IOError, OSError):
return
finally:
fd.close()
try:
ref_id = ref_id.decode()
except AttributeError:
pass
if not ref_id:
return
ref_id = ref_id[:-1]
if ref_id.startswith('ref: '):
self._symref[name] = ref_id[5:]
else:
self._phyref[name] = ref_id
self._mtime[name] = mtime
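# --- Added usage sketch (illustrative; assumes it runs inside the repo
# tool so that the local trace and platform_utils modules are importable) ---
if __name__ == '__main__':
    import sys
    refs = GitRefs(sys.argv[1] if len(sys.argv) > 1 else '.git')
    print('HEAD -> %s' % refs.symref(HEAD))  # e.g. refs/heads/master
    for name in sorted(refs.all):
        print('%s %s' % (refs.get(name), name))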
|
apache-2.0
| -7,995,023,122,377,508,000
| 22.690476
| 74
| 0.585427
| false
| 3.528369
| false
| false
| false
|
remibergsma/cosmic
|
cosmic-core/systemvm/patches/debian/config/opt/cloud/bin/cs/CsConfig.py
|
1
|
2202
|
# -- coding: utf-8 --
from CsAddress import CsAddress
from CsDatabag import CsCmdLine
class CsConfig(object):
"""
A class to cache all the stuff that the other classes need
"""
__LOG_FILE = "/var/log/cloud.log"
__LOG_LEVEL = "DEBUG"
__LOG_FORMAT = "%(asctime)s %(levelname)-8s %(message)s"
cl = None
def __init__(self):
self.fw = []
self.ingress_rules = {}
def set_address(self):
self.ips = CsAddress("ips", self)
@classmethod
def get_cmdline_instance(cls):
if cls.cl is None:
cls.cl = CsCmdLine("cmdline")
return cls.cl
def cmdline(self):
return self.get_cmdline_instance()
def address(self):
return self.ips
def get_fw(self):
return self.fw
def get_ingress_rules(self, key):
        if key in self.ingress_rules:
return self.ingress_rules[key]
return None
def set_ingress_rules(self, key, ingress_rules):
self.ingress_rules[key] = ingress_rules
def get_logger(self):
return self.__LOG_FILE
def get_level(self):
return self.__LOG_LEVEL
def is_vpc(self):
return self.cl.get_type() == 'vpcrouter'
def is_router(self):
return self.cl.get_type() == 'router'
def is_dhcp(self):
return self.cl.get_type() == 'dhcpsrvr'
def has_dns(self):
return not self.use_extdns()
def has_metadata(self):
return any((self.is_vpc(), self.is_router(), self.is_dhcp()))
def use_extdns(self):
return self.cmdline().idata().get('useextdns', 'false') == 'true'
def get_domain(self):
return self.cl.get_domain()
def get_dns(self):
conf = self.cmdline().idata()
dns = []
if not self.use_extdns():
if not self.is_vpc() and self.cl.is_redundant() and self.cl.get_guest_gw():
dns.append(self.cl.get_guest_gw())
else:
dns.append(self.address().get_guest_ip())
for name in ["dns1", "dns2"]:
if name in conf:
dns.append(conf[name])
return dns
def get_format(self):
return self.__LOG_FORMAT
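# --- Added usage sketch (illustrative comments; assumes the router VM
# databags read by CsCmdLine/CsAddress are present on the system) ---
# config = CsConfig()
# config.set_address()
# if config.is_vpc():
#     dns_servers = config.get_dns()  # guest gateway or configured dns1/dns2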
|
apache-2.0
| 761,955,633,890,424,600
| 24.022727
| 87
| 0.563124
| false
| 3.517572
| false
| false
| false
|